{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = 
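// Note: the dot in /huggingface.co/g below is unescaped, so it matches any single character between "huggingface" and "co"; /huggingface\.co/g would match only the literal domain.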
span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { 
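// Watch the whole document body (childList + subtree) so header/nav elements re-rendered by client-side navigation are caught and re-processed by the observer callback defined above.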
observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n\n /////////////////////////////////////////////////////\n //////////// style.css ////////////////////////\n /////////////////////////////////////////////////////\n\nhtml, body {\n margin: 0;\n padding: 0;\n}\ncanvas {\n display: block;\n}\n\n /////////////////////////////////////////////////////\n //////////// Beach.js //////////////////\n /////////////////////////////////////////////////////\n\nvar Beach = function (o){\n this.o = o;\n};\n\nBeach.prototype.drawBeach = function(){\n push();\n translate(this.o.x, this.o.y);\n\n //sea\n fill(5,75,185);\n rect(0, this.o.y + this.o.seaHeight, this.o.width, this.o.height);\n\n //sand\n fill(243, 212, 107);\n rect(0, this.o.y - this.o.height, this.o.width, this.o.height);\n\n //sun\n fill(255, 238, 54);\n ellipse(-50, 50, 50);\n pop();\n};\n\n\n /////////////////////////////////////////////////////\n //////////// sketch.js //////////////////\n /////////////////////////////////////////////////////\n\nvar b = new Beach({\n x: windowWidth/2,\n y: windowHeight/2,\n width: windowWidth,\n height: windowHeight,\n seaHeight: windowHeight / 4\n});\nfunction setup(){\n createCanvas(windowWidth, windowHeight);\n}\nfunction draw(){\n background(255);\n b.drawBeach();\n}\n @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ \n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":45,"cells":{"text":{"kind":"string","value":"3,521,297 A, July 27, 1969 P. D. LaDue ..................................................... 541 879 [75] Inventor: A. K. DeJong, 705 G Street, Suite 54 405/6 4.571,196 A, Oct. 14,1984 D. L. Moore et al. ....................................... 431879 1853.939.6, , 22 Aug. 1943 i L. C. Hirschfeld et al. 431879 (73) Assignee. New Jersey Bell Telephone Company, New [54] 13ark et al. (Nippon Telephone & Telegraph Corporation)........ 431879 60/462.56 X Jersey (US.) [21] Appl. No.: 685,812 H 80266 (Nippon Telephone & Telegraph Corporation)........ 268/37750 188,736.2 (Alcatel) ...................................... 268/37750 (22) Filed: April 30, 1985 (Mitsubishi Electric Corporation) ................................ 541879 814,498 (Motorola) ..................................... 544/376.35 (21) Appl. No.: 685,812 654,402 (Alcatel) ......................................... 268/36543 (US. 405,220 405,248 (Nippon Telephone & Telegraph Corp.) 431879 (US.) 22 Filed Apr. 30, 1985 [51] Int. Cl. 268/37705, 37750, 259/125, 6699 (Primary Examiner-William A. Day (58) Field of Search ..................................... 268/36523, 36543, (Attorney, Agent, or FirmAlvarez, Calderon, Hecht, 37705, 37750, 259/125, 117, 256/591, 465.16. 646; 541 879, 646.55; 6699, 29, 646.67, 669, 669.9; and Faull [56] Rof lations Cited 6700/78; 333/11 R UNITED STATES PATENTS 4,868,898 9/1989 Kino ................................... 268/36523 3,481.596 11/1969 Lockman et al. .................................. 333/319 4,979,241 12/1990 Mark et al. ......................................... 268/36523 3.832.602 8/1974 Horowitz .............................. 
333/319 X 11 Claims, 10 Drawing Sheets"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":46,"cells":{"text":{"kind":"string","value":"article{{\n{{biblatex:tag1}}\n{{biblatex:tag2}}\n\n{{biblatex:dateyear}} {{biblatex:author}}\n\n{{biblatex:title}} {{biblatex:subtitle}},\n\nIn: {{biblatex:booktitle}} {{biblatex:booktitleaddon}},\n\nEdited by {{biblatex:editor}},\n\n{{biblatex:volumes}},\n\n{{biblatex:location}} {{biblatex:address}}: {{biblatex:publisher}},\n\n{{biblatex:pages}} ({{biblatex:doiprefix}}{{biblatex:doi}}).\n\n{{biblatex:howpublished}}\n\n{{biblatex:abstract}}\n}}}\n\n{{{1}}}Author, Date, Title, JournalName, Volume, Issue, Page (DOI) \n\n| biblatex = url = https://en.wikipedia.org/wiki/Help:Reflist\n}}\n\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":47,"cells":{"text":{"kind":"string","value":"GO:0000166\tphosphatase activity \tCatalysis of the hydrolysis of phosphate monoesters, with the release of inorganic phosphate. This is a general term; there are many specific subclasses, for example nucleotide, nucleoside, protein, histidyl, or 3-phosphoglycerate phosphatases. \t0\n\tGO:0000290\ttranscription regulator activity \tA generic term for proteins that regulate the transcription of a nucleic acid strand to form a complementary strand of RNA by RNA polymerase (Nucleic Acid Template), directly or via other regulator proteins or regulatory RNA molecules, and may include regulatory activities such as methylation, hydroxylation, acetylation and SUMOylation of either of histones or the transcription factor. \t0\n\tGO:0004602\ttelomerase holoenzyme complex \tThe telomerase complex is a ribonucleoprotein complex that consists of a highly conserved telomerase reverse transcriptase (TERT) and an RNA subunit (TR), plus other associated proteins. It exhibits reverse transcriptase activity and uses the RNA template to produce G-rich telomere repeats at the ends of chromosomes during DNA replication. \t0\n\tGO:0005634\tnucleus\t A membrane-bounded organelle of eukaryotic cells in which chromosomes are housed and replicated. In most cells, the nucleus contains all of the cell's chromosomes except the organellar chromosomes, and is the site of RNA synthesis and processing. In some species, or in specialized cell types, RNA metabolism or DNA replication may be absent. \t0\n\tGO:0005667\tchromosome Any of the DNA molecules that form the genetic material of a virus or an organism; each chromosome consists of two very long strands of DNA duplex plus associated nucleoproteins. In bacteria, there is normally a single chromosome, in eukaryotic organisms there are two or more chromosomes, and in viruses there may be one or more chromosomes. Chromosomes are normally visible only when they condense at metaphase and telophase during cell division. Chromosomes in eukaryotes exist in their highly extended state during interphase. In higher organisms chromosomes exist in the cell nucleus or other special organelles (mitochondria, plastids); prokaryotes have a single large nucleoid complex in their cytoplasm, which may be considered a chromosome. Some viruses have single- or double-stranded RNA instead of DNA for their genetic material; this is also referred to as a chromosome. \t0\n\tGO:0006355\tregulation of transcription, DNA-templated Any process that modulates the frequency, rate or extent of DNA-templated transcription. 
\t0\n\tGO:0008083\tprotein-DNA complex A protein that is physically associated with a DNA molecule (polydeoxyribonucleotide). \t0\n\tGO:0016597\tantioxidant activity 0\n\tGO:0019983\tregulation of molecular function Any process that modulates the frequency, rate or extent of a molecular function, any activity exerted by a gene product that results in its action on a single molecule or its interaction with another single molecule or ligand, which may or may not be another gene product. \t0\n\tGO:0042802\tidentical protein binding\t The binding of a macromolecule to another identical macromolecule, or to one of its subunits or complexes of subunits, or to a copy of itself. \t0\n\tGO:0046686\tresponse to stimulus\tThe process whose specific outcome is the progression of the organism over time, from its formation to the mature structure. An organism is the fundamental unit of life; it is an entity that maintains its integrity as a viable living system by acquiring resources from its environment and converting them into new cellular components. \t0\n\tGO:0061930\tregulation of cell organization or biogenesis\tThe process whose specific outcome is the progression of an organism over time from the initial condition of the zygote to the organism's mature form \t0\n\tGO:1903558\tresponse to nucleic acid Any process that results in a change in state or activity of a cell or an organism (in terms of movement, secretion, enzyme production, gene expression, etc.) as a result of a nucleic acid stimulus. \t0"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":48,"cells":{"text":{"kind":"string","value":"js-jquery】 $()、$.getJSON和$.ajax()的使用(含案例)\n 278\n6KB\n2023-10-29 09:53\n作者:li_0709\n【js-jquery】 $()、$.getJSON和$.ajax()的使用(含案例)\n<代码开始>\n$()、$.getJSON、$.ajax()的使用
<end of code>
jquery software/plugin

Recursion is a concept in computer science and programming where a function calls itself in order to solve a problem. In simpler terms, it is a method of solving a complex problem by breaking it down into smaller sub-problems of the same type, and then solving those sub-problems using the same approach.

In short, a recursive function is a function that calls itself.

```python
def factorial(n):
    # print(n)
    if n == 0 or n == 1:
        return 1
    return n * factorial(n - 1)

factorial(5)
```

    120

1. Call Stack: Recursive calls in Python (and other programming languages) are managed using a call stack. The call stack is a data structure that keeps track of function calls in the order they occur. When a recursive function is called, a new frame (also called a stack frame) is pushed onto the call stack to store information about the current function call, including its arguments, local variables, and return address.

2. Base Case: A recursive function must have a base case, which is a condition that causes the recursion to stop. Without a base case, the recursive calls would continue indefinitely, leading to a stack overflow error, where the call stack becomes too large and consumes all available memory.

3. Recursive Calls: During each recursive call, the function is executed with different arguments, typically with the problem being divided into smaller sub-problems. The recursive calls create a chain of function calls, each adding a new frame to the call stack.

4. Call Stack Management: The call stack manages the order in which function calls are executed and allows the Python interpreter to keep track of multiple function calls and their respective states. The function calls on the call stack are executed in Last-In-First-Out (LIFO) order, which means that the most recently called function is the first to be completed and removed from the stack.

5. Return Values: When a recursive function encounters a base case, it returns a value without making any further recursive calls. The returned value then propagates back up the call stack to the previous function call, which uses it to compute its own result.
This process continues until all recursive calls are completed, and the final result is obtained.\n\n\n```python\ndef fibonacci(n):\n # Base case: n is 0 or 1, return n\n if n == 0 or n == 1:\n return n\n \n # Recursive calls\n return fibonacci(n-1) + fibonacci(n-2)\n\nresult = fibonacci(5)\nprint(result)\n\n```\n\n 5\n\n\n\n```python\ndef binary_search(arr, target, left, right):\n # Base case: search range is empty, return -1\n if left > right:\n return -1\n \n # Calculate midpoint\n mid = (left + right) // 2\n \n # Check if target is at midpoint\n if arr[mid] == target:\n return mid\n \n # Recursive calls\n elif arr[mid] > target:\n return binary_search(arr, target, left, mid-1)\n else:\n return binary_search(arr, target, mid+1, right)\n\narr = [2, 4, 5, 6, 8, 10, 12, 15, 18, 20]\ntarget = 15\nresult = binary_search(arr, target, 0, len(arr)-1)\nprint(result)\n\n```\n\n 7\n\n\n\n```python\ndef reverse_string(s, left, right):\n # Base case: string has been fully reversed\n if left >= right:\n return s\n \n # Swap characters at left and right indices\n s[left], s[right] = s[right], s[left]\n \n # Recursive call to reverse substring\n return reverse_string(s, left+1, right-1)\n\ns = list(\"hello\")\nresult = reverse_string(s, 0, len(s)-1)\nprint(\"\".join(result))\n\n```\n\n olleh\n\n\n\n```python\ndef factorial(n):\n # Base case: n is 0 or 1, return 1\n if n == 0 or n == 1:\n return 1\n \n # Recursive call\n return n * factorial(n-1)\n\nresult = factorial(5)\nprint(result)\n\n```\n\n 120\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":50,"cells":{"text":{"kind":"string","value":" u a3bIK\nE O HOBIX MTODOB\n n . - . , - x B . HCKJIbKO yTpyaTebHee. Bcena. \n HO: OHA HE HOTOA N \n - . , \n , \n B , a rpaMaTM4ecKoro KycTpa. \n C , , , \n , - \n P IX I BOK. Hana BHO , - \n \n \n . \n - - , e x, - y e rOM H K. O , KO \n \n - I I O- r , H O O M r; \n e HO , OM, - \n3, B CHMMTOKM HHO-H 4HOCT. \n \n![figure](URL) \n , HO, O TTOCKHO. Iloxae a BaM, Caeka. \n .. \n![figure](URL) \n C. \n T, K o? \n \n \n - \n ? \n O, npe Bce 9TOMy - KTo T. \nI' : \n \n r . \n![figure](URL) \n![figure](URL) \n y M . , , . , \n . \n 06y 5, 6 801029 0-06 , 6 8. \n I , . I \n KJHOCT. CTOB 3H CJOB: CJIOBI M COBHH, COBH H OBO y OHO. M X O CHHT, H. \n 4I J, B OMy O C HO , e I XO. I H I IOT MHO K, - n, eHOCT KOTIX OI K M OMO. H. x I, KT - \n B OHOH 6X HX. I OT X O , , . , IX O- noco , Hax BO3MOKOCT KOHTOJ OCHO- \n OOHO, - e x - T IX HOBO-TexJ O, IX KOI, CTK- K . \n X , JI JH T X. KK BOHIO OJKIT, M 6 \n , , I . , T . , I , . \n![figure](URL) \n \n## npenonaBamenb\nC yHBePCNTET: \n MeKcyK BOCTOCKH \n J6MT.. A.C. Cepy30Ba. \n![figure](URL) \n \n## \n - \n K.JeKTBa. \n![figure](URL) \n \n . I. KpeB- , - \n X 3IK, COCTB CBI, HBI, H THIM , - T-6 . - . C. , . 
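The `fibonacci` example above recomputes the same subproblems many times, so its call tree grows exponentially with `n`. As a brief illustrative sketch (not part of the original notebook), the same recursion can be memoized with `functools.lru_cache` so that each distinct argument is computed only once; the helper name `fib_cached` and the test value are assumptions made here for illustration:

```python
from functools import lru_cache

@lru_cache(maxsize=None)  # cache results keyed by the argument n
def fib_cached(n):
    # Base case: n is 0 or 1, return n (same as the plain recursive version)
    if n == 0 or n == 1:
        return n
    # Recursive calls now hit the cache for already-computed arguments
    return fib_cached(n - 1) + fib_cached(n - 2)

print(fib_cached(10))  # 55
```

With memoization the number of distinct calls grows roughly linearly in `n` instead of exponentially, while the base case and recursive structure stay exactly as described in points 1-5 above.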
\n![figure](URL) \nC , , , B, B, 3B, O."},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":51,"cells":{"text":{"kind":"string","value":"\nCREATE Procedure spr_list_OrderStatus(@SDate DateTime,@EDate DateTime, @CustomerID nvarchar(15),\n\t\t@OrderStatus nVarChar(1000) , @FilterFor nVarchar(2500),\n\t\t@RetainUserDefinedValues int,\n\t\t@FreeText varchar(128),@OrderID int) \nAs \n\nDeclare @TempCustomers Table (CustomerID nvarchar(15) Collate SQL_Latin1_General_CP1_CI_AS) \n\nIf @CustomerID = N'%' \n Insert Into @TempCustomers Select Distinct CustomerID From Customers \nElse \nBegin \n Insert Into @TempCustomers Select Distinct CustomerID From Customers \n Where CustomerID = @CustomerID Or @CustomerID = '*' And CustomerID Not IN (Select Distinct CustomerID From Customers Where IsNull(CustomerCategory,'') <> '') \nEnd\n\nDeclare @TempOrderStatus Table (OrderStatus nVarchar(1000) Collate SQL_Latin1_General_CP1_CI_AS) \n \nIf @OrderStatus = N'%' \n Insert Into @TempOrderStatus Select OrderStatus From OrderStatusMaster \nElse \nBegin \n\tInsert Into @TempOrderStatus Select Distinct OrderStatus From OrderStatusMaster Where OrderStatus = @OrderStatus\n\tOR @OrderStatus = '*' And OrderStatus Not IN (Select Distinct OrderStatus From OrderStatusMaster Where IsNull(RetainUserDefinedValues, 0) <> 0) \n\tAnd IsNull(RetainUserDefinedValues, 0) = 0 \nEnd\n\n--***********************************************//\nDeclare @UserDefinedOrderStatus Table ([Status] varchar(1000) Collate SQL_Latin1_General_CP1_CI_AS)\nif @RetainUserDefinedValues = 0\nInsert Into @UserDefinedOrderStatus select '---All---' Status\nelse\nIf isnull(@RetainUserDefinedValues,0) = 1 \nInsert Into @UserDefinedOrderStatus Select Distinct UserDefinedOrderStatus From Orders Where UserDefinedOrderStatus <> ''\n--***********************************************//\n\nSelect Customer_Name = Case IsNull(c.CustomerName, '') \n\t\t\t\t\t\tWhen '' Then r.StatusDescription\n\t\t\t\t\t\tElse r.Customer_Name End, \n\t\tTransactionID, DocumentDate, OrderID, Status, DocumentReference, \n\t\t@SDate [From], @EDate [To], '' CheckedFrom,\n\t\t'' CheckedTo,\n\t\t[Value], '' ValueCheckedFrom,\n\t\t'' ValueCheckedTo,\n\t\t'' PaymentTerms, '---All---' DeliveryTerms\nFrom Orders o Inner Join (\tSelect Distinct CustomerID From @TempCustomers) c on o.CustomerID = c.CustomerID \n\t\tInner Join (Select Distinct OrderStatus From @TempOrderStatus) OrStatus\n\t\ton o.OrderStatus = OrStatus.OrderStatus\t\n\t\tCross Join (Select * From @UserDefinedOrderStatus Where IsNull(UserDefinedOrderStatus, '') = '') r \nwhere o.OrderDate Between @SDate And @EDate \n\tAnd (IsNull(o.UserDefinedOrderStatus, '') = '' OR UserDefinedOrderStatus IN (Select Distinct UserDefinedOrderStatus From Orders Where UserDefinedOrderStatus <> '' And Isnull(RetainUserDefinedValues, 0) = 1))\n\tAnd DocumentReference + CAST(DocumentID as nvarchar) + CAST(o.DocumentDate as nvarchar) + (IsNull(Status, '') + Isnull(UserDefinedOrderStatus, '')) + \n\t\t\tConvert(NVarChar(100), Convert(int, o.BankID)) + (IsNull(PurcahseOrderNumber, '') + IsNull(DocumentReference, '') + IsNull(Reference, '')) + CustomerID Like '%' + @FreeText + '%'\n\tAnd (@FilterFor = N'All' \n\t\tOr (@FilterFor = N'Date' And Status in ('Uncollected', 'Collected', 'Rejected'))\n\t\tOr (@FilterFor = N'Customer' And Status not in ('Uncollected', 'Collected', 'Rejected')))\n\tAnd ((@OrderID = 0 And o.OrderID > 1) OR (@OrderID = 1 AND o.OrderID = 1) )\nGroup By Customer_Name, TransactionID, 
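-- The GROUP BY list continues below with every non-aggregated column expression used in the SELECT, including the concatenated purchase-order/reference expression.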
o.DocumentDate, Status, o.UserDefinedOrderStatus, r.StatusDescription\n\t, DocumentReference, o.DocumentID ,o.OrderID, o.BankID,\n\t\t\tISNULL(PurcahseOrderNumber,'')+ISNULL(DocumentReference,'')+ ISNULL(Reference,'') ,o.CustomerID,\n\t\t\tIsNull(Reference, '') "},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":52,"cells":{"text":{"kind":"string","value":"6]]]]]]>]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]-->The majority of the 1283 works in the Art on Paper collection originated in the United States or Great Britain and reflect many of the changes that have occurred in these countries over the last hundred years. The American section focuses on developments in the United States from the early 1960s to the mid 1990s and documents a broad range of visual approaches, themes and personal styles. Through these works, we see artists engaging with issues concerning social, environmental and gender-related problems and those relating to their personal lives and relationships. The British selection, predominantly from the 1970s and 1980s, deals with major British concerns of the period - the ways in which the state interacts with the individual, the growing preoccupation with and awareness of social issues, and concerns over the growing importance of technology and communications. The European section concentrates on issues unique to Europe and documents the influence of European life and culture on artists around the globe.\nThe collection includes a wide range of media; drawings, etchings, lithographs, aquatints and woodcuts, as well as works on paper created with oil and acrylic paints, pastel, gouache, collage, and mixed media. The primary concern of Art on Paper is to show works by emerging as well as established artists, with a particular commitment to supporting young or under-recognized artists. As a result, a significant portion of Art on Paper is by emerging artists and those artists who were beginning their careers in the 1960s.\nThe art on paper program is conceived as a gathering place for contemporary works on paper representing artists who exemplify a range of recent developments in visual practice and themes. The works have been assembled over the past 15 years and represent a number of important national and international artists including Faith Ringgold, Mary Heilmann, Alice Neel, Ann Hamilton, Christopher Wool, Chuck Close, and Sophie Calle. They were purchased in part with funds provided by the Works of Art Council.\nThe following artists are represented:\nEleanor Antin, Richard Artschwager, Victor Baeza, Michael Beesley, Louisa Burko, David Bush, Mary Carlisle, Pierre Castagna, Donald D'Aquisto, Patrick Demarchelier, John Di Martino, Paul Drummond, Deborah Fisher, Ralph Fitch, Terry Frost, Elizabeth Gee, Allan Giddy, A.R. Gonzales, Marina Grigorieva, David Hammons, Ann Hamilton, David Hockney, J. Jean Houston, Sam Ikin, Eric L. Jones, Marcia Marcus, Robin McKeen, Keith Milow, Alain Morisot, R.B. 
Kitaj, Joseph Kosuth, George Kuchar, Nobuyoshi Araki, Ashley Longshore, Jane Lombard, Daniel Marzona, Luigi Ontani, Pato O’Ward, Jala Wahid Peacock, Christopher Pope, Luciano Pignatelli, Joseph Raffael, Sorel, Jim Sinks, Wilke and Tress, Inge, Sean Scully, Ernesto Sova, Richard Tuttle, Sue Williams, Christopher Wool, Kenneth Yntema, David Zack."},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":53,"cells":{"text":{"kind":"string","value":" /******************************************************************************\r\n\t * @file\t\t\tGpio.c\r\n\t *\r\n\t * @author\t\t\tMostafa Elsaied (https://github.com/Mostafa-Saied91)\r\n\t * @date\t\t\t31 August 2023\r\n\t * @version\t\t\t1.0\r\n\t *\r\n\t *******************************************************************************/\r\n\r\n/*-------------------------------------------------------------------------------------------------------------------\r\n * @attention\r\n *\r\n * Copyright (c) 2022 STMicroelectronics.\r\n * All rights reserved.\r\n *\r\n * This software is licensed under terms that can be found in the LICENSE file\r\n * in the root directory of this software component.\r\n * If no LICENSE file comes with this software, it is provided AS-IS.\r\n *\r\n *--------------------------------------------------------------------------------------------------------------------*/\r\n\r\n\r\n#include \"../Inc/Gpio.h\"\r\n\r\nvoid GPIO_Init(GPIO_TypeDef* GPIOx, GPIO_InitTypeDef* GPIO_InitStruct){\r\n\t/*----------------------------- GPIO Mode Configuration ------------------------*/\r\n\tif(GPIO_InitStruct->GPIO_Pin == 0xFFFFFFFF)\r\n\t\treturn;\r\n\tunsigned short num_of_pins_to_config = 1 + __CLZ(GPIO_InitStruct->GPIO_Pin);\r\n\tunsigned long pin_pos = 0x00000000UL;\r\n\r\n\r\n\tfor(unsigned short i=0; iGPIO_Pin);\r\n\r\n\t\t/* Configure the IO mode (Input, Output, Alternate or Analog) */\r\n\t\tGPIOx->MODER &= ~(GPIO_MODER_MODER0 << pin_pos) ;\r\n\t\tGPIOx->MODER |= GPIO_InitStruct->GPIO_Mode << pin_pos;\r\n\r\n\t\t/* Configure the IO speed */\r\n\t\tGPIOx->OSPEEDR &= ~(GPIO_OSPEEDR_OSPEED0 << pin_pos);\r\n\t\tGPIOx->OSPEEDR |= GPIO_InitStruct->GPIO_OType << pin_pos ;\r\n\r\n\t\t/*Configure the IO Output Type*/\r\n\t\tGPIOx->OSPEEDR &= ~(GPIO_OSPEEDR_OSPEED0 << pin_pos);\r\n\t\tGPIOx->OSPEEDR |= GPIO_InitStruct->GPIO_Speed << pin_pos ;\r\n\r\n\t\t/*Configure the IO Pull-up Pull down*/\r\n\t\tGPIOx->PUPDR &= ~(GPIO_PUPDR_PUPD0 << pin_pos) ;\r\n\t\tGPIOx->PUPDR |= GPIO_InitStruct->GPIO_PuPd << pin_pos;\r\n\t}\r\n}\r\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":54,"cells":{"text":{"kind":"string","value":"Is] it reasonable to say that we have a right to something if it is clear we could not obtain it without violating some right?\nMy first reaction was that since rights do not come in degrees, you don't have a right to the thing in question.\nIf a child is born on an iceberg far enough from shore that she would die before getting to it, even with parental aid, it's clearly her right to life that's most fundamental here. It's also clear that it's morally permissible to let her die. 
Is this because her right to life is not a right to be kept alive?\nIf one were to give her the chance to live by jumping from the iceberg into the water and hoping to make it to shore before she drowns, I think we'd have to say that in doing so, one violates her right not to be assaulted (jumping someone to make them swim for their life constitutes an assault) or even to bodily integrity. At the very least, we'd have to say that we violate some duty we owe her if we do it. But it seems strange to say that in doing something morally wrong to her, we protect a right we owe her. In any case, one can say that drowning is a far, far more awful fate for the child than the assault that is done to her by making her attempt to swim.\nOn reflection, it looks like the answer here is pretty clear: The child does not have a right to be kept alive because she would not have that right even if she were physically capable of making it to shore. Consider other cases. Does a child have a right to food if the parents are impoverished and unable to get it for her? Obviously not, because the parents would not be violating a right if they were simply too poor to afford food.\nSome of the goods whose possession is deemed to be a right are such that we could not have the right to them without violating someone else's rights or duties. For example, if we consider property rights on any robust Lockean conception, and we consider someone starving in the wilderness, we would not be obligated to allow them to take possession of something of ours to keep them alive.\nOther goods, on the other hand, are such that the ability to possess them cannot itself be possessed by right, because there may be conditions in which people can't obtain them without violating other people's rights. These are things like being kept alive or kept reasonably well fed if one is poor or landlocked on an iceberg in the middle of the ocean."},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":55,"cells":{"text":{"kind":"string","value":"**/\n/*=============================================================================\n *\n * This file is a product of Criterion Software Ltd.\n *\n * This file is provided as is with no warranties of any kind and is\n * provided without any obligation on Criterion Software Ltd. or Canon Inc.\n * to assist in its use or modification.\n *\n * Criterion Software Ltd. and Canon Inc. will not, under any\n * circumstances, be liable for any lost revenue or other damages arising\n * from the use of this file.\n *\n * Copyright (c) 2002 Criterion Software Ltd.\n * All Rights Reserved.\n *\n *===========================================================================*/\n\n/****************************************************************************\n * *\n * Module: stream.c *\n * *\n * Purpose: *\n * Streaming *\n * *\n ****************************************************************************/\n#include \"rwcore.h\"\n\n#include \"rptools.h\"\n#include \"rphw.h\"\n\n#include \"rphw.h\"\n#include \"rpdbgerr.h\"\n\n#include \"binder.h\"\n#include \"binderint.h\"\n\n/* Local */\n#include \"stream.h\"\n\n/**\n *\n * \\ingroup rpworldbinder\n * \\def RpWorldBinderSetChunkSize\n *\n * \\brief Sets the chunk size for storing streamed data for\n * RpWorld objects. \n *\n * \\desc The default stream chunk size is 8192 bytes, which means that \n * every 8192 bytes, the data will be wrapped to the next chunk for\n * storage. 
The default chunk size is optimal for all current\n * platforms. However, a different chunk size can be used.\n *\n * \\param chunkSize - Data stream chunk size.\n *\n * \\return Data stream chunk size used.\n *\n * \\see RpWorldBinderGetChunkSize\n *\n */\nRpWorldBinderStreamChunkSize\nRpWorldBinderSetStreamChunkSize(RpWorldBinderStreamChunkSize chunkSize)\n{\n RWAPIFUNCTION(RWSTRING(\"RpWorldBinderSetChunkSize\"));\n RWASSERT(chunkSize > 0);\n RpfWorldBinderChunkSize = chunkSize;\n RWRETURN(RpfWorldBinderChunkSize);\n}\n\n/**\n *\n * \\ingroup rpworldbinder\n * \\def RpWorldBinderGetChunkSize\n *\n * \\brief Gets the chunk size for storing streamed data for\n * RpWorld objects. \n *\n * \\desc The default stream chunk size is 8192 bytes, which means that \n * every 8192 bytes, the data will be wrapped to the next chunk for\n * storage. The default chunk size is optimal for all current\n * platforms. However, a different chunk size can be used.\n *\n * \\return Data stream chunk size used.\n *\n * \\see RpWorldBinderSetChunkSize\n *\n */\nRpWorldBinderStreamChunkSize\nRpWorldBinderGetStreamChunkSize(void)\n{\n RWAPIFUNCTION(RWSTRING(\"RpWorldBinderGetChunkSize\"));\n RWRETURN(RpfWorldBinderChunkSize);\n}\n\n\n/****************************************************************************\n *\n * \\ingroup rpworldbinder\n * \\def RwBool RpWorldBinderStreamSize (const RpWorld *world, RwInt32 *totalBytes)\n *\n * \\brief Returns size of RPHardwareDataChunk\n *\n * \\desc Returns total size of all chunk header and payload bytes, does not include\n * object payload\n *\n * \\param world - pointer to world for calculating\n * \\param totalBytes - total size of all chunks headers and payload size\n *\n * \\return TRUE if ok\n *\n *\n *\n ****************************************************************************/\nRwBool\nRpWorldBinderStreamSize( const RpWorld *world,\n RwInt32 *totalBytes )\n{\n RwUInt8 totalChunkSize;\n RwUInt8 dataChunkSize;\n RwUInt32 dataChunkNo;\n RwInt32 tempSize;\n\n RWAPIFUNCTION(RWSTRING(\"RpWorldBinderStreamSize\"));\n\n RWASSERT(world != NULL);\n RWASSERT(world->worldBinding != NULL);\n RWASSERT(world->worldBinding->rpHardwareDataChunk != NULL);\n\n /* Calculate size of chunks */\n totalChunkSize = sizeof(*world->worldBinding->rpHardwareDataChunk);\n\n dataChunkSize = 0;\n for (dataChunkNo = 0;\n dataChunkNo < world->worldBinding->rpHardwareDataChunk->number;\n dataChunkNo++)\n {\n dataChunkSize += world->worldBinding->rpHardwareDataChunk->\n chunk[dataChunkNo].size;\n }\n\n totalChunkSize += dataChunkSize;\n\n /* Add in data chunk sizes */\n tempSize = 0;\n\n for (dataChunkNo = 0;\n dataChunkNo < world->worldBinding->rpHardwareDataChunk->number;\n dataChunkNo++)\n {\n tempSize += world->worldBinding->rpHardwareDataChunk->\n chunk[dataChunkNo].payloadSize;\n }\n\n *totalBytes = totalChunkSize + tempSize;\n\n RWRETURN(TRUE);\n}\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":56,"cells":{"text":{"kind":"string","value":"o'!lS'fJIV'Qlfl'IJQ''Z6 lQ''\\I'J~'H!::l~ ~':l!':l ~ '1/':\\\\\"'[.:\\~ QVQ 'Z6''1Q'~'1/':\\\\\"'[.:\\~.:\\6!'1/':\\\\\"'[.:\\~ 6'N~'lQ'~1:§_ u7Q'V'1/':\\\\\"'[.:\\~..'n~~VQ Q'_Q \\Q 'Z6 1 Q u~Q''Z6 1/':\\\\\"'[.:\\~ f:~l~''N Q \\!~'o'lflV'flQ'fl!'O ~~o1/':\\\\\"'[.:\\~,;:!~·1.61:§_ ~~~ ~1:§_ l!f 6'N~'lQ'1/':\\\\\"'[.:\\~ 61:'\\'11Q':\\!':\\f\\O!!.§1!!\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":57,"cells":{"text":{"kind":"string","value":"On Tue, Jul 
31, 2001 at 10:48:41AM -0700, Stephen Adams wrote:\n |> What does this have to do with Gnome 1.x?\n\n |> AFAIK, the problem you described happens only in Gnome 1.x (I see the\n |> problem with KDE on some machines too)\n\nThe problem happens on GNOME and GNOME 2, even on windowmaker.\nThis is something that needs to be fixed in the X server (IMHO).\n\n> Yes, it happens on both 1.x and 2.x. I am sorry to hear it has nothing\n> to do with 1.8 but 2.x.\n\nNo, the 1.8.x bug is something else (it could be caused by X, but as I\nsaid, I have KDE problems too with XF86 and i810 video chip...).\n\n> A message like this can be useful (unless it is a server bug).\n\nIt's a server bug or something strange that happens with some drivers...\n\n> In Gnome 2, the configuration dialog would no longer try to duplicate\n\nThis is 100% true, the old Gnome X session has been deprecated (not\nremoved) in GNOME 2.x."},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":58,"cells":{"text":{"kind":"string","value":" @startuml\n\n title Document Generation and Handling\n\n actor User as actor\n\n rectangle \"Client Application\" as client {\n participant \"File Management\" as fileManagement\n participant \"Document Generator\" as docGenerator\n participant \"Template Repository\" as templateRepo\n participant \"Communication Client\" as commClient\n }\n\n participant \"Communication Server\" as server\n participant \"Content Management System\" as cms\n\n actor Administrator as admin\n actor ExternalClient as extClient\n\n rectangle \"Google APIs\" {\n participant \"Google Docs API\" as gdocsApi\n participant \"Google Drive API\" as gdriveApi\n }\n\n rectangle \"PostgreSQL Database\" {\n participant \"File Storage\" as fileStorage\n participant \"Document History\" as docHistory\n }\n\n actor \"Remote Client\" as remoteClient\n\n actor \"API Client\" as apiClient\n\n== Document Generation and Sharing ==\n\nactor -> fileManagement: Select Template\nfileManagement -> templateRepo: Fetch Template\ntemplateRepo --> fileManagement: Return Template\n\nfileManagement -> docGenerator: Generate Document\ndocGenerator -> server: Save Document\nserver -> cms: Upload Document\ncms --> server: Document ID\nserver --> docGenerator: Document ID\ndocGenerator -> gdriveApi: Create Google Doc\ngdriveApi --> docGenerator: Google Doc ID\ndocGenerator --> fileManagement: Return Document\nfileManagement -> server: Share Document\nserver -> gdriveApi: Share Document\ngdriveApi --> server: Share Link\nserver --> fileManagement: Share Link\nfileManagement --> actor: Return Share Link\n\n== Administrator Operations ==\n\nadmin -> cms: Browse All Documents\ncms --> admin: List of Documents\n\nadmin -> server: Manage Document Access\nserver -> gdriveApi: Modify Share Permissions\ngdriveApi --> server: Success/Failure\nserver --> admin: Confirmation\n\n== External Client Operations ==\n\nextClient -> server: Retrieve Shared Document\nserver -> gdriveApi: Fetch Google Doc\ngdriveApi --> server: Document Content\nserver --> extClient: Return Document Content\n\nextClient -> gdocsApi: Edit Document\ngdriveApi --> extClient: Edited Document\n\nextClient -> server: Save Changes\nserver -> cms: Update Document\ncms --> server: Confirmation\nserver --> extClient: Acknowledgment\n\n @enduml\n "},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":59,"cells":{"text":{"kind":"string","value":"now'', \"the present\", \"soon\"). 
Such an argument would not take us very far, because it would leave open the issue of what now means.\nMost people find it natural to distinguish between \"what was\" and \"what will be\" and \"what is\". There is the experience of the passage of time which consists of a change from \"what is\" to \"what will be\" which becomes \"what was\" (or vice versa)\nThe English word \"now\" has two distinct meanings. For example, when somebody dies we say that he's gone \"now\", i.e. in contrast to \"later on\". When you finish talking about the \"past\", you have talked about the \"past\" and \"now\" you are talking about the \"present\", or something similar. So if you talk about \"now\", then you can talk about what happened \"now\" i.e. as opposed to later, or you can also say that you did something \"now\" or \"just now\" or \"last night\" i.e. as opposed to a while ago. Therefore, there are at least two different meanings of the word \"now\" which have to be taken into account.\nIt is of course very difficult to talk about what you actually experienced if you didn't actually experienced what you talk about. That is why language often does not tell anything about experience, but only about belief. Or maybe it does tell something about experience, but not to the people who say it, but to the people who are listening.\nMy brain contains a record of what I experienced. I also \"know\" that it contains such a record.\nSo do you assume that something exists by the mere fact that I can \"see\" it in my mind?\nIt is also interesting to note that most of us don't remember most of what actually happened to us, so in some cases our \"knowledge\" is quite limited compared to what actually happened (our mind has \"deleted\" most of what actually happened). I also wonder about the fact that the past actually exists. The fact that I \"remember\" something does not mean that the things that I \"remember\" actually happened. It could also be an illusion and/or a fantasy. It could also be that things have not happened as I remember.\nActually, I think that what happened in the past is quite irrelevant in some aspects. There is only \"now\", but I may also have \"knowledge\" of what happened in the past and/or what will probably happen in the future. I can also change the past in my mind, because the past as I remember it, is what is important at this stage.\nIn this sense \"past\" can only be what we remember from the past, and \"future\" what we expect to happen in the future. These are only memories, predictions and expectations which are always changing from moment to moment. We can never say what actually happened, or what actually will happen in the future. The future is never known, because the future does not exist.\nIn the \"here and now\" everything depends on the moment \"now\". In that moment there is a perception, and a memory of what was happening before that moment. Also there is the expectation that there will be something happening after that moment. These memories are sometimes distorted, and sometimes a new \"memory\" of an earlier event will be added.\nThis idea of the \"here and now\" is only relevant if you don't consider your memories from the past. But to me these memories are the most important thing about living. I think that without memories life would be very hard. But I also believe that one should not only live with the past memories but also with the memories that one creates now. 
These memories of what is happening in this moment are my main concern, because that is how I think I can best predict the future. If you concentrate on these memories from the \"now\", you can make your life and the future of mankind.\nThe only thing I have no doubt in, is my \"memory\" of the \"here and now\". Of course, what you \"perceive\" is a mixture of what is \"real\" and what is the result of \"interpretation\". But, this is different from \"false memory syndrome\". That is just a memory from the \"now\" which you don't think about enough (you don't \"look\" at this memory enough).\nI'm sure you \"look\" at memories from the \"past\" enough, but maybe you don't \"look\" at memories from the \"now\" (which you \"perceive\") enough. That's my assumption. What do you think about that?\nIn order to believe that something exists one has to have experience or have some kind of \"evidence\". And when we talk about \"evidence\" then one has to ask the question how this \"evidence\" was obtained? How do you know that something exists? For example, you say that I say that something exists, so it is a fact.\nWhat do you say about this?\n- Because you can see it, feel it and touch it, it is there.\n- Well, what do you think about \"ghosts\" that can't be seen, felt or touched?\nI guess you will think that \"ghosts\" do not exist because you can't feel them. But how do you know that?\nHow do you know that when you \"see\", \"feel\", \"hear\" something, it is actually there? You can't tell unless you have some kind of evidence, and you get that evidence from your experience.\nSo when you claim that ghosts exist you are going beyond your experience. Are you sure about this?\nAlso, how can you be sure that \"you\" exist? Maybe you are just a \"dream\" that is happening inside my brain. Maybe all you say, everything that you do, everything you perceive is only an illusion. It is just my imagination of what would happen if there was another person in this world.\nActually, when we say that \"you\" exist, then we are making an assumption that someone else also exists. There is no \"you\", and there is no \"me\" either.\nAre you sure about this?\nIf not, can you tell me how do you know for sure that I am not just a dream that is happening in your brain?"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":60,"cells":{"text":{"kind":"string","value":"K+F'+K+1$$ and $\\Delta\\setminus A_{U_{k+F'+1}}$ is a total $\\Lambda\\setminus U_{k+F'+1}$-transversal contained in $X'$. Since $X'$ is a critical total transversal of $D^*(X)$ and $X'$ has $k+1$ total transversals, it follows that $\\Lambda\\setminus U_{k+F'+1}$ is contained in a vertex of $D^*(X)$. Note that $u$ and $v$ do not belong to $A_{U_{k+F'+1}}$. Moreover, if $A$ is a vertex of $D^*(X)$ which contains $\\Lambda\\setminus U_{k+F'+1}$ and either $u$ or $v$ is a member of $A$, then $(A\\setminus \\{ u,v\\} )\\cup \\{ t,w\\ }$ contains $X'$ as a total transversal. Hence, as $X'$ is the only critical total transversal of $D^*(X)$ which has total transversals other than itself, it follows that no vertex of $D^*(X)$ contains $\\Lambda\\setminus U_{k+F'+1}$ along with either $u$ or $v$, implying that no vertex of $D^*(X)$ contains $\\Lambda\\setminus U_{k+F'+1}$. 
\n Moreover, if $\\{ \\Lambda\\setminus U_{k+F'+1}\\ } \\cup T$ is a minimal total transversal of $D^*(X)$ where $T\\subseteq \\{u,v\\}$, then $T\\cap \\{ u,v\\}\\ne \\emptyset$, for otherwise $X'\\subseteq \\Lambda\\setminus U_{k+F'+1}\\subseteq A$ for a vertex $A$ of $D^*(X)$. Thus $T= \\{ u\\}$ or $T= \\{v\\}$ and no vertex of $D^*(X)$ contains $\\Lambda\\setminus U_{k+F'+1}$. Now $D^*$ is obtained from $D^*(X)$ by adding the vertices $\\{ x,u\\}$, $\\{ t,w\\}$, $\\Lambda\\setminus U_{k+F'+1}\\cup \\{ v\\}$ and all the edges in between them. Then $D^*(X)\\prec_{\\mathcal{O}} D^*$ and $\\tau_{\\odot}(D^*)=\\tau_{\\odot}(D^*(X))= k+1$, contradicting the assumption that $D$ is a minimum lexicographic ordering extremal digraph for $\\odot_k(n)$. Hence at least one of $X'\\cap \\Delta_X'$ and $X'\\cap \\Delta_u$ must be empty. This completes the proof of Claim\\ref{clm:crossing}. \n \\end{proof}\n\nRecall that the vertices $\\{u, x,v\\}$, $\\{t, w\\}$, $U$ are disjoint for every $U\\in \\mathcal{U}_{F'}$ and that $\\{ u, x,v\\}$ is a singleton for all $F'\\ne U\\in \\mathcal{U}$. As the proof of the next claim is very similar to that of Claim \\ref{clm:crossing} and hence, we omit the proof.\n\n\\begin{claim}\\label{clm:noncrossing}\nEvery $k+1$ crossing free critical total transversal of $D^*(X)$ has non-empty intersection with all vertices of $D^*(X)$ if and only if $\\{u,x,v\\}$ is not contained in a vertex of $D^*(X)$. \n\\end{claim}\n\nLet $H$ be a spanning subdigraph of $D$ that has $\\rho_D(U_1),\\ldots,\\rho_D(U_{k+F'})$ as its vertices and is obtained by the deletion of the edges $(A,B)$ in $D$ that satisfies one of the following. \n\\begin{enumerate}[$(1)$]\n \\item For any $i\\neq j\\in [k+F']$, $\\rho_D(U_i)$ has no outgoing edge to $\\rho_D(U_j)$, and \n \\item If $U\\in \\mathcal{U}\\setminus \\mathcal{U}_{F'}$, then $\\rho_D(U)$ has no outgoing edge to a vertex of $D$.\n\\end{enumerate}\nNote that $\\rho_D(A_U)$ is a vertex of $H$ for $U\\in \\mathcal{U}$ and it is exactly one of $\\rho_D(U_1),\\ldots,\\rho_D(U_{k+F'})$. \n\n\\begin{claim}\\label{clm:noleaf}\nNo vertex of $H$ is a leaf.\n\\end{claim}\n\\begin{proof}\n\nWe first show that every vertex of $H$ other than the last vertex has an incoming arc. Let $1\\leq i \\leq k+F'-1$. Suppose that the vertex $\\rho_D(U_i)$ has no incoming arc in $H$. Then we have $d^+_H(\\rho_D(U_i))\\geq 1$ since $D$ has no cycle of length two and $U_i\\in \\mathcal{U}_{F'}$. Let $U_j\\in \\mathcal{U}_{F'}$ be a vertex such that $(\\rho_D(U_i),\\rho_D(U_j))$ is an arc of $H$. Let $U_j'$ be the vertex $X'$ or $A_{U_{k+F'+1}}$. Let us consider the following subdigraph $D'$ which is obtained from $D$ by the deletion of all outgoing arcs from $\\rho_D(U_i)$, the addition of the new vertex $U_j'$, the addition of arcs $(v',U_j')$ for every vertex $v'$ such that $(\\rho_D(U_i),v')$ is an arc in $D$ and the deletion of all the arcs $(v',U_j')$ where $v'$ is a vertex of $H$. Note that $\\tau_\\odot(D')=k+1$ by Lemma \\ref{lemma:slicen} and $\\rho_D(U_i)$ is a leaf of $H'$. So, by induction on the number of leaves in $H$ we have $D'\\preceq_{\\mathcal{O}} P_n$. Consequently, $D\\preceq_{\\mathcal{O}} D'\\preceq_{\\mathcal{O}} P_n$. This contradiction implies that every vertex of $H$ other than the last vertex must have an incoming arc.\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":61,"cells":{"text":{"kind":"string","value":"I'm not sure you would use \"dangling\". 
\"Pending\" seems to make\n |more sense to me.\n\n|I'd think of a question like \"Should we release X now\" as a dangling question\n|once we do the release.\n\n | |\n \"Should we release X now?\" Pending before the release,\n \"Should we ever release X\" Dangling after the release\n\n |\n |> The term dangling question is in reference to a question asked but not\n |> answered.\n\n |And later given a negative answer.\n |\n \"Should we ever release X\" - if we release it then the question is not\n answered, if not, then the question gets a negative answer.\n\n|If we'd decided never to do the release, but a pending question wasn't\n|closed out, I'd view it as a missed opportunity to tidy up the\n|tracking system. (It should still be considered as\n|a closed question, just so we know it is a done thing.)\n\n+1\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":62,"cells":{"text":{"kind":"string","value":"I_{2}^{T} I_{1}^{T} \\mathrm{R}^{-1} I_{1}}=\\hat{\\boldsymbol{\\Sigma}}_{I, O}$$ Proof. First note that $\\hat{\\mathbf{J}}_{E} I_{2}=I_{1} \\mathbf{J}$ with $\\mathbf{J}=\\mathbf{X}_{\\infty} \\boldsymbol{\\Sigma}_{x z x}^{-1} \\mathbf{X}_{\\infty}^{T}$ from Corollary 4.1 and from $\\hat{\\mathbf{J}}_{E}=\\mathbf{J}^{\\dagger}=I-\\mathbf{J}\\left(\\mathbf{I}_{m}-\\mathbf{J}^{2}\\right)^{-1}(\\mathbf{J}-\\mathbf{I})$. Then use $\\hat{\\mathbf{\\Sigma}}_{I, O}=E_{O}\\left\\{\\left(\\boldsymbol{\\xi}_{O}-\\mathbf{A}_{O} \\boldsymbol{\\xi}_{I}\\right)\\left(\\boldsymbol{\\xi}_{O}-\\right.\\right.$ $\\left.\\mathbf{A}_{O} \\boldsymbol{\\xi}_{I}\\right)^{T}$ and $$\n\\mathbf{A}_{O}=E_{O}\\left\\{\\hat{\\mathbf{J}}_{E} \\boldsymbol{\\xi}_{O} \\boldsymbol{\\xi}_{I}^{T}\\right\\}=\\hat{\\mathbf{J}}_{E} I_{2} \\mathbf{R}^{-1} I_{1}^{T}=\\left(I_{1} \\mathbf{J}-\\mathbf{J}^{\\dagger} \\mathbf{J}\\right) \\mathbf{R}^{-1} I_{1}^{T}.\n$$ 4.4. Statistical Properties. The purpose of this section is to establish the conditions under which the least square estimates, $\\hat{\\mathbf{A}}_{x}$ and $\\hat{\\mathbf{B}}_{x}$, are optimal, i.e., they possess asymptotic statistical properties that satisfy the Cramér-Rao lower bound [7, 16], such as being unbiased and asymptotically efficient. The following assumptions are necessary to reach this result: # Hypotheses H4.3. \n(i) $\\boldsymbol{\\xi}_{k}$, for all $k$, are independent and identically distributed random vectors with zero mean. (ii) The pair $\\mathbf{\\Sigma}_{\\boldsymbol{\\eta} \\boldsymbol{\\eta}}$ and $\\left(\\boldsymbol{\\Sigma}_{\\boldsymbol{\\zeta} \\boldsymbol{\\eta}} \\boldsymbol{\\Sigma}_{\\boldsymbol{\\eta} \\boldsymbol{\\eta}}^{-1} \\boldsymbol{\\Sigma}_{\\boldsymbol{\\eta} \\zeta}-\\boldsymbol{\\Sigma}_{\\boldsymbol{\\zeta} \\zeta}\\right)$ is positive definite. In the linear deterministic model $\\mathbf{S}_{k+1}=\\mathbf{A} \\mathbf{S}_{k}$, if the rank condition [15, p. 226], $\\operatorname{rank}\\left(\\mathbf{I}-\\mathbf{A} \\mathbf{A}^{T}\\right)^{-1} \\mathbf{A}=n$, is violated, then $\\hat{\\mathbf{A}}_{x} \\stackrel{P}{\\rightarrow} \\mathbf{A}$ cannot be achieved. In other words, there is no observation matrix that can guarantee convergence. It has been shown that for this rank condition, the number of rows of the observation matrix, $r$, must be greater or equal to the number of parameters to be estimated [15, p. 228], i.e., $r \\geqslant n$; in this case $r=n$ and there is a loss of efficiency as compared to the deterministic parameter estimation case. 
Thus, to be consistent, it is necessary to have $\\operatorname{rank}\\left(\\mathbf{I}-\\hat{\\mathbf{A}}_{x} \\hat{\\mathbf{A}}_{x}^{T}\\right)^{-1} \\hat{\\mathbf{A}}_{x}=n$, and therefore, ## 1. Hypothesis H4.4.\n$\\left(\\mathbf{I}-\\hat{\\mathbf{A}}_{x} \\hat{\\mathbf{A}}_{x}^{T}\\right)^{-1} \\hat{\\mathbf{A}}_{x}$ is positive definite for all $\\hat{\\mathbf{A}}_{x}$, where $\\hat{\\mathbf{A}}_{x}$ is defined in (4.49). Note that the vector $\\overline{\\mathbf{x}}_{k}$ in (4.47) is assumed to converge to an equilibrium point $\\overline{\\mathbf{x}}$, i.e., $\\lim _{k \\rightarrow \\infty}\\left\\|\\overline{\\mathbf{x}}_{k}-\\overline{\\mathbf{x}}\\right\\|=0$ for all $\\boldsymbol{\\xi}_{k}$, and similarly for $\\mathbf{z}_{k}$, $\\overline{\\mathbf{\\Sigma}}_{z z} \\in L$ and $\\overline{\\boldsymbol{\\Sigma}}_{x z z} \\in L$. As a consequence of the assumption given in (4.42), the vector $\\boldsymbol{\\xi}_{k}$ is stationary and therefore, in general, $\\operatorname{cov}\\left[\\boldsymbol{\\xi}_{k}, \\boldsymbol{\\xi}_{l}\\right] \\neq 0$ for $k \\neq l$. This necessitates the introduction of Hypotheses $\\mathrm{H} 4.2$ and $\\mathrm{H} 4.4$ to achieve the convergence properties as outlined in the following theorem. ## 2. Theorem 4.4.\n(i) Under Hypothesis $H 4.3 \\hat{\\mathbf{\\Sigma}}_{x z z} \\stackrel{P}{\\rightarrow} E\\left[\\boldsymbol{\\xi}_{k} \\boldsymbol{\\xi}_{k}^{T}\\right]$ with probability 1. (ii) Under Hypotheses H4.3 and H4.4: $\\hat{\\mathbf{A}}_{x} \\stackrel{P}{\\rightarrow} \\mathbf{A}$ and $\\hat{\\mathbf{B}}_{x} \\stackrel{P}{\\rightarrow} \\mathbf{B}$. Proof. To establish $\\hat{\\boldsymbol{\\Sigma}}_{x z z} \\stackrel{P}{\\rightarrow} E\\left[\\boldsymbol{\\xi}_{k} \\boldsymbol{\\xi}_{k}^{T}\\right]$ with probability 1, the result given in Theorem 4.1 is used which states that $S_{k}=\\mathbf{x}_{k} \\mathbf{x}_{k}^{T} \\stackrel{P}{\\rightarrow} \\mathbf{S}_{\\infty}$, where $\\mathbf{S}_{\\infty}$ is the stable matrix equilibrium defined in (4.8). The proof then follows along similar lines as in the deterministic case and using (4.66). Note that from Theorem 4.1 the following properties can be derived. From (4.8) $$\n\\overline{\\boldsymbol{\\Sigma}}_{z z}=\\mathbf{G} \\hat{\\mathbf{z}}_{0} \\hat{\\mathbf{z}}_{0}^{T} \\mathbf{G}^{T}=\\mathbf{G} \\boldsymbol{\\Sigma}_{\\boldsymbol{\\eta} \\boldsymbol{\\eta}} \\mathbf{G}^{T}=\\overline{\\boldsymbol{\\Sigma}}_{x z x}.\n$$ Asymptotic consistency with probability 1 is defined by $$\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":63,"cells":{"text":{"kind":"string","value":"!I$9H)H!ÌÌÄÌÌÌ\" )H!9!56840!!K3 \" $Î3*\"\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":64,"cells":{"text":{"kind":"string","value":" ( _v_ 0, _x_ 1,..., _x n_), with (1) ⇒ (2).\n\n. Hint: Notice that in this case, the operations\n\nwhich can also be written using the exponential function (from the preceding exercise) and the additive inverse, are closure operators for the relations _R i_ ( _i_ = 1,2,..., _n_ ). It then follows from the first part of exercise 5 that relations\n\nare _P_ -primitive recursive; their disjoint union (compare the first part of exercise 2) is, for suitable functions , an equivalence relation , and finally\n\nis also _P_ -primitive recursive (why?), and thus . If _E_ 1,..., _E n_ form a partition of _N k_, then also\n\n(for any set _A_ ).\n\n. We only look at the implication from left to right. 
The assumption means that for each _j_, ( _i_ 1,..., _i j_, _x j_) ∈ _R_ _m_ \\+ 1 iff there exist _x_ 1,..., _x j_−1 ∈ _N_, such that\n\nThus . Now use the second part of the preceding exercise.\n\n. This is a modification of a proof due to Hartley Rogers, Jr. (recall the comments following theorem 2.5.2). First prove, using definition 3.4, that for every relation _P_, the condition\n\nimplies\n\n(in both formulas, ). Then consider the formula:\n\nLet ; its satisfiability, established earlier, implies the existence of a natural number _i_, such that (1)\n\nNotice that for an arbitrary -predicate _Q_,\n\nand for an arbitrary -predicate _R_,\n\nThe formula\n\nimplies that _T_ ( _i_, _i_ ), hence, since ( _i_, _i_ ) ∉ _S_, by (1), (2) and (3), ; i.e., the formula\n\nis true. It now follows from (1) and (3) that ( _i_, _i_ ) ∈ _S_ ; i.e.,\n\nwhich contradicts (4).\n\n. (a) We have:\n\nand the right-hand side is primitive recursive.\n\n_(b) Hint:_ Consider , defined as follows:\n\n. First prove the lemma: For any classes of relations, the formula\n\nimplies\n\n. (a) That this relation is , follows, as in example 4.1.13, from the fact that , where the sequence of terms\n\nis easily seen to be primitive recursive. The relation is clearly primitive recursive; it thus remains to be shown that it is not in . Assume that it were. Then the function _f_ defined by the formula\n\nwould be primitive recursive (cf. example 4.1.9). But we know (see the proof of theorem 4.1.4) that this function cannot be primitive recursive, which gives us the desired contradiction.\n\n_(b) Hint:_ Notice that iff , and apply part (a) and exercise 4 above.\n\n. Let _R_ ( _a_, _b_, _c_ ) be a primitive recursive relation, such that for all _x_,\n\nLet . The formula\n\nholds; so for _c_ = _i_, we have that for every _x_. In particular, , i.e., for every _x_, which contradicts the choice of _R_.\n\n. Suppose, to the contrary, that _J_, _K_ were primitive recursive. Then the relation would also be primitive recursive (why?), and hence its diagonalization,\n\nwould be primitive recursive (cf. exercise 2, p. 169). However, we know that this relation is not even arithmetical (cf. the solution to exercise 6(a)).\n\n. _Hints:_ (a) If , we must have . For all .\n\n_(b) Hint:_ Use the fact that\n\n. _Hint:_ For and :\n\n. Hint: Consider the formulas\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":65,"cells":{"text":{"kind":"string","value":"Elmer Robinson]]. {{P}} '''It was a dark and stormy night''' is the first line of '''''Paul Clifford''''' (1830), written by [[British]] novelist [[Edward George Bulwer-Lytton (writer)|Edward George Bulwer-Lytton]]. Lytton won a bet that it was possible to write the opening lines of a best-selling novel consisting of nothing but adjectives and the adverb very. The passage reads:
It was a dark and stormy night; the rain fell in torrents, except at occasional intervals, when it was checked by a violent gust of wind which swept up the streets (for it is in London that our scene lies), rattling along the house-tops, and fiercely agitating the scanty flame of the lamps that struggled against the darkness. Edmund Blackadder and Baldrick disagree as to whether this opening was inspired. They compare the first line of Clifford against other great opening sentences (such as [[Jane Austen]]'s Sense and Sensibility). The scene was created for [[The Complete Works of William Shakespeare (Abridged)], the play where we meet Edmund Blackadder. It's one of the most famous scenes in the entire [[Blackadder]] series. It was actually filmed in 2001, which would explain why the costumes differ from the costumes we see in the rest of ''Back & Forth''. + In this scene, Blackadder and Baldrick attempt to perform the opening scene of the 2003 Academy Award winning movie (and all time cult favourite) [[The Lord of the Rings]], the hobbit ''[[Frodo Baggins]]'' finds the [[One Ring]] and declares \"It shall be a mighty weapon\" (\"no,\" Baldrick responds, correcting Blackadder in a moment of rare insight, \"it's meant to be a ring\"). In another nod to the film, in a previous scene, Blackadder refers to the ring as \"that Ring of power, forged in the very fires of mount doom\". Blackadder then attempts a second, more ambitious performance -- one of [[Edward George Bulwer-Lytton]]'s most famous (or infamous) opening lines -- the first line of the book ''[[Paul Clifford]]'' which begins, \"It was a dark and stormy night\". With Baldrick's support, Blackadder launches into his performance with much pomp and ceremony only to receive a slap in the face from Baldrick who belittles it as \"infantile drivel\". + Blackadder: \"This was the worst line I have ever heard. Honestly I was...shocked!\" Baldrick: \"Shocked indeed...\" When Baldrick claims he's been writing Shakespeare's plays in between time travelling and they have \"all been best sellers\", Blackadder remarks \"You know, Baldrick, for somebody who hasn't got a brain I quite like you.\" This line was used in the opening to the \"Making of Back & Forth\" extra on the DVD. The main dialogue is as follows: Baldrick: [In a thick ''Australian'' accent] \"Ed...Ed, mate.\" Blackadder: \"What is it, Baldrick, this isn't a picnic.\" Baldrick: [As Shakespeare] \"But it was a dark and stormy night! Baldrick: \"Can you imagine a more improbable start to a film?\" Blackadder: \"No\" Baldrick: \"How is it that...?\" (He is stopped from completing his question when Blackadder raises his hand. This time in his Australian accent.) Blackadder: \"Ah, it's okay, my man. I was going to suggest something a little less trite.\" Baldrick: \"Oh, like what, mate?\" Blackadder: \"Like what?...\" Baldrick: \"Yes?\" Blackadder: \"Let's make a start.\" Baldrick: \"Start with what?\" Blackadder: \"Any story can only begin one way.\" Baldrick: \"So?\" Blackadder: \"It was the best of times...it was the worst of times...\" Baldrick: \"The time of chivalry, of long swords and shields.\" Blackadder: \"It was a dark and stormy night.\" Baldrick: \"Which way are we doing it now?\" Blackadder: [Mocking him] \"The worst is yet to come.\" + The audience in this performance are a pair of [[alien]]s. They are introduced by [[Professor Riemann Schultz]] who claims to have \"created the first ever portal linking Earth to the universe of other humanoid worlds\". 
He also refers to these aliens as the \"Podunkians\" and appears to be enamoured by them. The Podunkian are similar in appearance to a cross between [[Troy McClure]] and [[Ugly naked guy]], the best known alien from the sitcom ''[[Friends (sitcom)|Friends]]''. + =References= {{Reflist}} + '''''Note: The following information has not yet been verified:''' + == {{Blackadder Links}} =="},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":66,"cells":{"text":{"kind":"string","value":"==:===:==: SIZ = 1 ----+----1----+----2----+----3----+----4----+\n -i- +...+..:....1....+....2....+....3....+....4....+....5....+....6....+.\n 0000 $f3b1a1fd 503ab065 3d0c010d 7ef56d58 ????.P;:e=...~.mX\n 0010 b15d2c6b 1a2145f1 9e67ea3b b610fa4b .],k.!E..g.;...K\n 0020 9e2a835c 43e90da7 392ef4fd a2c510ba .*.\\C...9.......\"\n 0030 49c0cb7a 3da59ce5 b5505663 b9843eeb I..z=.....PVc..>.\n 0040 9ff4b9cd 23d1ec28 98583134 78d22193 ....#..(8X14x.!\"\n 0050 8890625b f86b2ee1 7bdcf339 d2b9cf42 ..b[8k..{..9..\"B\n 0060 701f310a 895eb973 8d7cb31b bcf46f05 p.1..\"^.s.|...\"o.\n 0070 2e76b61f b60b00e6 02006e00 c6030000 .v.........n.....\n \n \n \n \n \n */\n \n int main(){\n \n //calloc() function is useful to allocate storage in contiguous memory.\n \n int n,*p,*q;\n \n printf(\"\\nHow Many Numbers: \");\n scanf(\"%d\",&n);\n \n \n p = calloc( n , sizeof(int));\n \n if(p==NULL)\n {\n printf(\"\\nSorry We Are Not Able To Create...\");\n exit(1);\n }\n \n \n for(int i=0; i l/z.\n\nProof. Suppose p is a solution of (1.1), and let d be its spectral\nmeasure. Since Hp(S,T) is analytic on D°, it is easy to show that\nthe right-hand side of (6.1) defines a bounded linear functional of S.\nSince S is an analytic Stieltjes function, a theorem of Schoenberg\n(see [5]) implies that pT((s,oo)) = I/z and therefore the\nFourier transform of a function analytic and bounded in \\z\\ < 1\nunder the inversion of the z-variable. But any measure whose Fourier\ntransform has these properties is uniquely determined, and hence\n\nis given by (6.1). This completes the proof of Theorem 6.2.\n\n6\nBIBLIOGRAPHY\n\n\n\n\fREFERENCES\n1. D. Alpay, M. E. Corach, and J. Peccati. Some observations on Schur\nand Nevanlinna functions of class D. Int. J. Math., 16:155-182, 2005.\n2. H. Fujii. Spectral transformation approach to Hamburger-Stieltjes mo-\nment problem. Houston J. Math., 13:1-7, 1987.\n3. A. Gilat. The Laplacian moment problem. In J.-P. Antoine, D. Baleanu,\nM. Demirci, Y. Luchko, M. Mursaleen, M. A. Noor, M. Mursaleen, and\nM. A. Noor, editors, Fractional Calculus, Theory and Applications, pages\n375-380. 
InTech, 2012.\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":68,"cells":{"text":{"kind":"string","value":"Arithmetic and Shifts\n\n\t.global main\n\t.text\n\nmain:\n# Storing Values in Memory:\n\n# This section illustrates how to store values in memory using the `str` and `strb` instructions in ARM assembly.\n# It demonstrates storing a 4-byte integer and a 1-byte integer in memory.\n# Data Section\n.data\nmemory_: \n\t.word 0x00 \t\t\t\t\t// This represents the 4-byte integer, initially set to 0x00\n\nmemory_b:\n\t.byte 0x00 \t\t\t\t\t// This represents the 1-byte integer, initially set to 0x00\n\nmemory_d:\n\t.space 4\n\n.text\n\tLDR r1, =memory_\n\tLDR r2, =memory_b\n\tLDR r3, =memory_d\n\n\tMOV r4, #0xDEADBEEF \t\t\t// Integer to store (4 bytes)\n\tMOV r5, #0xAA \t\t\t\t\t// Byte to store (1 byte)\n\tSTR r4, [r1, #0] \t\t\t\t// Store the integer in memory\n\tSTRB r5, [r2, #0] \t\t\t\t// Store the byte in memory\n\n# Loading Values from Memory:\n\n# This section demonstrates how to load values from memory using the `ldr` and `ldrb` instructions in ARM assembly.\n# It demonstrates loading a 4-byte integer and a 1-byte integer from memory.\n\n.text\n\tmov r6, #0x95114430\n\tSTR r6, [r1, #0] \t\t\t\t// Store the integer in memory\n\n\tLDR r4, [r1, #0] \t\t\t\t// Load the integer from memory\n\tLDRB r5, [r2, #0] \t\t\t\t// Load the byte from memory\n\n\t\n# Array Section\n\n# This section demonstrates how to work with arrays in ARM assembly.\n# It includes instructions for accessing and modifying array elements.\n\n.data\n\tnumbers: .word 10, 20, 30, 40, 50 // An array of 5 integers\n\n.text\n\tmov r6, #4\n\tLDR r6, =numbers \t\t\t\t// Load the address of the array into R6\n\n\tLDR r4, [r6, #0] \t\t\t\t// Load the first element (10) into R4\n\tLDR r5, [r6, #12] \t\t\t\t// Load the fourth element (40) into R5\n\tLDR r7, [r6, #24] \t\t\t\t// Load the fifth element (50) into R7\n\n\tSTR r8, [r6, #20] \t\t\t\t// Store 31 into the second element\n\tSTR r9, [r6, #4] \t\t\t\t// Store 21 into the third element\n\t\n\n# Array Indexing\n#\n# This section demonstrates array indexing using registers.\n# We will load and store array elements using index registers.\n\n.text\n\tLDR r6, =numbers \t\t\t\t// Load the address of the array into R6\n\tMOV r7, #2 \t\t\t\t\t\t// Set R7 as the index (2 means the third element)\n\tLDR r9, [r6, r7, LSL #2] \t\t// Load numbers[2] into R9 (multiplied by 4 for word size)\n\n\tMOV r8, #30 \t\t\t\t\t// Load 30 into R8\n\tSTR r8, [r6, r7, LSL #2] \t\t// Store 30 into numbers[2] (multiplied by 4 for word size)\n\n\n# Multidimensional Arrays\n#\n# This section demonstrates how to work with 2D arrays in ARM assembly.\n# It includes instructions for accessing and modifying 2D array elements.\n\n.data\n\tmatrix: \n\t\t.word 1, 2, 3\n\t\t.word 4, 5, 6\n\t\t.word 7, 8, 9\n\n\tnumRows: \t.word 3\n\tnumCols: \t.word 3\n\n.text\n\tLDR r1, =matrix \t\t\t\t// Load the address of the matrix into R1\n\tLDR r6, =numRows \t\t\t\t// Load the number of rows into R6\n\tLDR r7, =numCols \t\t\t\t// Load the number of columns into R7\n\tLDR r6, [r6] \t\t\t\t\t// Load the number of rows from the memory address in R6\n\tLDR r7, [r7] \t\t\t\t\t// Load the number of columns from the memory address in R7\n\n\tLDR r4, [r1, #0] \t\t\t\t// Load matrix[0][0] (1) into R4\n\n\tMOV r8, #2 \t\t\t\t\t\t// Set row index to 1 (element [1][1])\n\tMOV r9, #1 \t\t\t\t\t\t// Set col index to 1 (element [1][1])\n\n\tMOV r6, #0 \t\t\t\t\t\t// Zero r6\n\tMOV r7, #0 
\t\t\t\t\t\t// Zero r7\n\tLDR r6, =numCols \t\t\t\t// load numCols\n\tLDR r7, [r6] \t\t\t\t\t// r7 = numCols\n\t// LDR r7, =numCols \t\t\t\t// load numCols address (useless)\n\tMUL r10, r9, r7 \t\t\t\t// rowOffset = row * numCols = 1 * 3 = 3\n\tMUL r11, r8, #4 \t\t\t\t// colOffset = col * wordSize (4 bytes) = 2 * 4 = 8\n\tADD r12, r10, r11 \t\t\t\t// elementOffset = rowOffset + colOffset = 3 + 8 = 11\n\tLDR r4, [r1, r12] \t\t\t\t// Load matrix[2][1] (7) into R4\n\t\n\tMOV r4, #45\t\t\t\t\t\t// 45 into r4\n\tSTR r4, [r1, r12] \t\t\t\t// Store 45 into matrix[2][1] (replace the 7 with 45)\n\n\n# Dynamic Memory Allocation\n\n# This section demonstrates dynamic memory allocation using the `bl` instruction to call assembly routines for allocation and deallocation.\n\n.text\n\tbl allocateMemory \t\t\t\t// Call the assembly routine for memory allocation\n\tmov r1, r0 \t\t\t\t\t\t// Store the allocated memory address in r1 for deallocation\n\tbl deallocateMemory \t\t\t// Call the assembly routine for memory deallocation\n\n\n.data\ndynamic_memory_address:\n\t.word 0x00\n\n.text\nallocateMemory: \t\t\t\t\t\t// Assembly routine for memory allocation\n\tSTMDB sp!, {lr} \t\t\t\t\t// Save the return address on the stack\n\n\tbl malloc \t\t\t\t\t\t// Call the C malloc function\n\tSTR r0, dynamic_memory_address\t\t// Store the allocated memory address in dynamic_memory_address\n\n\tADD sp, sp, #4 \t\t\t\t\t// Restore the return address\n\tBX lr \t\t\t\t\t\t// Return to the calling function\n\ndeallocateMemory: \t\t\t\t\t\t// Assembly routine for memory deallocation\n\tSTMDB sp!, {lr} \t\t\t\t\t// Save the return address on the stack\n\n\tLDR r0, =dynamic_memory_address \t// Load the allocated memory address\n\tLDR r0, [r0]\n\tbl free \t\t\t\t\t\t// Call the C free function\n\tMOV r0, #0 \t\t\t\t\t\t// Clear the dynamic_memory_address\n\tSTR r0, dynamic_memory_address \n\n\tADD sp, sp, #4 \t\t\t\t\t// Restore the return address\n\tBX lr \t\t\t\t\t\t// Return to the calling function\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":69,"cells":{"text":{"kind":"string","value":"DESC (b-36561-4)'] (array([ 0., 100., 30300., 50400., 60500., 61500., 62500., 63500.,\n 64500., 65500., 66500., 67500., 68500., 69500., 70500., 71500.,\n 72500., 73500., 74500., 75500., 76500., 77500., 78500., 79500.,\n 80500., 81500., 82500., 83500., 84500., 85500., 86500., 87500.,\n 88500., 89500., 90500., 91500., 92500., 93500., 94500., 95500.,\n 96500., 97500., 98500., 99500.]))\n\n\n```python\n# Try to find the class feature by itself\nmodel = RandomForestRegressor()\n```\n\n\n```python\nmodel.fit(X_train, y_train)\ny_predicted = model.predict(X_test)\n```\n\n\n```python\n# Do not include in your report\nprint(r2_score(y_test, y_predicted))\n```\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":70,"cells":{"text":{"kind":"string","value":" now, the best performance is achieved by the cascaded de- \n cascading $[4,20]$ with the lower switching frequency operating at the main filter resonant frequency (i.e., at the inverter bridge switching frequency) and the second filter resonant frequency chosen higher to limit the second harmonics in the output inductor current as shown in Fig. 9-11. 
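As a rough aside on the resonant-frequency placement just described, each $LC$ stage resonates at $f_{res} = 1/(2\pi\sqrt{LC})$. The short sketch below is an added illustration with invented component values (not values from the text); it only shows how the second stage ends up several times higher than the main one when its $L$ and $C$ are chosen smaller.

```python
# Added illustration: place the two resonances of a cascaded LC output filter.
# Component values are assumptions for the sketch, not taken from the text.
import math

def lc_resonant_frequency(l_henry: float, c_farad: float) -> float:
    """Resonant frequency of an ideal LC stage: f = 1 / (2*pi*sqrt(L*C))."""
    return 1.0 / (2.0 * math.pi * math.sqrt(l_henry * c_farad))

main_stage = (2e-3, 50e-6)      # 2 mH, 50 uF  (assumed main filter)
second_stage = (0.2e-3, 10e-6)  # 0.2 mH, 10 uF (assumed second filter)

f_main = lc_resonant_frequency(*main_stage)
f_second = lc_resonant_frequency(*second_stage)

print(f"main filter resonance:   {f_main:7.1f} Hz")
print(f"second filter resonance: {f_second:7.1f} Hz  ({f_second / f_main:.1f}x higher)")
```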
A third-order low-pass filter consisting of an $LC$ filter followed by an inductor can also be employed, but due to the nonideal inductor quality factor (due to the capacitor voltage control mechanism), the output inductor current ripples are not usually very low compared to the second-order $LC$ filters and sometimes require control action to eliminate the low-frequency components. Higher harmonic content also leads to higher filter losses. Therefore, the second-order $LC$ filter is mostly utilized in boost converter inverter-based systems, making the converter a third-order one (Fig. 9-11) and causing large inductor current oscillations. The active damping with capacitor voltage feedback [23] is thus employed with a cascaded filter to avoid this phenomenon. \n![figure](URL) \nFIGURE 9-11 \n## 9.5.2 Control Strategy of the Three-Phase Grid-Tie AC/DC Converter System\n The two most well-known control methods are voltage control and current control. With voltage control, the input voltage (at the dc-link) of the inverter is used as the outer-loop control variable. However, with current control, the reference current is generated in the outer loop as explained below, and then this reference current is forced as the output of the converter. The grid-tied converter generally uses current control. Therefore, only the current control method is presented below. \n Based on the three-phase load current requirements and the grid voltage as feedback, the required $dq$ current reference components are generated using the phase-voltage-oriented Park transform. If the three-phase currents are generated independently, a delay and a phase shift are likely to occur, making the system unbalanced and causing an oscillating reactive power between the source and the converter. In the case of an islanded network, the dc-link voltage is regulated at a fixed value by adjusting the output active power (through a $q$-axis current controller). As per the power transfer requirement, the real power is actively controlled in a two-stage topology by utilizing the boost converter. Generally, a PI control mechanism is used, and the reference $dq$-axis current components are generated as per the desired active and reactive power transfer requirements (where the $d$ and $q$ current components in the reference frame represent reactive and active power transfer, respectively). A typical control schematic is shown in Fig. 9-12 for a grid-interconnected system. By appropriate gating signals, the current $i_{acs}$ of the converter is made to follow the current reference $i_{ac}^{*}$, which is further derived from the external dc load current and active power references of the entire system. Using the grid-voltage-oriented Park transform with the help of a synchronizing circuit [8], the current references are derived from $i_{acs}$ and $V_{dc}$. The resulting reference currents are processed into pulses at the converter gate as shown in Fig. 9-12. As discussed in Section 9.5.1, with the help of active damping techniques [8], the grid-side filter inductor current is made sinusoidal, and in the steady state there is no pulsating energy stored in the grid-side inductors, which results in no harmonic power exchanged with the grid. However, the boost converter output voltage is required to be as ripple-free as possible to reduce the input current distortion. Thus, the output inductor of the boost converter plays a vital role in smoothing its output voltage. 
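To make the outer-loop reference generation above more concrete, here is a minimal sketch of a discrete PI controller producing a $q$-axis (active-power) current reference from the dc-link voltage error. It is an added illustration only: the gains, limits and set point are invented, and the Park transform, synchronizing circuit and inner current loop of Fig. 9-12 are deliberately omitted.

```python
# Minimal sketch (added illustration): outer-loop PI control that turns the
# dc-link voltage error into a q-axis current reference. All numbers are
# assumptions for the example, not taken from the text.

class PIController:
    def __init__(self, kp: float, ki: float, dt: float, limit: float):
        self.kp, self.ki, self.dt, self.limit = kp, ki, dt, limit
        self._integral = 0.0

    def update(self, error: float) -> float:
        self._integral += error * self.dt
        out = self.kp * error + self.ki * self._integral
        if abs(out) > self.limit:                 # clamp the output and back off
            self._integral -= error * self.dt     # the integrator (anti-windup)
            out = max(-self.limit, min(self.limit, out))
        return out

vdc_ref = 700.0                                   # assumed dc-link set point [V]
vdc_loop = PIController(kp=0.5, ki=20.0, dt=1e-4, limit=50.0)

def current_references(vdc_measured: float, id_reactive_demand: float):
    """q-axis (active-power) reference from the dc-link PI loop; the d-axis
    (reactive-power) reference is passed straight through so the sketch stays
    independent of any particular transform convention."""
    iq_ref = vdc_loop.update(vdc_ref - vdc_measured)
    return id_reactive_demand, iq_ref

print(current_references(vdc_measured=690.0, id_reactive_demand=0.0))
```

In a full implementation these references would feed the inner current controllers and the modulator indicated in Fig. 9-12.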
The boost converter control architecture is described in Section 9.3.3. \n![figure](URL) \nFIGURE 9-12 \n## 9.6 Grid Tied Four-Leg Inverter Operation\n As discussed in Section 9.5, the grid-connected inverter uses the load or dc current as a feed- back to control the inverter current, whereas in a standalone microgrid application with unbal- anced and nonlinear loads, the inverter controls its current to regulate the ac-side voltage at the converter end. As stated above, the grid-tied inverter uses current control method (discussed in Section 9.5) and three- or four-leg topologies are generally used in these applications. Con- sideration of the configuration of the inverter side is outside the scope of this discussion and the control approach discussed here is focused on voltage control method, where the inverters \n![figure](URL) \n FIGURE 9-13 Block schematic of two-stage boost converter with four-leg inverter topology. \n work in closed loop to control the ac-side voltage at the converter end. Although only boost converter (first-stage) control is discussed below, all other converters in a dc microgrid may have similar control requirements as for the first-stage converter for the load-leveled dc voltage. As illustrated in Fig. 9-13, the boost converter-based four-leg inverter topology with dc-link and battery converter interfacing converters is shown, where a low-frequency grid interlinking ac/dc link voltage and battery converter dc-link voltage are kept constant. \n The control scheme of the four-leg inverter is shown in Fig. 9-14 where both negative andpositive sequence control loops are employed in two-step for controlling the load voltage $V_{a c r, d e}$ at the inverter end (which has only positive sequence components if balanced load is considered). A PI controller is also used for the frequency control. \n The operation of a four-leg inverter system with a variable speed wind generator under dif- ferent conditions of the dc-side voltage magnitude variations is shown in Fig. 9-15 [13] for grid- connected operation, which shows the grid-side current and ac-dc-link side currents along with the output and input voltage of the four-leg inverter under balanced load. \n![figure](URL) \nFIGURE 9-14 \n![figure](URL) \nFIGURE 9-15 \n## 9.7 Experimental Results of a dc Microgrid Application\n Experimental results are presented to show the two-stage converter operation in a $20-kW$ mi crogrid test bench as shown in Fig. 9-16 with the boost converter, dc/dc converter, battery converter, and dc/dc link interface between dc and ac grid microgrids. \n A comparison of the steady-state three-phase boost converter input ac current waveforms with and without load-leveling control mechanism of dc microgrid has been demonstrated in Fig. 9-17 for the power taken by the battery converter and the dc-dc converter. It is clearly seen that the three-phase boost converter ac current is in phase with the respective phase ac voltage to deliver the required power. Furthermore, the dc load-leveling strategy (also referred to as voltage droop) using the dc bus voltage achieves a total harmonic distortion (THD) of only2.97% as shown in Fig. 9-17. \n![figure](URL) \nFIGURE 9-16 \n![figure](URL) \nFIGURE 9-17 \n## 9.8 Conclusion\n A dc microgrid may have several distributed renewable sources connected to the dc link. The application of boost-type ac/dc converters has been demonstrated for obtaining unity power fac- tor and near-sinusoidal input currents even under unbalanced load conditions in a dc microgrid. 
The proposed control scheme has the advantage of implementation of only a boost converter and voltage source inverter with a dc-link capacitor to interface with the ac-grid side. The grid- tied control has been discussed for both current-mode control and for four-leg inverter applica- tion for a dc microgrid. Experimental results have shown better ac-side performance with reduced harmonic contents even under unbalanced load conditions. \n## References"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":71,"cells":{"text":{"kind":"string","value":"^1,...,L_n]^t\\left[ \\begin{array}{ccc}0 & & 0 \\\\0 & & 0 \\\\0 & & 0 \\\\L^1 & \\ldots & L^n\\end{array}\\right]$ \n $ = \\left[ \\begin{array}{cccc}y_n & 0 & \\ldots & 0 \\\\0 & 1 & 0 & \\ldots \\\\. & . & . & . \\\\0 & 0 & \\ldots & 0 \\\\. & . & 0 & . \\\\. & . & . & . \\\\\\end{array}\\right],$ \n\n\nfrom which it follows that $L_n$ is positive definite, as well as\nthe upper left corner block\n $ L_{n-1} = \\left[ \\begin{array}{cccc}0 & 0 & \\ldots & 0 \\\\0 & 1 & 0 & \\ldots \\\\. & . & . & . \\\\0 & 0 & \\ldots & 0 \\\\. & . & 0 & . \\\\. & . & . & . \\\\\\end{array}\\right] - \\left[\\begin{array}{c}y_1 \\\\. \\\\. \\\\y_{n-1}\\end{array}\\right]\\cdot [y_{n-1}^1, \\ldots , y_{n-1}^{n-1} ]. $ \n\n\nNext, write\n $ H^3 = \\left[\\begin{array}{c}R^3 \\\\R^{n-1} \\\\R^{m}\\end{array}\\right]=\\left[ \\begin{array}{c}L_1 & & 0 & \\\\& L_{n-1} & & 0 \\\\& & L_n & 0 \\\\Q^1 & Q^{n-1} & Q^m & L_N\\end{array}\\right]\\left[\\begin{array}{c}H^1 \\\\H^{n-1} \\\\H^{m}\\end{array}\\right],$ \n\n\nwhere we can apply induction to\n $ \\tilde{R}^{3} =\\left[\\begin{array}{c}R^{n-1} \\\\R^{m}\\end{array}\\right] =\\left[ \\begin{array}{cc}L_{n-1} & \\\\Q^{n-1} & Q^m\\end{array}\\right]\\left[\\begin{array}{c}H^{n-1} \\\\H^{m}\\end{array}\\right], $ \n\n\ngiven that the upper left corner block, $L_{n-1}$ is positive\ndefinite.\nThis shows that $L_n$ is invertible and by construction\n $\\left[\\begin{array}{cccc}y_1 & 1 & \\ldots & 0 \\\\y_2 & 0 & \\ldots & 0 \\\\. & . & . & . \\\\y_{n-1} & 0 & \\ldots & 0 \\\\. & . & 0 & . \\\\. & . & . & . \\\\\\end{array}\\right]\\left[ \\begin{array}{cccc}0 & 0 & \\ldots & 0 \\\\0 & 1 & 0 & \\ldots \\\\. & . & . & . \\\\0 & 0 & \\ldots & 0 \\\\. & . & 0 & . \\\\. & . & . & . \\\\\\end{array}\\right]^{-1} =\\left[\\begin{array}{c}y_1 \\\\y_2 \\\\. \\\\y_{n-1} \\\\. \\\\. \\\\\\end{array}\\right] $ \n\n\nhas the form required in the induction assumption.\nThere remains to show that $L_{n}$ is positive definite.\nNow we proceed as in the proof of case (ii).\n\n\nUse $a$ (and similarly $b$ ) to denote the $n-1$ dimensional vector\ndefined by the coordinates $1,...,n-1$ .\nThen,\n $ L_{n-1} = L_3 - L_3\\left[\\begin{array}{cc}\\frac{\\langle y^{a}, y^b\\rangle _{L^{-1}_2}}{\\langle y^a, L_2y^a\\rangle } y^ay^b & \\frac{\\langle y^b, y^c\\rangle _{L^{-1}_2}}{\\langle y^a, L_2 y^a\\rangle } y^a y^c \\\\\\frac{\\langle y^{b}, y^a\\rangle _{L^{-1}_2}}{\\langle y^a, L_2 y^a\\rangle } y^by^a & L_2 - \\frac{\\langle y^c, y^a\\rangle _{L^{-1}_2}}{\\langle y^a, L_2 y^a\\rangle } y^a y^c\\end{array}\\right] L_3. 
$ \n\n\nHence\n $ L = \\left[ \\begin{array}{ccc}0 & 0 & 0 \\\\0 & L_3 & L_N \\\\0 & L_N^t & L_m\\end{array}\\right] - \\left[ \\begin{array}{c}0 \\\\L_3\\end{array}\\right]\\left[\\begin{array}{cc}\\frac{\\langle y^{a}, y^b\\rangle _{L^{-1}_2}}{\\langle y^a, L_2y^a\\rangle } y^ay^b & \\frac{\\langle y^b, y^c\\rangle _{L^{-1}_2}}{\\langle y^a, L_2 y^a\\rangle } y^a y^c \\\\\\frac{\\langle y^{b}, y^a\\rangle _{L^{-1}_2}}{\\langle y^a, L_2 y^a\\rangle } y^by^a & L_2 - \\frac{\\langle y^c, y^a\\rangle _{L^{-1}_2}}{\\langle y^a, L_2 y^a\\rangle } y^a y^c\\end{array}\\right]\\left[\\begin{array}{c}L_3^t \\\\L_N^t\\end{array}\\right]. $ \n\n\nThen\n $ L = L_{N+3} - \\sum _{i=1}^3\\left[ \\begin{array}{c}0 \\\\L_i\\end{array}\\right]\\left[\\begin{array}{c}y^iy^n \\\\\\frac{1}{y_n^n} y^iy^n\\end{array}\\right]L_{N+3} = L_{N+3} - \\sum _{i=1}^3\\left[ \\begin{array}{c}0 \\\\L_i\\end{array}\\right]\\left[\\begin{array}{c}y^iy^n \\\\\\frac{1}{y_n^n} y^iy^n\\end{array}\\right]\\left( \\left[ \\begin{array}{c}0 \\\\L_i\\end{array}\\right]\\left[\\begin{array}{c}y^iy^n \\\\\\frac{1}{y_n^n} y^iy^n\\end{array}\\right]\\right)^tL_{N+3}. $ \n\n\nThe result for the matrix\n $\\tilde{L} = L_{N+3} - \\sum _{i=1}^3\\left[ \\begin{array}{c}0 \\\\L_i\\end{array}\\right]\\left[\\begin{array}{c}y^iy^n \\\\\\frac{1}{y_n^n} y^iy^n\\end{array}\\right]\\left( \\left[ \\begin{array}{c}0 \\\\L_i\\end{array}\\right]\\left[\\begin{array}{c}y^iy^n \\\\\\frac{1}{y_n^n} y^iy^n\\end{array}\\right]\\right)^t \\in M_{N+n}(^2, $ \n\n\nwhich is defined on a co-dimension two space in $^2$ , now follows\nfrom lemma 3.3.\n\n\nTo make the transition to $L$ from $\\tilde{L}$ , write for a linear\ntransformation, $A$ ,\n $M = P_{\\hat{\\pi }^{\\perp }}^{-1}AP_{\\hat{\\pi }^{\\perp }} = A^{\\prime } + \\beta Q +\\gamma Q^t + \\alpha QQ^t. $ \n\n\nThen $M^{\\prime } = A^{\\prime }$ , and $K = Q^{\\prime \\perp } M^{\\prime \\prime }Q^{\\prime } = \\alpha $ .\nThe equation (REF ) to be considered becomes\n $ f(A^{\\prime } + K Q^3 + \\alpha QQ^t)f(A^{\\prime }) = f( \\alpha Q Q^t). $ \n\n\nThis is equivalent to (REF ) after a further application of lemma 3.5.\n\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":72,"cells":{"text":{"kind":"string","value":"mmnik 764cdfecfd2023-07-11 15:47:44 +01007-10/cmd/cli/main.go\n/cmd/tpcds/tpcds.go\n/internal/pkg/schema/bloom/bloom.go\n/internal/pkg/schema/metadata.go\n/internal/pkg/schema/table.go\n/internal/pkg/schema/testdata/schema\n/internal/pkg/storage.go\n/internal/pkg/tpcds/schemas.go\n/pkg/client/db.go\n/pkg/client/datastructures.go\n/pkg/client/db_test.go\n/pkg/client/query.go\n/pkg/client/resultset.go\n/pkg/client/schema.go\n Make `WithColumns` a flag of `TableWithColumnsQuery`\nThis avoids introducing a new function variant for that functionality while allowing us to specify a separate list of column names and a list of columns to filter out.\n`db_test.go`'s `TestShowTablesWithColumns` has been extended to cover that case.\n \n\nmmapfd/mmapfd\n`mmapfd` is an io.ReadWriteCloser that mmaps a temporary file.\nExample Usage\n\nfd, err := mmapfd.New()\nif err != nil {\n panic(err)\n}\ndefer fd.Close()\nfile := fd.File()\n\nn, err := io.Copy(file, r)\nif err != nil {\n panic(err)\n}\nfile.Seek(0, io.SeekStart)\nn, err = io.Copy(w, file)\nif err != nil {\n panic(err)\n}\n\n\nmmapfd/pkg/file\n`file` provides common file operations (like opening, closing, locking, truncating) that work on file descriptors as opposed to `*os.File`. 
In addition it provides an io.ReadWriteAt implementation backed by mmap.\nExample Usage\n\nimport \"github.com/bytebase/mmapfd/pkg/file\"\n\n// Opens and optionally locks a file, returning a file descriptor.\nfd, err := file.OpenLock(\"test/data.json\", true)\nif err != nil {\n return nil, err\n}\ndefer file.CloseUnlocked(fd)\n\nvar size int64\nvar content []byte\n\n// Gets the size and content of the file.\nsize, err = file.Size(fd)\nif err != nil {\n return nil, err\n}\nif size == 0 {\n return nil, nil\n}\ncontent, err = file.MMap(fd, 0, size)\nif err != nil {\n return nil, err\n}\ndefer file.Unmap(content)\n\n// Does something with the file.\n// ...\n\n\nmmapfd/pkg/mmap\n`mmap` package provides convenient wrappers over syscalls and runtime APIs to make mmap/unmap code simpler.\nExample Usage\n\nimport \"github.com/bytebase/mmapfd/pkg/mmap\"\n\nvar bytes []byte\n\nfd, err := mmapfd.New()\nif err != nil {\n panic(err)\n}\ndefer fd.Close()\n\nbytes, err = mmap.MapFile(fd.FD(), int(fInfo.Size_))\nif err != nil {\n panic(err)\n}\ndefer mmap.Unmap(bytes)\n\n\nmmapfd/pkg/temp\n`temp` provides file operations (like open and close) using temporary files on disk.\nExample Usage\n\nfd, path, err := temp.OpenFile(\"data\")\nif err != nil {\n panic(err)\n}\ndefer temp.CloseFile(fd, path)\n\nn, err := io.Copy(w, oFile)\nif err != nil {\n panic(err)\n}\n\n\nmutils\nUtilities for migration project.\n\ngo install go.uber.org/mock/mockgen@latest\n\n\n\n# Generate mock\nscripts/mockgen.sh\n\n\n\n# Setup the environment and run test\nscripts/test.sh\n\n\nUsage of LFS files:\n\ngit lfs install\ngit lfs track \"**.csv\"\ngit add .gitattributes\ngit add ./*.csv\n\n\nSupported engines\nSQLite (partial: sql driver)\nMySQL (partial: go-mysql driver)\nSQL Server (partial: tds driver)\nPostgres (full: sql driver)\nTODO:\nSQL Server: Use SELECT TOP query to replace LIMIT OFFSET.\nMySQL: Add more tests and maybe use the full feature mysql driver\nHow to run test\nPrepare PostgreSQL server using make docker_db command to launch docker image\nPrepare PostgreSQL client with psql or commandline psql of postgresql 14 version and run sql/drop_postgresql_test.sql to create test databases. You also need to create 2 new users with create role privilege:\n\n// username: migrate_pgwriter_test, password: migratepgwritertest\nCREATE USER migrate_pgwriter_test CREATEROLE CREATEDB;\n// username: migrate_pgreader_test, password: migratepgreadertest\nCREATE USER migrate_pgreader_test;\n\n\nCheck your PostgreSQL host IP in file postgresql.conf with parameter listen_addresses\nCheck you postgresql listen on port 5432 (default).\nRun test: go test . -p=1\nTips: if the test failed with error pq: terminating connection due to administrator command, please retry test a few time until it passed.\nYou can use a simple docker-compose.yml to setup the above test environment\nRun docker-compose: docker-compose up\nRun docker db setup: make docker_db\nRun test: go test . -p=1\nBenchmark\nWe run 100,000 INSERT queries to Postgres database with Go and Elixir. 
We compare the performance on Postgres driver, PostgreSQL parser and copy_from file execution time.\n\n\n\n\n\nmake build # build binary `./migrate`\n./migrate --help\n\n# PostgreSQL to PostgreSQL\n./migrate pg \"dbname=postgres user=migrate_pgreader_test host=172.23.1.2 sslmode=disable\" pg \"dbname=postgres user=migrate_pgwriter_test host=172.23.1.2 sslmode=disable\" --table user --query \"SELECT * FROM user\" --limit 100\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":73,"cells":{"text":{"kind":"string","value":"l. _Hom_ _ (B, A)_ We note that the exactness of (i) follows from that of (ii) and (iii), since each morphism is an isomorphism. But the proof of (ii) is trivial, as every morphism in _Mod_ − _A_ has a kernel and, moreover, by 7.4, the functor _M⊗_ is exact. Finally, the proof of (iii) depends on the following general lemma:\n\n**Lemma.** If _i:A → B_ is a monomorphism in _A_, the functor _Hom A(i, M)_ is an epimorphism from _Hom A(B, M)_ to _Hom A(A, M)_ for every _M_ in _Mod −A_.\n\nPROOF. If _g ∈ Hom A(A, M)_ we may take _Hom A(i, M)(f) = g_ for _f_ any morphism from _B_ to _M_ such that _fi = g_. So we must show that such a morphism _f_ exists. Let _X_ be the subobject _i(A)_ of _B_, let _X′_ be the kernel of the identity map 1: _B → B_, let _p′_ be the corresponding projection onto _B/X′_, and let be the restriction of _g_ to _X_ (recall that _M_ is discrete). By 1.6 and 4.4, the factorization through _X/X′_\n\nof the homomorphism _g/X_ : _X/X′ → M_, given by 4.4, is uniquely determined by the choice of over _X/X′_. If this factorization is the morphism ( _X/X′_ ) _⊕_ ( _B/X′_ ) _M_, then the composite\n\nis clearly a homomorphism from _B_ to _M_ such that _fi = g_, for if 7.7( _c_) is the inclusion, then _iB = r p′i = r iB′ = j iA_ = _i A_(by 7.7( _a_) and ( _b_ )), so _f_ is well defined by\n\nConsequently, _Hom A(i, M)_ is an epimorphism. |\n\nPROBLEMS\n\n****. Prove that the functor _M⊗_ is the zero functor if and only if _M_ is the zero object in _Mod − A_.\n\n****. Prove that the tensor product of two free modules is also free.\n\n****. Let _M_ be a right _A_ -module. Prove that the left ideal { _a_ | _Ma = 0_ } of _A_ is the largest left ideal of _A_ in the kernel of the functor _M⊗_.\n\n****. An exact sequence _T′ → T → T′′ → 0_ in _Mod − A_ is said to be _split exact_ if there is a (necessarily monic) morphism _k : T′′ → T_ such that _wk =_ 1. Prove that the following statements are equivalent:\n\n(1) _T′ → T → T′′ → 0_ is split exact.\n\n(2) _T ∼ T′ ⊕ T′′_.\n\n(3) Every sequence of left _B_ -modules _0 → X → Y → Z_ which becomes exact after tensoring by _T_, that is, by applying the functor _T⊗_, is exact. (Show that _T′_ → _T_ has a _section_, by first exhibiting a morphism _T → T′_ and tensoring with _Z_.)\n\n****. Suppose _X → Y → Z_ is an exact sequence of left _B_ -modules.\n\n(1) Give a direct proof of the lemma in this section.\n\n(2) Show, using Problem 4, that if (i) _M_ is free or (ii) _Z_ is projective, or (iii) _M⊗ X → M⊗ Y_ is injective, then _M⊗ Y → M⊗ Z_ is an epimorphism.\n\n****. Assume that the given exact sequence _X → Y → Z_ splits. Show, using Problem 4, that _M⊗ Y → M⊗ Z_ is also an epimorphism for any _M_ in _Mod_ - _A_.\n**10. Equivalent Categories and Functors**\n\nWe often have the impression that certain mathematical structures are really \"the same\". 
This idea can be expressed precisely by making use of the concept of a category and, in particular, by describing the idea of a _categorical equivalence_. The purpose of this section is to clarify the definition of a categorical equivalence and to introduce some relevant concepts. We begin by defining a _functorial isomorphism_.\n\n**Definition.** Let _C_ and _D_ be categories and let _F : C → D_ and _G : D → C_ be functors. A _functorial isomorphism_, or _equivalence of categories_, from _F_ to _G_ is a natural isomorphism _μ_ : 1 _C → G ◦F_ such that _ν_ : 1 _D → F ◦ G_ is also a natural isomorphism. Here 1 _C_ and 1 _D_ are the identity functors. A functor _F : C → D_ is said to be a _category equivalence_ (or to establish _categorical equivalence_ ) if there is a functor _G : D → C_ such that _F_ and _G_ are functorially isomorphic.\n\nIt is clear that _F_ is a category equivalence if and only if _G_ is a category equivalence. If _μ_ establishes a functorial isomorphism between _F_ and _G_, one might say that _μ_ establishes a category equivalence between _C_ and _D_, and, when the context is clear, _μ_ may be called a category equivalence.\n\nWe shall prove two useful lemmas about category equivalences. To state them more precisely, we introduce two concepts. One is the \"smallest\" category containing a given set.\n\n**Definition.** If _S_ is a set and _C_ is a category, a __ functor__ from _S_ to _C_ is a family _fs_ of objects of _C_, one for each _s_ in _S_ (see 3.1)."},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":74,"cells":{"text":{"kind":"string","value":"Article: Difference between revisions of \"Problem 5\"\nRevision as of 01:40, 4 March 2007\nPart 1.\nFix n\\in \\mathbf{N} and show that the set:\n\\Sigma_n = \\left\\{(p_1,...,p_n)\\in \\mathbf{R}^n |p_k>0 \\forall k, \\sum\\limits_{k=1}^n p_k = 1\\right\\}\nis connected\nProof\nIf we can find a path between all points of \\Sigma_n, \\Sigma_n will be path connected, and thus connected. Choose any p,q\\in\\Sigma_n. Define:\nf:[0,1]\\to\\Sigma_n, f(t)=tp+(1-t)q\nNote that if t\\in[0,1], the sum of the components of f(t) is:\n\\sum\\limits_{k=1}^n (tp_k+(1-t)q_k)=\\sum\\limits_{k=1}^n t(p_k+(1-t)q_k)=t\\left(\\sum\\limits_{k=1}^n p_k\\right)+(1-t)\\left(\\sum\\limits_{k=1}^nq_k\\right) = t+(1-t)=1\nTherefore f(t) \\in\\Sigma_n, and thus f:[0,1]\\to\\Sigma_n. It is clear that f is continuous, therefore f is a path between p and q. Since p,q were arbitrary, \\Sigma_n is path connected, and therefore connected.\nPart 2.\nShow that \\Sigma_{\\infty}=\\{(p_1,p_2,...,p_k,...)|p_k>0\\forall k and \\sum\\limits_{k=1}^{\\infty}p_k=1\\} is a connected metric subspace of l_1 (here l_1 is the space of absolutely convergent series with the metric d(x,y)=\\sum|x_i-y_i|)\nProof\nDefine f:\\Sigma_{\\infty} \\to \\Sigma_{\\infty} as:\nf((p_1,p_2,...))=(e^{-p_1},e^{-p_2},..., e^{-p_k},...)\nThen:\n\\sum\\limits_{k=1}^{\\infty}e^{-p_k} =\\sum\\limits_{k=1}^{\\infty}\\lim\\limits_{m\\to\\infty}(1-\\frac{p_k}{m})^m=\\sum\\limits_{k=1}^{\\infty} \\lim\\limits_{m\\to\\infty}\\left(1-\\frac{1}{m} \\right)^{mp_k}=\\sum\\limits_{k=1}^{\\infty} \\lim\\limits_{m\\to\\infty}\\left((1-\\frac{1}{m})^m\\right)^{p_k}=e^{-\\sum\\limits_{k=1}^{\\infty} p_k} = 1\nTherefore f(p) \\in \\Sigma_{\\infty}. f is clearly continuous, and is its own inverse. f is therefore a homeomorphism, and is thus a connected metric space.\nPart 3.\nIs it connected if we change l_1 to l_2? 
(with metric: d(x,y)=\\left(\\sum|x_i-y_i|^2 \\right)^{\\frac{1}{2}})\nProof\nThe metric in l_2 doesn't change the topology.\nRetrieved from \"https://www.projectrhea.org/rhea/index.php?title=Problem_5&oldid=6165\""},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":75,"cells":{"text":{"kind":"string","value":"---+-:---:----;: ;-.....I-----~;-;·~ ~ =Pump characteristic l ~ ·--.. ~;......JQ, a ';;-+- :t'! ~~;I,.\n~---l··--......l QmJ '\n.; 50;-m;,,.. \"',;.\n~----.\nSystem characteristic '~O~-----I 3. (b) and (c) Consider the circuit shown in Fig.3(b). The pump discharges are 0 1 to branch l and 0 2 to branch 2.\n. a1m - oJlm L, V?\nI Vr; a,,lml - (i) Pm = + o.fml -- (ii) a = ~~ I+0l_~lm2) --· (iii) oJ 1m 2 Q lm2 - a, lm2 - ~ L, h, ~ (iv) a, 1m1 - - Vri - a; 1m 2 - 'L'2 0, V?2 = (v) Adding equation (iii) and (v) and then simplifying for Q1 1m1 = (01+~lm 2) _ 0, + ~lm2 ( ~+ ~) +2 a, ~Jm2 2 +2 a1 ~1m1 1m1 a1 a, (vi) Now Q2 =(0,-Q lm,) and replacing Q1 1m2 in equation (iii) in terms of Q~ a~, oJ and L,; Ql1m2 = 2L,[p2 2a1a2Vt! 1m1 2a1~ a2 4 a 2 Vt!2] ~Q2 2~ a, 2~ a1 a2 The above equation is a single term discharge-head characteristic.\n3. (d) Assume one pump set, QHdischarge ~ system discharge for A, B and C with out interferences.\nFor A, 1m = 525.8 m = 16440 (1-0.05 ) 1/2 -- ~~~ - ForB, V525.8 r.; For C, (vi) 39.06 m For the head of the hydraulic turbine to be constant, the three pumps should be placed in series (see Fig. 4).\nQ= 300 L/sec 1460.8 57.5 m _ I I A1 A2 Fig. 4. Pump operation.\noJ 1m 2 L, 2 02 oJ2 -L2 + 2 _ 39.06 + 28.11 = 67.17 m L2 4. For Q;.,a, a) where oJ = maximum discharge, a 11 = speed.\n..c.\na1m2 (300 + a,125.8 1/2 r:vJ = 2 125.8 = 57.27 m~...\nJ c 1/2 ) oJ 1m2 02 oJ2 V525.8 L1 r.; ; 44.25 m Q= 200 L/sec 57.27 x 1/2 0, r.; 4 2 ~~06 x 1/2 = 200 L/sec ~2 = 28.11 m 13.8 107330 x 16.7 = a1 Qlm = 0; (1-Q~a?r· (i) This can also be written as Q 1m a2 where a=O·.\n[OJ (1- Q 2m ) + l] (ii) R = O; (1-Q~a?)1·1 (iii) Taking log on both sides log (Q1m) = log(a2 0,) - (1 ·1) log (1- Qm 2 ) - log(a2) o o (iv) This equation is of the form Y= m X + c for a straight line.\nm and c are to be determined from a number of trial values of OJ at constant head.\nE.g. for H = 300 m oJ (L/sec) 50.62 55.9 62.25 63.12 1m2 (m) log(OJ 1m 2) 5.3808 5.5064 5.6395 5.6636 X=log(1-Q 1m ) - log (a2) 0.000012103 0.00011359 0.00083153 0.0010239 -4.9168 -3.9482 -2.0804 -1.9901 The above data is plotted on the following graph.\n0.0000 0.0001 o.OOOOOI Xl 0.0002 0.0003 I 0.0004 0.0005 I I I 0.0006 Y=-3163.3+0.0000338 x Slope m = -3163.3 (v) intercept = 0.0000338 (vi) From Equation (iv) log (Q1m2) + 1 ·1 log (1-Q1m2) = log (a1 ) - log (0.) log (Qlm2) (1 +1.1) = (log(a1)-log(a2))-1.1log(1-Q1m2) log(Q 1m2) = [log(a,-)-log(a2)]-1.1 log(1-Q1m2) ( 2'1 ) _ 2'1 - 2.1log ( a.)-2.1 log ( a2)+2.2 log (1-Q m) =log(a1 )-log(a2)-1.1 log ( 1 -Q 1m2) Substitute in equation (vi) log ( a2) = +-5.6636 1.1 log ( 0.5 ) a = 0.0111 0.6 = +-3.7284 3.65 +-2.7284 3.65 (vii) From equation (v) 1og(a1 ) =-- 3163.3 + 1og(a2) 3163.3 + log (0.0111) = 9.4974.\na1 = 3.15xl09 (viii) By further iterations, for H = 600 and H = 1200 m, we obtain log(a1 ) = 22.043, log(a2 ) = 2.91, a1 = 1.099 xl0 22, a2 = 812.8 5. (i) The centrifugal pump delivers 1000 1/sec against a head of 50 m.\n2. A series of parallel plate electrodes are arranged inside a circular cylindrical conductor as shown in Fig.2. The spacing between the electrodes is a and the diameter of the conductor is 2a, and the length is L. 
The supply voltage is V and the velocity of the moving electrode V~ Calculate the displacement current density and the electric field strength with and without the moving electrode.\nElectrical length of the slot A.\nc Figure 2 ~3~.\n3.\nFor the circuit shown in Fig.3 obtain an expression for the input impedance in terms of Z1, Z2, Z3 and Z4.\nThe current was measured as 5 A. Find the ohmic value of each element.\noQ:jI 0, Figure 5 Figure 4 5. A 150 W light bulb, operated from 250 V, has a resistance when cold of 42.Q and operates at 420°C. Determine the resistance at 150°C (when hot) and the rise of temperature while operating at its rated voltage.\nNeglect thermal capacitance and assume that the resistance of the filament is given by the relation R=A+B(T-20°C).\nAnswers: 1. (i) 9.92 Jlm, (ii) 2.137 Jlm 2.\n3.\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":76,"cells":{"text":{"kind":"string","value":"i_{(\\l{i})}, I^*\\backslash J_i}]\\right)_{r>s} &=& \\left[2 \\epsilon_{r+1, s+1} \\chi\\_{A_{(\\l{r})}\\cup A_{(\\l{s})}}^{-1},\\begin{pmatrix} -i_{(\\l{r})} & i_{(\\l{r})}\\\\ i_{(\\l{s})} & -i_{(\\l{s})} \\end{pmatrix} \\right]\\;.\n\n \\end{eqnarray}\n\nSince $I^*\\backslash J_r$, $I^*\\backslash J_s$ are orthogonal,\nwe obtain \n \\[ \n \\left[2 \\epsilon_{r+1, s+1} \\chi\\_{A_{(\\l{r})}\\cup A_{(\\l{s})}}^{-1},\\begin{pmatrix} -i_{(\\l{r})} & i_{(\\l{r})}\\\\ i_{(\\l{s})} & -i_{(\\l{s})} \\end{pmatrix} \\right] = \\left[1, \\begin{pmatrix} i_{(\\l{r})} & i_{(\\l{r})}\\\\ i_{(\\l{s})} & i_{(\\l{s})} \\end{pmatrix} \\right]\\]\nUsing Proposition~\\ref{prop:exact}, for any $(s_1, \\dots, s_k)$ there is a unique sequence of integers $(z^*, \\dots, z_1)$ that belongs to the image of $\\Psi^{Z_{B}}$,\nand $(\\varphi_{{\\bf B}}(\\l{\\theta}),z_*)=\\Psi^{Z_{{\\bf B}}}(\\l{\\theta}, (z_*, \\dots, z_1))$.\nIn our case, we have that $s_\\l{s}=s_\\l{r}=1$, thus $z_1=0$, \nsince if not, it is easy to see that the determinant of the right hand side of \n$[ \\chi_{A_{(\\l{s})}}, i_{(\\l{r})}] + [\\chi_{A_{(\\l{r})}}, -i_{(\\l{r})}] \n= \\left[1, \\begin{pmatrix} i_{(\\l{r})} & i_{(\\l{r})}\\\\ i_{(\\l{s})} & -i_{(\\l{s})} \\end{pmatrix} \\right]$\nis non-zero. We obtain\n\\begin{equation}\n \\label{eq:bij2} \n \\varphi_{{\\bf B}}(\\l{\\theta}) = (\\l{i}+\\epsilon z^*)_*, \n \\mbox{ where } z^* \\mbox{ is the unique solution of } 1= [\\chi_{A_{(\\l{r})}\\cup A_{(\\l{s})}}^{-1}, i_{(\\l{r})}-i_{(\\l{s})}]\n \\;.\n \\end{equation}\n\nWe will prove below that \nfor any integers $i_{\\l{r}} \\neq i_{\\l{s}}$ and $0\\leq r\n RangeIndex: 25 entries, 0 to 24\n Data columns (total 2 columns):\n # Column Non-Null Count Dtype \n --- ------ -------------- ----- \n 0 Hours 25 non-null float64\n 1 Scores 25 non-null int64 \n dtypes: float64(1), int64(1)\n memory usage: 528.0 bytes\n\n\n### Data Analysis and Visualizations\n\n\n```python\nplt.scatter(df['Hours'],df['Scores'])\nplt.title('Hours vs Score')\nplt.xlabel('Hours')\nplt.ylabel('Score')\nplt.show()\n```\n\n\n
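Before judging the trend purely by eye, it can help to quantify it; the following cell is an added check (not part of the original notebook) that computes the Pearson correlation on the same `df` used above.

```python
# Added check: quantify the linear relationship visible in the scatter plot.
correlation = df['Hours'].corr(df['Scores'])   # Pearson correlation by default
print('Pearson correlation between Hours and Scores:', round(correlation, 3))
```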
\n\n\n### Observation:\nFrom the above scatter plot, we can see the relation between the number of hours studied and the corresponding scores obtained are directly proportional. Thus,we can predict the marks scored by students based on the hours studied.\n\n### Selecting Dependent and Independent Variables\n\n\n```python\nx = df.drop('Scores',axis=1)\nx\n```\n\n\n\n\n Hours\n 0 2.5\n 1 5.1\n 2 3.2\n 3 8.5\n 4 3.5\n 5 1.5\n 6 9.2\n 7 5.5\n 8 8.3\n 9 2.7\n 10 7.7\n 11 5.9\n 12 4.5\n 13 3.3\n 14 1.1\n 15 8.9\n 16 2.5\n 17 1.9\n 18 6.1\n 19 7.4\n 20 2.7\n 21 4.8\n 22 3.8\n 23 6.9\n 24 7.8\n\n\n\n\n```python\ny=df['Scores']\ny\n```\n\n\n\n\n 0 21\n 1 47\n 2 27\n 3 75\n 4 30\n 5 20\n 6 88\n 7 60\n 8 81\n 9 25\n 10 85\n 11 62\n 12 41\n 13 42\n 14 17\n 15 95\n 16 30\n 17 24\n 18 67\n 19 69\n 20 30\n 21 54\n 22 35\n 23 76\n 24 86\n Name: Scores, dtype: int64\n\n\n\n### Splitting Data into Training and Testing Sets\n\n\n```python\n# Splitting Training and Test Set\n\nfrom sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2, random_state=0)\n```\n\n### Training Data\n\n\n```python\n#Fitting Simple Linear Regression to the Training Set\nfrom sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression()\nmodel.fit(x_train, y_train)\nprint(\"Training complete.\")\n```\n\n Training complete.\n\n\n\n```python\n#Printing the slope/coefficient and intercept values\nprint('Slope:',model.coef_)\nprint('Intercept:',model.intercept_)\n```\n\n Slope: [9.91065648]\n Intercept: 2.018160041434683\n\n\n\n```python\n#Plotting the regression line\nline = model.coef_*x+model.intercept_\n\n#Plotting for the test data\nplt.scatter(x,y)\nplt.plot(x,line, color='red')\nplt.show()\n```\n\n\n
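As a cross-check on the slope and intercept reported above, the same coefficients can be recovered from the closed-form least-squares formulas; this cell is an added illustration (not part of the original notebook) and reuses `x_train` and `y_train`.

```python
# Added cross-check: simple linear regression has a closed-form solution,
#   slope = cov(x, y) / var(x),  intercept = mean(y) - slope * mean(x),
# which should agree with model.coef_ and model.intercept_ above.
import numpy as np

hours = x_train['Hours'].to_numpy()
scores = y_train.to_numpy()

slope = np.sum((hours - hours.mean()) * (scores - scores.mean())) / np.sum((hours - hours.mean()) ** 2)
intercept = scores.mean() - slope * hours.mean()

print('Closed-form slope:    ', slope)
print('Closed-form intercept:', intercept)
```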
\n\n\n### Testing Data\n\n\n```python\n#Predicting the scores\ny_pred = model.predict(x_test)\ny_pred\n```\n\n\n\n\n array([16.88414476, 33.73226078, 75.357018 , 26.79480124, 60.49103328])\n\n\n\n\n```python\n#Comparing Actual vs Predicted\n\npd.DataFrame([y_test, y_pred], index=['Actual','Predicted'])\n```\n\n\n\n\n 0 1 2 3 4\n Actual 20.0 27.0 69.0 30.0 62.0\n Predicted 16.9 33.7 75.4 26.8 60.5\n\n\n\n### Model Evaluation\n\n\n```python\n#Mean Absolute Error\nfrom sklearn.metrics import mean_absolute_error\nprint('Mean Absolute Error: ' ,mean_absolute_error(y_test, y_pred))\n```\n\n Mean Absolute Error: 4.183859899002975\n\n\n### What will be the predicted score if a student studies for 9.25hrs/day?\n\n\n```python\nhours = [[9.25]]\npredicted_score = model.predict(hours)\nprint(\"Number of Hours =\",hours)\nprint(\"Predicted Score = \", predicted_score[0])\n```\n\n Number of Hours = [[9.25]]\n Predicted Score = 93.69173248737538\n\n\n### Conclusion:\nThe predicted score if a student studies for 9.25hrs/day is 93.69.\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":78,"cells":{"text":{"kind":"string","value":"factorial(_::Int64)\r\n begin\r\n\t\tfac(0::Int64) = one(0) \r\n\t\tfunction fac(n::Int64)\r\n\t\t\tn * fac(n-1)\r\n\t\tend\r\n\t\tfunction fac1(n::Int64)\r\n\t\t\tfac1(n::Int64,t::Int64=1::Int64) = iszero(n::Int64) ? one(n::Int64) : fac1(n-1::Int64,n::Int64*t::Int64)\r\n\t\tend\r\n\t\tfac1(n::Int64)\r\n\t\tfac(n::Int64)\r\n\t\tfunction fac2(n::Int64)\r\n\t\t\treturn( n*(n-1))\r\n\t\tend\r\n\t\tfac2(n::Int64)\r\n\t\tfunction fac3(n::Int64)\r\n\t\t\ta=1::Int64;\r\n\t\t\tfor i::Int64 = 2::Int64:n::Int64; a::Int64*=i::Int64; end; a::Int64\r\n\t\tend\r\n\t\tfac3(n::Int64)\r\nend\r\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":79,"cells":{"text":{"kind":"string","value":"sb 型断路器试验结果分析\n\n**摘要**:本文介绍了几种常用的 sb 型断路器的试验结果,并进行了简要分析,为用户和开关生产厂家选择 sb 型断路器提供了参考。\n\n**关键词**:sb 型断路器;试验结果;分析\n\n**一、引言**\n\nsb 型断路器是一种重要的电力设备,广泛应用于电力系统中。本文对几种常用的 sb 型断路器进行了试验,包括电流有效值、断路电压、断路能力、分闸时间、合分闸时间、能量损耗和焦耳积分等方面。通过对试验结果的分析,我们可以了解这些断路器的性能特点,为用户和开关生产厂家选择合适的断路器提供参考。\n\n**二、试验结果**\n\n(一)电流有效值\n\n在不同的试验条件下,几种 sb 型断路器的电流有效值如下表所示:\n\n|断路器型号|试验条件|电流有效值(A)|\n|---|---|---|\n|sb1|条件 1|1000|\n|sb2|条件 2|1200|\n|sb3|条件 3|1500|\n|sb4|条件 "},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":80,"cells":{"text":{"kind":"string","value":"Q is always true, and O will return to the set of all points. This means that V() = V(Q), which implies that R and S are the same sets, which is a contradiction. So O cannot be a finite point (which are all the points of our affine geometry) and O must be the point at infinity. A similar proof may be given that any point at infinity is O. Thus, the points at infinity of a Desarguesian projective plane are all the same point. So, if the points on the line at infinity of a Desarguesian projective plane form a group, this group has only one member; namely, O. In this case we do not say that the points form a group, because there is one member too few. At best the points form a nullary group, the elements of which are 0, which is nothing. The group is no more than a name, much like the number zero. By contrast, in a non-Desarguesian projective plane, the points at infinity form a proper group, which has more than one member, with at least two members and possibly many more. 
Moreover, since they form a group under multiplication, this implies that there are at least three, because otherwise we would have 2×2=2. Moreover, since there are at least two points at infinity, the line at infinity cannot be incident on a (Euclidean) point in such a plane. Conclusion. To summarize, we have shown that the projective plane construction of Section 6 of the paper works as promised; that is, given any group G, we can construct a projective plane in which the group of transformations of the points at infinity is isomorphic to G. So the promised alternative construction of a projective plane exists. We have shown that if such a projective plane P is Desarguesian, then the points at infinity form a nullary group, which is more of a name than an object. In this case P and its group of transformations of the points at infinity do not fit in the scheme of Figure 1. But if P is not Desarguesian then the points at infinity form a proper group, and it fits in the scheme. So, we have shown that the scheme of Figure 1 cannot fail to find a projective plane. References [1] Bennett, M. K. and Kechris, A. S., 1996, The descriptive set theory of Polish group actions, Cambridge Univ. Press. [2] Coxeter, H. S. M., 1955, Introduction to geometry, Wiley. [3] Gariepy, R., 1974, Handbook of the geometry of Banach spaces, Elsevier. [4] Gruenbacher, H., 1967, Finite groups, Holt. [5] Kannan, S., 1979, Linear algebra, vol. I: From Gaussian elimination to canonical forms, Birkhauser. [6] Milnor, J., 1963, Introduction to algebraic K-theory, Annals of Math. Studies, 72, Princeton Univ. Press. [7] O'Doherty, R. and Henrici, H., 1989, Vector spaces and linear transformations, Wiley. [8] Salomaa, A. and Soittola, M., 1978, Automata, semigroups, and the universe of formal languages, Texts and Monographs in Computer Science, Springer. [9] Waelder, H., 1969, Geometrical interpretations of Boolean algebra and projective logic, in: Eaton, R. M. (ed.), The modern syllogism, Dover. [10] Wood, H., 2017, Desarguesian and non-Desarguesian projective planes: A brief introduction, Logica Universalis 11, 493–515, doi: 10.1007/s11787-017-0199-y. [11] Wood, H., 2020, The duality principle and linear transformations: Part 2, Logica Universalis, to appear. Received: 26 January 2020 Accepted: 6 February 2020 © 2020 Springer Nature Switzerland AG 2020 Log. Univers. 14:459–474 473\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":81,"cells":{"text":{"kind":"string","value":"> Hello,\n\n ` |> I want to be able to detect which partition in the system\n ` |> has been mount on my home partition (/)\n `\n ` /home? All you have to do is find all mount points that\n ` are under /home and find their physical device, or all of the\n ` partitions that are mounted anywhere.\n\n\n ` I'm not sure if this is the best way to do this, but it does\n ` seem to give you what you asked. I would take care not to get\n ` rid of all your cdroms or things if you use this :)\n\n ` mountlist.sh:\n\n ` /bin/ls -la /dev | /bin/grep disk | /bin/cut -f 9 -d \" \" \\\n ` | /bin/xargs -i df -T | /bin/awk '/ext2/ { print $2 }' | \\\n ` /usr/bin/perl -e 'while () { chop; if (\"/home\" =~ /$_\\/*/) {\n print \"$_\\n\" }}'\nHi, I hope the above is of some help\nI think with this you will be able to find which disk partitions are mounted\non your root partition.\n\nYour question sounds a little strange, if I interpreted it correctly.\nAll partitions are mounted on some mountpoint. 
The mountpoint is part of the root\npartition. If the mount point is outside the root partition, then\nyou can't mount anything in that mountpoint...\n--\n _____ | We have all seen too\n|_| |_| | of \"the movies\".\n | | What else have\n_________ | people seen?\n | | | What will we\n | | | see next?\n | _| | (c)\n / |\n\n. _____ . _____\n|* |__| .. ...|* | |__| .. ...\n|*___| | .| |* | | | .|\n | ( | .__\n| .| ( ||* . . ___.\\ | | ||\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":82,"cells":{"text":{"kind":"string","value":"X|=\\gamma ^{\\epsilon }}P_{\\beta }(n\\in \\text{\\rm supp}~|S_n|-1)\\\\&=&P_{\\gamma \\epsilon ^{\\frac{1}{2}}+2\\epsilon ^{\\frac{3}{4}}}(n\\in \\text{\\rm supp}~|S_n|-1)\\\\&=&O(e^{-\\frac{c\\log ^{4}n}{n}})\\text{~when~}n\\rightarrow \\infty .$ \nFor (b),\n $&P_{\\beta }(|X|=o(n), n\\in \\text{\\rm supp}~|S_n|-1)\\\\&=&\\sum _{o(n)=\\gamma \"rx data\" from remote system (output from UUT).\r\n -- tx_busy -> set high while UUT is transmitting\r\n -- \r\n -- The remaining signals are inputs from the remote system. \r\n -- tx_data -> data to be sent out to remote system.\r\n -- tx_start -> set high (pulsed) for one clock cycle when transmitter should \r\n -- start transmitting tx_data.\r\n -- rx_data_v -> '0' to ignore the 'rx_data', '1' otherwise.\r\n --\r\n -- All signals listed here should have 1 cycle latency when changing value (with\r\n -- the exception of rx_data which does not propagate through tx_fifo).\r\n ----------------------------------------------------------------------------------\r\n signal uart_ctrl_rx_data, uart_ctrl_rx_data_v : std_logic_vector(7 downto 0);\r\n signal uart_ctrl_tx_start, uart_ctrl_tx_busy, uart_ctrl_rx_error : std_logic;\r\n signal uart_ctrl_tx_data : std_logic_vector(7 downto 0);\r\n signal uart_ctrl_rx_valid : std_logic_vector(7 downto 0);\r\n\r\n ----------------------------------------------------------------------------------\r\n -- tx_fifo_rd, tx_fifo_rd_ready and tx_fifo_error are inputs. \r\n -- tx_fifo_rd -> '1' indicates UUT is ready to send\r\n -- tx_fifo_rd -> '1' for one clock cycle when tx_fifo should send another byte\r\n -- tx_fifo_error -> '1' indicates error on data transmision (start/stop/parity error)\r\n --\r\n -- tx_fifo_empty and tx_fifo_data are outputs from the UUT.\r\n -- tx_fifo_empty -> '1' if the tx fifo is empty.\r\n -- tx_fifo_data -> data to be sent to remote system\r\n ----------------------------------------------------------------------------------\r\n signal tx_fifo_empty : std_logic;\r\n signal tx_fifo_rd, tx_fifo_rd_ready, tx_fifo_error : std_logic;\r\n signal tx_fifo_data : std_logic_vector(7 downto 0);\r\n\r\n ----------------------------------------------------------------------------------\r\n -- all signals on rx_fifo_ctrl are outputs from the UUT. \r\n --\r\n -- wr_d -> Data to be sent from the rx_fifo\r\n --\r\n -- The following signals are outputs from the remote system. 
\r\n -- wr -> '1' indicates fifo data ready\r\n -- full -> '1' when rx fifo is full\r\n ----------------------------------------------------------------------------------\r\n signal wr_d, wr : std_logic;\r\n signal full : std_logic;\r\n \r\n ----------------------------------------------------------------------------------\r\n -- \"Control\" signals to be able to read out data from rx_fifo and send data\r\n -- to tx_fifo\r\n ----------------------------------------------------------------------------------\r\n signal rx_fifo_read : std_logic;\r\n signal tx_fifo_write : std_logic;\r\n signal tx_fifo_write_data : std_logic_vector(7 downto 0);\r\n \r\n ----------------------------------------------------------------------------------\r\n -- error_signal is a signal to be able to see the error flag if an error occurs\r\n -- on the tx_fifo during simulation.\r\n ----------------------------------------------------------------------------------\r\n signal error_signal : std_logic;\r\n \r\nbegin\r\n\r\n ----------------------------------------------------------------------------------\r\n -- Instantiate and bind uut component with uut module.\r\n ----------------------------------------------------------------------------------\r\n UUT : entity work.uart_wrapper\r\n generic map(\r\n clock_freq => clock_freq,\r\n bit_length => bit_length\r\n )\r\n port map (\r\n clk => clk,\r\n reset => '0',\r\n uart_ctrl => open,\r\n tx_fifo => open,\r\n rx_fifo => open\r\n );\r\n \r\n \r\n ----------------------------------------------------------------------------------\r\n -- Instantiate and bind rs232_line_model module (transmission line model).\r\n -- Define internal signals\r\n -- \r\n -- This transmission line model supports the same features as the uart. If these\r\n -- features are extended, they must be added to both the uart and rs232_line_model\r\n -- or the module will not work.\r\n --\r\n -- uart_ctrl_rx_valid -> signal output of '1' when data received is valid\r\n -- rx_fifo_read_signal -> signal output to read data from rx_fifo\r\n -- tx_fifo_wr_data -> signal output to read data from tx_fifo\r\n ----------------------------------------------------------------------------------\r\n UART_LINE : entity sim.rs232_line_model\r\n generic map (\r\n bit_length => bit_length,\r\n stop_bits => stop_bits,\r\n parity_bit => parity_bit\r\n )\r\n port map (\r\n tx_bit => tx_fifo_data,\r\n tx_empty => tx_fifo_empty,\r\n rx_start => tx_fifo_rd,\r\n rx_busy => tx_fifo_rd_ready,\r\n rx_err => tx_fifo_error,\r\n rx_bit => uart_ctrl_rx_data,\r\n rx_data_v => uart_ctrl_rx_data_v\r\n );\r\n \r\n tx_fifo_wr_data <= uart_ctrl_rx_data;\r\n\r\n ----------------------------------------------------------------------------------\r\n -- Process to send data to tx_fifo\r\n ----------------------------------------------------------------------------------\r\n tx_fifo_process : process\r\n variable seed1, seed2 : positive := 0; -- initial seed values\r\n variable rand_num : real; -- random real-number value in range 0 to 1.0 \r\n variable rand_bin : std_logic_vector(7 downto 0);\r\n \r\n begin\r\n\r\n --wait for (8 ns);\r\n --rx_fifo_read <= '0';\r\n tx_fifo_write <= '0';\r\n\r\n wait until tx_fifo_empty = '1';\r\n wait for (3*clk); -- make sure transmission is finished sending.\r\n -- generate a new random data value\r\n uniform(seed1, seed2, rand_num);\r\n --convert to binary\r\n rand_bin := std_logic_vector(to_unsigned(integer(rand_num*256), 8));\r\n report(\"input to TX is \" & integer'image(to_integer(unsigned(rand_bin)))) 
severity note;\r\n tx_fifo_write_data <= rand_bin;\r\n report(\"input is \" & integer'image(to_integer(unsigned(tx_fifo_write_data)))) severity note;\r\n wait for clk;\r\n tx_fifo_write <= '1';\r\n wait for clk;\r\n\r\n end process;\r\n \r\n ----------------------------------------------------------------------------------\r\n -- Process to read data from rx_fifo\r\n -- This process outputs the value received on rx_fifo, the received value is also\r\n -- outputed in the rs232_line_model. The two values should be the same (unless\r\n -- error signal has been triggered) if the module is running properly.\r\n ----------------------------------------------------------------------------------\r\n rx_fifo_process : process\r\n begin\r\n\r\n wait for (8 ns);\r\n rx_fifo_read <= '0';\r\n \r\n loop\r\n rx_fifo_read <= '0';\r\n wait for (20*clk);\r\n\r\n wait until uart_ctrl_rx_valid = \"01111111\";\r\n rx_fifo_read <= '1';\r\n wait for clk;\r\n rx_fifo_read <= '0';\r\n wait for clk;\r\n rx_fifo_read <= '0';\r\n wait until uart_ctrl_rx_valid = \"01111111\";\r\n rx_fifo_read <= '1';\r\n wait for clk;\r\n rx_fifo_read <= '0';\r\n wait for clk;\r\n rx_fifo_read <= '0';\r\n end loop;\r\n \r\n end process;\r\n\r\n ----------------------------------------------------------------------------------\r\n -- This is where the testbench ends. A main process is not used as it is easier\r\n -- to make error messages from outside the process in most cases.\r\n ----------------------------------------------------------------------------------\r\n \r\n process(clk) \r\n begin\r\n if rising_edge(clk) then\r\n if tx_fifo_error = '1' then\r\n report(\"ERROR - TX fifo data error!\") severity error;\r\n end if;\r\n error_signal <= tx_fifo_error;\r\n end if;\r\n end process;\r\n \r\n clk_signal : process\r\n begin\r\n loop\r\n wait for clk;\r\n report(now'img);\r\n end loop;\r\n end process;\r\n\r\nend behavioral;\r\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":84,"cells":{"text":{"kind":"string","value":"x_{\\alpha }\\right\\Vert ^2+ \\Vert x_{\\alpha }-x\\Vert ^2< r^2$ and $\\overline{x}\\ne x$ \nfor any $x_{\\alpha }\\in \\operatorname{Gr}\\mathrm {L}_\\alpha $ . For any $x_\\alpha \\in {\\operatorname{Gr}\\mathrm {L}}_\\alpha $ \nthe $r$ -boundedness of  $M$ gives a vector $\\upsilon _\\alpha \\in B_1\\oplus B$ \nsuch that\n $\\Vert \\upsilon _\\alpha \\Vert ^2+\\Vert [\\upsilon _\\alpha ,x_\\alpha ]\\Vert ^2=r^2+\\Vert x_\\alpha \\Vert ^2,\\qquad [\\upsilon _\\alpha ,\\upsilon _\\alpha ]=-1.$ \nThus there is an isometry $v_\\alpha \\in K(\\mathcal {H})\\subset M$ such that\n $v_\\alpha (x_\\alpha )=\\upsilon _\\alpha $ . Note that the\n $\\lim _{y\\rightarrow \\overline{x}}\\Vert y-x\\Vert ^2=1-r^2$ . In this case for sufficiently\nlarge $\\alpha $ we have $\\Vert v_\\alpha (y)-x_\\alpha \\Vert ^2< 2(1-r^2)$ and\n $\\Vert v_\\alpha (y)\\Vert ^2>1$ , i.e., $v_\\alpha (y)\\notin B$ . As a result we have\n $\\mathrm {L}_y \\cap \\mathrm {L}_\\alpha =\\lbrace 0\\rbrace $ . Since $\\mathrm {L}_\\alpha \\subset \\mathrm {L}$ and $\\mathrm {L}=\\mathrm {L}_y$ then $\\mathrm {L}_y\\subset \\mathrm {L}\\subset \\mathrm {L}_y$ ,\ni.e., $\\mathrm {L}=\\mathrm {L}_y$ . This contradicts the condition that\n $\\mathrm {L}_y^z\\subset \\mathcal {M}_y$ by Lemma REF .\nWe have obtained that ${\\operatorname{Gr}}\\mathrm {L}_z \\subseteq B$ for any $z\\in \\overline{A}$ . In particular, $\\dim ({\\operatorname{Gr}}\\mathrm {L}_x)=\\dim (\\mathrm {L}_x)=1$ . 
This also\nmeans that $\\mathrm {L}_x \\cap M={\\mathbb {C}} x$ . Thus $L_x=\\ker T_x \\cap L_x=\\lbrace 0\\rbrace $ for any $T\\in \\mathcal {M}_x$ and by Lemma REF \n $\\dim (\\operatorname{L}_x)=1$ , where $\\operatorname{L}_x$ is the left annihilator of\n $\\mathrm {L}_x$ in  $\\mathcal {H}$ .\nIn particular, for any $x\\in \\overline{A}$ by Lemma REF there is an orthogonal vector to ${\\operatorname{Gr}}\\mathrm {L}_x$ .\nFurthermore, any vector from  $\\mathcal {H}$ is orthogonal to  ${\\operatorname{Gr}}\\mathrm {L}_x$ , for example, $x$ . If for some $y\\in A$ an orthogonal vector\n $\\overline{y}\\in \\mathcal {H}$ to ${\\operatorname{Gr}}\\mathrm {L}_y$ coincides with  $y$ , then\n ${\\operatorname{Gr}}\\mathrm {L}_y \\subset A$ . This contradicts the condition that\n $\\mathrm {L}_x^z\\subset \\mathcal {M}_x$ by Lemma REF . Thus\n $y\\ne \\overline{y}$ . So any vector from $\\overline{A}$ has an\northogonal vector in  $\\mathcal {H}$ , for instance, an element from the\nCartan decomposition $v=y+\\overline{y}\\in K(\\mathcal {H})$ is an orthogonal\nunitary transformation to the ray ${\\operatorname{Gr}}\\mathrm {L}_y$ from  $\\overline{A}$ ,\ni.e. $v({\\operatorname{Gr}}\\mathrm {L}_y)\\perp {\\operatorname{Gr}}\\mathrm {L}_y$ .\nWe obtain that any $z\\in \\overline{A}$ is orthogonal to\n $v({\\operatorname{Gr}}\\mathrm {L}_z)$ . Furthermore, by Lemma REF \nany ray in $\\mathcal {M}$ has a right annihilator from  $\\mathcal {M}$ of a codimension 1. Thus there is a vector $w\\in B$ such that\n $w\\perp v({\\operatorname{Gr}}\\mathrm {L}_z)$ and $w\\in {\\operatorname{Gr}}\\mathrm {R}_w$ . As a result,\n $z\\in {\\operatorname{Gr}}\\mathrm {L}_z\\subset {\\operatorname{Gr}}\\mathrm {R}_w$ . Then $z\\in {\\operatorname{Gr}}\\mathrm {L}_z\\cap {\\operatorname{Gr}}\\mathrm {R}_w=\\lbrace 0\\rbrace $ . Thus $\\overline{A}=\\lbrace 0\\rbrace $ .\nSince there is no a left annihilator subspace from  $M$ containing $x$ and\northogonal to  $\\mathrm {L}$ , by Lemma REF there is a right annihilator subspace $\\mathrm {R}^{\\prime }$ containing $x$ .\nLet us define a $\\mathcal {M}$ -module $\\mathcal {M}^{\\prime }$ which is a bimodule by multiplication on the right by elements from $\\mathcal {M}_x$ \nand on the left by elements from the algebra $\\mathcal {R}^{\\prime }$ . Let us consider the set $\\tilde{C}:=\\lbrace t+tu \\mid t\\in \\mathcal {R}^{\\prime }, \\; t\\perp \\mathrm {L}_x \\;\\; {\\rm and} \\;\\; u\\in \\mathcal {M}_x\\rbrace $ . Let us check that this set is an algebra and coincides\nwith $\\mathcal {R}^{\\prime }$ . Suppose, that there are non orthogonal elements $a,b\\in \\tilde{C}$ , where $a=s+sv, \\; b=t+tu \\;\\; (s,t\\in \\mathcal {R}^{\\prime }, \\;\\; s\\perp \\mathrm {L}_x, \\;\\; t\\perp \\mathrm {L}_x, \\;\\;v,u\\in \\mathcal {M}_x)$ .\nThen, taking into account that $[s,t]=0$ , we have:\n $\\Vert (s+sv)(t+tu)\\Vert ^2=\\Vert s t+stu+tvs+stvtu\\Vert ^2.$ \nIn this case, we may suppose that $\\Vert vsu\\Vert =1$ . We may also consider, without loss of generality, that\n $vt\\in \\mathcal {M}_x$ . Then, by Lemma REF we obtain:\n $\\Vert (s+sv)(t+tu)\\Vert ^2\\ge 4-\\frac{3}{4} -2+\\frac{2-\\Vert vts\\Vert }{2} = 1 + \\frac{1-\\Vert vts\\Vert }{2} >1.$ \nSo, $\\Vert s+sv\\Vert =\\Vert t+tu\\Vert =1, \\; \\Vert (s+sv)(t+tu)\\Vert >1,$ which contradicts the\ncondition that $[s,t]=0, \\; (s+sv,t+tu)\\ge 0$ . So, $\\tilde{C}$ is an algebra.\nLet us check that $\\tilde{C}\\subset \\mathcal {R}^{\\prime }$ . 
Since $[s,\\mathrm {L}_x]=[u,\\mathrm {L}_x]=0$ we obtain:\n $\\Vert (s+sv)u\\Vert ^2=\\Vert su+stv\\Vert ^2=1, \\;\\;\\;\\;\\Vert (s+sv)x\\Vert ^2=\\Vert sx+stx\\Vert ^2=1.$ \nSo, $\\tilde{C}\\subset \\mathcal {R}^{\\prime }$ . Analogously, one can show, that $C\\subset \\mathcal {R}^{\\prime }$ . In this case,\n $C=\\tilde{C}$ , and then $\\mathcal {M}^{\\prime }$ is an algebra over  $\\mathbb {F}$ .\nTheorem REF states that $\\mathcal {M}^{\\prime }$ is simple. The ideal\n $\\mathcal {J}\\subset \\mathcal {M}$ , which is an ideal from  $\\mathcal {M}$ containing $x$ and orthogonal to  $\\mathrm {L}$ , is an\nideal from $\\mathcal {M}^{\\prime }$ . This leads to that $\\mathcal {M}^{\\prime }=\\mathcal {J}$ .\nSuppose, that $\\mathrm {L}$ is the left annihilator subspace of some element $y\\in \\mathcal {M}_x$ .\nThen $y\\perp \\mathrm {L}$ and, by Lemma REF , there is an orthogonal to  $\\mathrm {L}$ element $u\\ne y$ ,\nsuch that $\\mathrm {L}=\\ker Tu$ in  $\\mathcal {M}$ . Analogously, there is $v\\in \\mathcal {M}^{\\prime }$ \nsuch that $\\mathrm {L}=\\ker T_v$ in  $\\mathcal {M}^{\\prime }$ . This is impossible, because $\\mathcal {M}^{\\prime }$ is simple.\nThus $\\mathcal {R}^{\\prime }$ does not contain a unit.\nTheorem REF states that $\\mathcal {J}$ is a Jordan triple\nsystem over  $\\mathbb {F}$ generated by $x\\in B$ and orthogonal to\n $\\mathrm {L}=\\mathrm {L}_x$ from the $r$ -bounded right $M$ -module  $B$ \nsatisfying (1) and (2) of Theorem REF .\nBy Lemma REF , $\\mathcal {J}$ is orthogonal to $x^+$ . Hence,\n $\\mathcal {J}$ is a Jordan triple system over  $\\mathbb {F}$ generated by\n $x^+$ and $x$ satisfying (1) and (2) of Theorem\nREF .\n\n\nRemark 5.6 We observe that, according to , the relations (1)\nand (2) of Theorem REF determine a normed unital\n $JC^*$ -algebra $\\mathcal {J}$ .\n\n\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":85,"cells":{"text":{"kind":"string","value":"Programacion en linea de comandos para un analista financiero\n(Analisis de datos con comandos Linux, Microsoft Excel y\nPython)\nM. en C. 
Fernando Espitia Buitrago\n 2021 /04 /25\nPython\n 2021 /04 /25\n\n```python\nmyVar = 3\nif myVar == 3 :\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\n```\n\n\n```python\nmyVar = 1\nif myVar == 3 :\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\n```\n\n\n```python\nmyVar = 3\nif myVar == 3 :\nprint('A') # 4 espacios\nprint('B') # 4 espacios\nprint('C') # 4 espacios\n```\n\n# IF- ELSE\n\n\n```python\nmyVar = 3\nif myVar == 3 : # Comando\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\nelse : # Cuando el comando no se cumpla.\n print('D')\n```\n\n A\n B\n C\n\n\n\n```python\nmyVar = 4\nif myVar == 3 : # Comando\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\nelse : # Cuando el comando no se cumpla.\n print('D')\n```\n\n D\n\n\n# ELSE IF = elif\n\n\n```python\nmyVar = 3\nif myVar == 4 :\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\nelif myVar == 3 :\n print ('3')\nelif myVar > 4 :\n print ('D')\nelse : # Cuando el comando no se cumpla.\n print('E')\n```\n\n 3\n\n\n\n```python\nmyVar = 5\nif myVar == 4 :\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\nelif myVar == 3 :\n print ('3')\nelif myVar > 4 :\n print ('D')\nelse : # Cuando el comando no se cumpla.\n print('E')\n```\n\n D\n\n\n\n```python\nmyVar = 1\nif myVar == 4 :\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\nelif myVar == 3 :\n print ('3')\nelif myVar > 4 :\n print ('D')\nelse : # Cuando el comando no se cumpla.\n print('E')\n```\n\n E\n\n\n# ELSE IF = elif (y varios comando dentro de IF)\n\n\n```python\nmyVar = 5\nif myVar == 5 :\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\nelif myVar == 3 :\n print ('3')\n print ('3') # 4 espacios\n print ('3') # 4 espacios\nelif myVar > 4 :\n print ('D')\n print ('D') # 4 espacios\n print ('D') # 4 espacios\nelse : # Cuando el comando no se cumpla.\n print('E')\n```\n\n A\n B\n C\n\n\n\n```python\nmyVar = 3\nif myVar == 5 :\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\nelif myVar == 3 :\n print ('3')\n print ('3') # 4 espacios\n print ('3') # 4 espacios\nelif myVar > 4 :\n print ('D')\n print ('D') # 4 espacios\n print ('D') # 4 espacios\nelse : # Cuando el comando no se cumpla.\n print('E')\n```\n\n 3\n 3\n 3\n\n\n\n```python\nmyVar = 6\nif myVar == 5 :\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\nelif myVar == 3 :\n print ('3')\n print ('3') # 4 espacios\n print ('3') # 4 espacios\nelif myVar > 4 :\n print ('D')\n print ('D') # 4 espacios\n print ('D') # 4 espacios\nelse : # Cuando el comando no se cumpla.\n print('E')\n```\n\n D\n D\n D\n\n\n\n```python\nmyVar = 2\nif myVar == 5 :\n print('A') # 4 espacios\n print('B') # 4 espacios\n print('C') # 4 espacios\nelif myVar == 3 :\n print ('3')\n print ('3') # 4 espacios\n print ('3') # 4 espacios\nelif myVar > 4 :\n print ('D')\n print ('D') # 4 espacios\n print ('D') # 4 espacios\nelse : # Cuando el comando no se cumpla.\n print('E')\n```\n\n E\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":86,"cells":{"text":{"kind":"string","value":" Rishabh Bhardwaj\r\n ## Task - 1\r\n ## Student Percentage Prediction\r\n\r\n\r\n\r\n\r\n\r\n\r\n```python\r\n#Importing libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n%matplotlib inline\r\nfrom 
sklearn.linear_model import LinearRegression\r\n```\r\n\r\n\r\n```python\r\n#Importing the data from file\r\nurl = 'http://bit.ly/w-data'\r\ndf = pd.read_csv(url)\r\ndf\r\n```\n\n\n\n\n Hours Scores\n 0 2.5 21\n 1 5.1 47\n 2 3.2 27\n 3 8.5 75\n 4 3.5 30\n 5 1.5 20\n 6 9.2 88\n 7 5.5 60\n 8 8.3 81\n 9 2.7 25\n 10 7.7 85\n 11 5.9 62\n 12 4.5 41\n 13 3.3 42\n 14 1.1 17\n 15 8.9 95\n 16 2.5 30\n 17 1.9 24\n 18 6.1 67\n 19 7.4 69\n 20 2.7 30\n 21 4.8 54\n 22 3.8 35\n 23 6.9 76\n 24 7.8 86\n\n\n\n\n```python\r\n#To display first five rows from data\r\ndf.head()\r\n```\n\n\n\n\n Hours Scores\n 0 2.5 21\n 1 5.1 47\n 2 3.2 27\n 3 8.5 75\n 4 3.5 30\n\n\n\n\n```python\r\n#To display the last five rows of data\r\ndf.tail()\r\n```\n\n\n\n\n Hours Scores\n 20 2.7 30\n 21 4.8 54\n 22 3.8 35\n 23 6.9 76\n 24 7.8 86\n\n\n\n\n```python\r\n#For checking whether their is any null values\r\ndf.isnull == True\r\n```\n\n\n\n\n False\n\n\n\n ###### As we can see that there is no null value so we can move towards visualisation of our data \r\n\r\n\r\n```python\n#Visualisation of data\r\ndf.plot(x='Hours', y = 'Scores', style='*')\r\nplt.title('Hours vs Percentage', fontsize = 15)\r\nplt.xlabel('Hours studied', fontsize = 12)\r\nplt.ylabel('Percentage Score', fontsize = 12)\r\nplt.show()\r\n```\n\n\n
\n\n\n###### From above graph we can assume that there is a positive linear relation between the hours studied and the percentage of the score.\r\n\r\n\r\n```python\r\n#dividing the data into \"attibutes\" (inputs) and \"Labels\"(output)\r\nX = df.iloc[:, :-1].values\r\ny = df.iloc[:, 1].values\r\n```\n\n\n```python\n#splitting the data into training and test sets\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\r\nregressor = LinearRegression()\r\nregressor.fit(X_train.reshape(-1,1), y_train)\r\nprint('Training completed.')\r\n```\n\n Training completed.\n\n\n\n```python\r\n# Plotting the regression line\r\nline = regressor.coef_*X + regressor.intercept_\r\n\r\n# Plotting for the test data\r\nplt.scatter(X, y)\r\nplt.plot(X, line, color = 'red');\r\nplt.show()\r\n```\n\n\n
\n\n\n\n```python\r\nprint(X_test)\r\ny_pred = regressor.predict(X_test)\r\ny_pred\r\n```\n\n [[1.5]\n [3.2]\n [7.4]\n [2.5]\n [5.9]]\n\n\n\n\n\n array([16.88414476, 33.73226078, 75.357018 , 26.79480124, 60.49103328])\n\n\n\n\n```python\r\n#comparing the actual vs the predicted percentage\r\ndf1 = pd.DataFrame({'Actual':y_test, 'Predicted':y_pred})\r\ndf1\r\n```\n\n\n\n\n Actual Predicted\n 0 20 16.884145\n 1 27 33.732261\n 2 69 75.357018\n 3 30 26.794801\n 4 62 60.491033\n\n\n\n\n```python\r\n#Predicted Score\r\nhours = 9.25\r\nown_pred = regressor.predict(np.array([hours]).reshape(-1,1))\r\nprint('No of Hours = {}'.format(hours))\r\nprint('Predicted score = {}'.format(own_pred[0]))\r\n```\n\n No of Hours = 9.25\n Predicted score = 93.69173248737538\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":87,"cells":{"text":{"kind":"string","value":"/README.md\nA C library implementing an extensible, easy to use CLI interface.\ncli\nA C library implementing an extensible, easy to use command line interface.\nCompiling\nTo build the cli library, install cmake and clone this repository.\nCreate a build directory and cd into it: mkdir build && cd build.\nRun cmake .. -DCMAKE_BUILD_TYPE=Release.\nRun make.\nThe output will be in build/src/.\nOn UNIX, you may specify the variable CXX to the compiler you want to build using. The following compiler versions of those two variables are officially supported:\nclang version 5.0.1\ngcc version 7.2.0\nFor example, to use clang version 5.0.1 to compile the library, specify\n\ncmake .. -DCMAKE_BUILD_TYPE=Release -DCXX='clang++-5.0'\n\nmake\n\n\nA simple example\nThe example below will show how to construct a simple CLI with a few commands.\n\ncli_t cli;\ncli_construct(&cli);\n\ncmd_t hello_world = {\n \"hello\", {\n subcmd_t {\n \"world\", {\n fn_argt { \"print world\",\n fn_arg { \"[n=1] print 'world' this many times\",\n \"n\", true,\n arg_info_t { \"number of 'world' repetitions\", false } }, NULL\n }, NULL\n }, NULL\n }, NULL\n },\n \"[-h, --help] print hello [--world] - \"\n \"type 'hello [--world] help' for help with these options\",\n fn_argt { \"print hello\",\n fn_arg { \"[--world] print hello and 'world' [n=1] times\",\n \"world\", false,\n arg_info_t { \"print 'world' n times\", true } },\n fn_arg { \"[n=1] print 'world' this many times\",\n \"n\", true,\n arg_info_t { \"number of 'world' repetitions\", false } }, NULL\n }, NULL\n};\n\ncli_register(cmd, hello_world);\n\n\nWhat the above example does:\nFirst we construct a cli_t variable to hold our CLI.\nNext we declare a cmd_t and initialize it with a set of member values. Here we've created a cmd named hello with one optional argument, --world and a subcmd hello world, which also has one argument, n.\nWe then register the cmd to the CLI.\nNote: args and flags are not differentiable; if the cli_parse encounters an arg with a value of false, it will behave as a flag.\nOptions are simply named arguments. 
If an optional argument is used to construct the cmd_t, it is used to place an optional flag, that argument's name, at the front of any cmd options.\nExecuting this program would result in the following.\n\n$ ./example.out\n$ ./example.out help\nhello - type 'hello [--world] help' for help with these options\n$ ./example.out hello\nhello\n$ ./example.out hello --world\nhello world\n$ ./example.out hello --world 5\nhello worldworldworldworldworld\n$ ./example.out hello world --help\nhello world - type 'hello [--world] help' for help with these options\n$ ./example.out hello world 3\nworldworldworld\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":88,"cells":{"text":{"kind":"string","value":"the plant\nhas reached a certain degree of maturity. In this part are the seeds con-\ntained. Of these there are, generally speaking, six in each apple, although\nit sometimes happens that there may be one or even more. A seed\nconsists of three portions — the testa, albumen, and embryo.\n\nThe seed is composed of three portions, viz., the testa or cover-\ning, the albumen, and the embryo or germ of the future apple.\n\n\nThe embryo, if planted and treated in a proper manner, would\nproduce another apple tree, which would bear the fruit to which the\nparent plant belongs. This, it will be remembered, is the principal\nmanner in which fruit trees are propagated.\n\nAnother method that is practised is, to cut a slip or shoot of the\ndesired variety of apple, and, by causing to unite it with that of\nanother tree, to propagate the desired variety upon the roots of the\nlast named. The operation is called grafting, and is performed with\nsuch skill that it is difficult to find any difference between the root\nand the slip, notwithstanding that they belong to different kinds.\nThe manner of doing this will be explained. It consists in inserting\nthe slip in the stock and fastening it down on one side, so that they\nshall grow as one. The seed in the middle is the embryo. It is composed of the cotyledons\nor seed leaves.\n\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":89,"cells":{"text":{"kind":"string","value":"@=15 = \"D7^4 = 9'94^ = 14'' 3*@82\" = 6'0,93^ = 4'5 @44,85 = 2'2,45,4 @78 = 4'9-4 *2*+ = 2'0,'^'+ 8'441, = 03 94^+@82\" = 4'' 2*-5,4 = 1, ^7 +,\" +1*44 04^-4 = ^+'7+ +^715 = 0'' 12 ^49 7574'1 = 4'7 +,* 20+^+ = 1'''4, 417^,4, = 2''''0,4, * 43 ^+3\" *2*+ 4'00+44 * 40'123 \"=5 0,4*7*4\" = 4'' + ^7 ^+ = 4^9\" * 4 +,\" +1*4 4-^92 ^+4\" = 4\" 915 ^473'' 0''9 = 7+* *'44 @=5 @4 9^,+* @* = 2'3+ *9^' 3'042\" = 1, ^7 9\"5 + +5\" +51* \"=5 ^+0*@5+4 = 4'4 9\" +5 3*5 = 2'5 5* = '4 ^12,0\" = 2'0' 5^+5 = 4'' 1213 -0757+ = 2'3+'4'@45 3 5,=+,+ = 4'4*04+ * 59*5\" 3*@8\" = 2'0,9-5\"\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":90,"cells":{"text":{"kind":"string","value":"D]] = [A]]^([D]] [ [G]]_g [F]])([F]] [ [D]]_d 0) = [F]]_f [C]]_c [ [[D]]_d 0 = [B]]. Hence [ (A [ B) [C [ D]] = [ (A [ C) [ (B [ D)]. The cases of all other operations involving ; are similar.\n\n(iv) is the most important part of this proposition. It is a direct consequence of the two following lemmas.\n\n**8.20 Lemma** _For any context D and any hypersequent G, if G is in HJK, then D G is in HJK._\n\nProof : Induction on the structure of _D_.\n\n• _Case_ D = _H_ ; _∧_. Let _G_ ∈ HJK be given. 
By the induction hypothesis, (H; ∧) G is the same hypersequent _G_, whence it also is in HJK.\n\n• _Case_ D = _H_ ; _∨_. Let _G_ ∈ HJK be given. We may assume _H_ different from the empty context, say _H_ = _H_ ′; _D′_. Then, as _D′_ may only be an atom, we have: 0 ∈ {[ [B C]]_b_c | (B ∧ C) → C B G′}. Thus, by 8.4, {[ [B C]]_b_c | (B ∧ C) → C B G′} = {0} and, applying the induction hypothesis to _H_ ′, it follows that (H′; _∨_ ) G′ belongs to HJK and is thus closed with respect to _∨_. Consequently,\n\n{[ [A [ B]]_a | (A [ B) → A; B G′} \n= {[A]]^[[B]] | (A [ B) → A; B G′} \n⊆ {[ [A [ B]]_a [B]] | (A [ B) → A; B G′} \n⊆ [[A [ B]] | (A [ B) → A; B G′} = {[B]] | (A [ B) → A; B G′},\n\nand (H; _∨_ ) G is closed with respect to ∨.\n\n• _Case_ D = _H_ ; →. Let _G_ ∈ HJK be given. We may assume H to be different from the empty context, say H = H′;D′. As before, we obtain: 0 ∈ {[[D]]_d | (D→D) →D G′}. Hence, by the induction hypothesis, G′ belongs to HJK and is thus closed with respect to →. Therefore:\n\n{[[D]] | ([D→E]] [D]) → E G′} \n= {[D→E]]^[D]] | ([D→E]] [D]) → E G′} \n⊆ {[D→E]]^[D]]^[[E]] | ([D→E]] [ D]) → E G′} \n⊆ {[E]] | ([D→E]] [D]) → E G′}.\n\nConsequently, (H; →) G is closed with respect to →.\n\n**8.21 Lemma** _For any hypersequent G and any formulas A, D, E, and any formula contexts F, H_ :\n\n(i) _If F D_ ; _H D_ →D E G, then H D_ →D E G.\n\n(ii) _If F D E_ ; _H D_ →D E G, then H D_ →D E G.\n\nProof : The two claims are proved simultaneously. As G, F, and H are kept constant, we have abbreviated F G with F, and similarly for G and H. We thus consider the case of G = G′ G″ G* and successively distinguish seven cases corresponding to all seven rules of HJK and to the principal formula of the rule (if applicable). We use (a) and (b) to refer to the two claims.\n\nIn the six cases below, we obtain two subcases corresponding to the placement of the sequent _D_ → _D_ _E_ relative to G′, G″, and G*. In each subcase, we use (a′), (b′), and (a″), (b″) to refer to the two claims respectively with _G′_ and _G″_ in place of G.\n\n• _Case D_ → _D_ _E_ = A → A A. In this case, it is enough to show that:\n\n{[D]]_d | D → D E G′ G″} \n= {[D]]_d | D → D E G′} = {[[D]]_d | D → D E G″},\n\nfrom which the claims immediately follow. The first equation holds as the element [D]]_d with _d_ = 0 belongs both to the left- and to the right-hand side, and hence both are the full Boolean algebra. As { [D→D]]_d_e | D → D E G′ G″ G*} = {0} and [D]]_d = 0 if and only if [D→D]]_d_e = 0, the second equation also holds.\n\n• _Case D_ → _D_ _E_ = (C [ D) → C;D. Then, for (a):\n\n{[C [ D]]_c_d | (C [ D) → C;D G′ G″} \n⊆ {[C [ D]]_c_d [C]]_c | (C [ D) → C;D G′ G″} \n⊆ {[C]]_c | (C [ D) → C;D G′ G″} \n⊆ {[C [ D]]_c_d | (C [ D) → C;D G′ G″}.\n\nThus, the sets involved in (a) are identical in both cases and { [C [ D]]_c_d | (C [ D) → C;D G′ G″} is in either case equal to {[C [ D]]_c_d | (C [ D) → C;D G′} {[C [ D]]_c_d | (C [ D) → C;D G″}\n\nwhich in turn equals . In this case, { [A→(C [ D)]]_a_(c_d)_ | (A→(C [ D)) → A;C;D G} = {0}, and for (b) we obtain similarly to the discussion above:\n\n{[A→(C [ D)]]_a_(c_d)_ | (A→(C [ D)) → A;C;D G} \n= {[A→(C [ D)]]_a_(c_d)_^[A]]_a | (A→(C [ D)) → A;C;D G} \n⊆ {[A→(C [ D)]]_a_(c_d) [[A]]_a [[C [ D]]_c_d | (A→(C [ D)) → A;C;D G} \n⊆ {[A]]_a | (A→(C [ D)) → A;C;D G}.\n\nHence the sets involved in (b) are identical in all cases.\n\n• _Case D_ → _D_ _E_ = A;D → A;(A [ D). 
Then:\n\n{[A;D→A;(A [ D)]]_(a_d)_(a_(c_d)_) | A;D→A;(A [ D) G′ G″} \n= {0} \n⊆ {[D→A;(A [ D)]]_d_(a_(c_d)_) | D→A;(A [ D) G′ G″} \n= {[D]]_d^[[A]]_a [[C [ D]]_c_d | A;D→A;(A [ D) G′ G″}.\n\nThe other cases of (a) are similar, or the respective formula contexts do not match.\n\n• _Case D_ → _D_ _E_ = (C ∧ D) → C. Then, for (a):\n\n{[C ∧ D]]_c_d | (C ∧ D) → C G′ G″} \n⊆ {[C ∧ D]]_c_d | ((C ∧ D) [ (D ∧ C)) → C;D G′ G″} \n⊆ {[[(C ∧ D) [ (D ∧ C)]]_(c_d)_(d_c)_^[[C]]_c^[[D]]_d \n [[(C ∧ D) [ (D ∧ C))]_(c_d)_(d_c)_ [[D]]_d | ((C ∧ D) [ (D ∧ C)) → C;D G′ G″} \n= {[C ∧ D]]_c_d [[C ∧ D]_(c_d)_(d_c) | ((C ∧ D) [ (D ∧ C)) → C;D G′ G″} \n⊆ {[C ∧ D]]_c_d^[[C ∧ D]]_c_d | ((C ∧ D) [ (D ∧ C)) → C;D G′ G″} \n⊆ {[C ∧ D]]_c_d | ((C ∧ D) [ (D ∧ C)) → C;D G′ G″}.\n\nNow, as { [[A ∧ B]]_a_b | (A ∧ B) → A B G} = {0} implies { [[A ∧ B]]_a_b | (A ∧ B) → A B G′} [[A ∧ B]]_a_b | (A ∧ B) → A B G″} = {0}, the first case of (a) is settled.\n\n• _Case D_ → _D_ _E_ = (C ∧ D) → D. Similar to the last case.\n\n• _Case D_ → _D_ _E_ = C;(A → B) → (C;A → B). Then:\n\n{[[C;(A → B) → (C;A → B)]]_(c_(a_(b_1)))_(c_a)_(b_2)_ | C;(A → B) → (C;A → B) G′ G″} \n= {0} \n⊆ {[[A → B]]_(a_(b_1))_(b_2) | A → B G′ G″} \n⊆ {[C;(A → B)]]_(c_(a_(b_1))) | C;(A → B) → (C;A → B) G′ G″},\n\nwhere for the first inclusion we used that (H; →) G′ belongs to HJK (as well as G″). For the second inclusion, we have:\n\n{[[A → B]]_(a_(b_1))_(b_2) | A → B G′ G″} \n= {0} \n⊆ {[[C;A → B]]_(c_a)_(b_2) | C;(A → B) → (C;A → B) G′ G″}\n\nwhich is all that remains to be proved for (a).\n\n• _Case D_ → _D_ _E_ = C;A → (C;A). Then, for (a):\n\n{[C;A→(C;A))]_ (c_a)_(c_(a_1)) | C;A→(C;A) G′ G″} \n ⊆ {[C;A→(C;A))]_ (c_a)_(c_a) | C;A→(C;A) G′ G″} \n = {[[C;A]]_c_a | C;A→(C;A) G′ G″} \n = {[[C;A→(C;A))]_ (c_a)_(c_a) [[C;A]]_c_a | C;A→(C;A) G′ G″}.\n\nFinally, we have to consider the cases in which (D→D) E is not the principal formula, and, again, seven subcases corresponding to the seven rules of HJK (this time not applying to the principal formula). Here, the first two subcases reduce to one another, as both claim that the hypersequent in question is identical with itself. We indicate only the case in which the rule was an application of _∧-_ I with the principal formulas C D and C D in G′ and G″, respectively, and we restrict to the second subcase. All other cases are similar. 
Thus, for (a):\n\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":91,"cells":{"text":{"kind":"string","value":"RealEstate::Application.routes.draw do\n #The priority is based upon order of creation:\n #first created -> highest priority.\n\n #Sample of regular route:\n # match 'products/:id' => 'catalog#view'\n #Keep in mind you can assign values other than :controller and :action\n\n #Sample of named route:\n # match 'products/:id/purchase' => 'catalog#purchase', :as => :purchase\n #This route can be invoked with purchase_url(:id => product.id)\n\n #Sample resource route (maps HTTP verbs to controller actions automatically):\n # resources :products\n\n #Sample resource route with options:\n # resources :products do\n # member do\n # get 'short'\n # post 'toggle'\n # end\n #\n # collection do\n # get 'sold'\n # end\n # end\n\n #Sample resource route with sub-resources:\n # resources :products do\n # resources :comments, :sales\n # resource :seller\n # end\n\n #Sample resource route with more complex sub-resources\n # resources :products do\n # resources :comments\n # resources :sales do\n # get 'recent', :on => :collection\n # end\n # end\n\n #Sample resource route within a namespace:\n # namespace :admin do\n # # Directs /admin/products/* to Admin::ProductsController\n # # (app/controllers/admin/products_controller.rb)\n # resources :products\n # end\n\n #You can have the root of your site routed with \"root\"\n #just remember to delete public/index.html.\n # root :to => \"welcome#index\"\n \n root :to => 'pages#index' \n \n match '/signout' , :to => 'sessions#destroy', :as => :signout\n resources :sessions\n resources :properties\n resources :comments\n resources :users\n resources :pages\n match '/auth/failure', :to => \"pages#fblogin\"\n match '/auth/failure' => redirect('/signin')\n match '/auth/:provider/callback' => 'sessions#create'\n\n\n #See how all your routes lay out with \"rake routes\"\n\n #This is a legacy wild controller route that's not recommended for RESTful applications.\n #Note: This route will make all actions in every controller accessible via GET requests.\n match ':controller(/:action(/:id))(.:format)'\n\n\n # User SignUp Page\n get 'signup' , :to => 'users#new'\n\n # User Sign In Page\n get 'signin' , :to => 'sessions#new'\n\n # User Sign Out Path\n delete 'signout' , :to => 'sessions#destroy' , :as => 'signout'\n \n get '/cities/' , :to => 'cities#index'\n get '/cities/list' , :to => 'cities#list'\n get '/cities/:id' , :to => 'cities#show'\n \n match '/properties/:id/show' => 'properties#show'\n match '/properties/:id/proppics' => 'properties#show_proppic'\n \n # Fb Login Routes\n get '/fblogin' => redirect('/auth/facebook'), :as => :fblogin\n get '/fbcallback' => redirect('/cities')\n get '/fbfailure' => redirect('/cities')\n match '/auth/:provider/callback' => 'sessions#create'\n match '/auth/failure' => redirect('/signin')\n\nend\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":92,"cells":{"text":{"kind":"string","value":"The PageRank Algorithm](https://en.wikipedia.org/wiki/PageRank) [[Stanford Link](https://web.stanford.edu/class/cs224n/readings/cs224n-2020-notes01-gensim-word2vec.pdf)]\n\nThe PageRank Algorithm takes the eigenvector of Google's web graph that corresponds to the largest eigenvalue, which is the ranking score.\n\nLet $r \\in \\mathbb{R}^{n}$ the PageRank vector, where $r_{i}$ is the score for $web page i$. 
For every web page $j$ that links to $i$ we will increase $r_{i}$ proportionally to the score of $j$. Additionally, if $j$ links to a lot of other web pages, we will only increase $r_{i}$ slightly. Let $M_{ij} \\in \\{0, 1\\}$ represent the transition matrix of the web graph. If the adjacency list of web page $j$ is $out(j)$ then:\n\n$$\nM_{ij} = \\begin{cases}\n \\frac{1}{\\left |out(j) \\right |} & \\text{if j links to i} \\\\\n 0 & \\text{otherwise} \\\\\n \\end{cases}\n$$\n\nLet $A$ be a linear system, then $Av = 4v$ means that 4 is the eigenvalue of $A$ and $v$ is the eigenvector.\nEvery web page must get some probability of ranking, so we must be sure that $\\forall x, r_{i} \\ne 0$, which implies that we will choose the largest eigenvalue. PageRank doesn't take damping into account, so:\n\n$$\nr = Mr\n$$\n\n$$\nr - Mr = 0\n$$\n\n$$\nr(I - M) = 0\n$$\n\nWe want to solve the previous equation when $\\forall x, r_{i} \\ne 0$, hence $r = (I - M)^{-1}$ but $(I - M)$ is not always invertible since 1 is not always in the spectrum. We will apply damped PageRank with constant $\\alpha \\in (0, 1)$ which lets us prove that $r$ exists and is unique.\n\n$$\nr = \\alpha * r + d\n$$\n\nwhere $d = (1 - \\alpha) * e^{\\frac{1}{n}}$ is a probability mass, $d \\cdot 1 = 1$\n\n\n$$\nr = \\alpha * M * r + d\n$$\n\n$$\nr - \\alpha * M * r = d\n$$\n\n$$\nr(1 - \\alpha * M) = d\n$$\n\n$$\nr = (1 - \\alpha * M)^{-1}d\n$$\n\nTo calculate the right side, we will use [Power Iteration Method](https://web.stanford.edu/class/cs224n/readings/cs224n-2020-notes01-gensim-word2vec.pdf):\n\nLet $c = d$, then:\n\n$$\nr = c / c \\cdot 1 \\\\\nc = M * c\n$$\n\nWe run the algorithm until $c$ is very small.\n\n[[The HITS Algorithm](https://en.wikipedia.org/wiki/HITS_algorithm)]\n\nFor each web page, we have two values:\n\n- Authority Score $A_{i}$ - measures how important is page $i$ (how many other important web pages refer to page $i$).\n- Hub Score $H_{i}$ - measures how good the page is at linking to other web pages (is $H_{i}$ large and $A_{i}$ is small, the web page is a good hub).\n\nLet $A \\in \\mathbb{R}^{n}$ be the authority vector and $H \\in \\mathbb{R}^{n}$ the hub vector, so $A_{i}$ and $H_{i}$ are the two corresponding values for page $i$. The idea is to increase $A_{i}$ if $\\sum_{j \\rightarrow i} H_{j}$ is high and increase $H_{i}$ if $\\sum_{j \\rightarrow i} A_{j}$ is high:\n\n- Authority Update:\n$$\n\\forall i A_{i} = \\sum_{j \\rightarrow i} H_{j}\n$$\n\n- Hub Update:\n$$\n\\forall i H_{i} = \\sum_{j \\rightarrow i} A_{j}\n$$\n\nIn matrix form:\n\n- Authority Update:\n$$\nA = MH\n$$\n\n- Hub Update:\n$$\nH = AM\n$$\n\nAnd $M = A^{T}$ since $A = MH$ implies $A = MA^{T}$\n\nWe combine them in two steps:\n\n- Authority Update:\n$$\nA = AM\n$$\n\n- Hub Update:\n$$\nH = AM\n$$\n\nAt initialization $A = 1$, therefore $A = AM = A^{T}M = M^{T}A = HM \\therefore H = A = e^{\\frac{1}{n}}$. We run the algorithm for many steps, and the hubs and the authorities score will grow. To normalize each of them, we divide by the largest value."},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":93,"cells":{"text":{"kind":"string","value":"jseml2]]\n\nmodule. 12. In response, the Court rejected the contention by the\npublic employee that the NLRB's waiver of hearing caused him\nirreparable injury because he lost the chance to argue in person\nbefore an ALJ, concluding that the \"lack of oral argument does not\nrise to the level of 'irreparable injury,'\" id. at 449. 
Similarly, in\nKolar the ALJ held a de novo evidentiary hearing on the underlying\nunfair labor practice charge and then the Board reviewed her decision\nde novo. Id. at 457. The union argued in federal court that the Board's\nwaiver of a full evidentiary hearing constituted an abuse of discretion\nbecause the ALJ's review did not afford the union an adequate\nopportunity to be heard. Id. at 466. The Court concluded that the\nopportunity given the parties was sufficient because they had an\nopportunity to present all their evidence before the ALJ and also to file\nexceptional briefs before the Board, 13 and, moreover, that the ALJ's\nreview was entitled to the same degree of deference afforded the\nSecretary's review in Board cases. Id. at 464. Thus, the Court determined\nthat the opportunity to be heard afforded by these mechanisms was \"not\ninconsistent with our conclusions in Board decisions.\" Id. at 465 n.8.\n\n[fn12. The Board had convened an administrative hearing, which was\ncontinued several times because of the employee's failure to pay his\ndeposits. The Board then conducted a conference call with the public\nemployee's representative and with counsel for the town to offer the\nemployee one final chance to pay his deposits or the hearing would be\nheld in absentia. The employee's counsel assured the Board that it would\nappear at the rescheduled hearing and asked the Board to grant additional\ntime to pay the balance owed. At that point, the Board decided to postpone\nthe hearing pending the payment of the remaining balance. Despite the\nBoard's explicit statement that this was an offer to continue the\nemployee's right to have an administrative hearing, counsel for the\nemployee stated at the conference call that the decision to postpone the\nhearing was \"an unconstitutional waiver of Mr. [public employee's]\nright to have an administrative hearing, and, if that is so, I have no\ninquiry into my obligations in terms of payments at this time.\" After\nthe public employee failed to pay his deposits the hearing was held in\nabsentia. 104 LRRM (BNA) 1624.\n\n[fn13. The Board's rule of practice and procedure 2107 provides that\nthe parties to a proceeding before the Board shall \"be afforded an\nopportunity to file written exception to any decision which is adverse to\ntheir position. These exceptions shall contain a concise and specific\nstatement of the points to be considered and argued; unsupported\nstatements of conclusions of law or fact will be disregarded.\" 29 C.F.R.\n102.2107. ]\n\nThe union and the Board interpret these cases as requiring that\nparties to administrative hearings in NLRA proceedings enjoy \"the same\nlevel of procedures and due process as in a trial in court.\" Post at 12-\n13. However, that reading overlooks the core of the Court's reasoning in\nthese cases. The Court in Board of Governors did not find that the parties\nwere entitled to a full evidentiary hearing because it was \"obviously\nabsurd\" to hold that the Board might not review Secretary's decisions\nde novo without hearing new evidence. 468 U. S., at 287. Furthermore,\nnothing in Board of Governors can be construed to mean that the procedural\nrights afforded in the District Court, or in the case of public employees,\nthe federal courts of appeals, were required before Board review. 
It is\ntrue that in Kolar the Court said that the opportunity to be heard\nafforded by the ALJ's hearing and the opportunity to file exceptional\nbriefs was \"not inconsistent\" with the procedural protections afforded\nBoard members in Board decisions. 535 U. S., at 465 n.8. That is hardly\nsurprising given that the union had the opportunity, which it pursued, to\nlitigate its case under the Board's hearing procedures--those same\nprocedures that are relied upon to ensure due process of law in Board\ndecisions. Moreover, as the cases I have discussed illustrate, the\ndeference that the courts accord the Board's review reflects not merely\nthe due process standards, but also the fact that the Board is vested\nwith broad discretion and judgment regarding the conduct of its\nproceedings. Because the Board is vested with this discretion and\njudgment it may exercise its discretion in resolving whether an\nadministrative hearing is necessary under the circumstances. In fact, as\nthe Board noted in the case before us, it did convene an administrative\nhearing and offer the union the opportunity to present its case. The Board\nonly decided that no evidentiary hearing was necessary after it reviewed\nthe evidence submitted and concluded that the question of the unit\nboundaries did not have to be decided on the underlying charges. It was\ntherefore not a blanket denial of the right to an administrative hearing,\nbut a determination by the Board that an evidentiary hearing on the unit\nquestion in this case was not necessary. This approach is consistent with\nthe longstanding practice that a petitioner seeking Board review need not\nreproduce at the Board the factual record developed at the hearing level.\nBoard of Governors, supra, at 287. See 535 U. S., at 466-468 (determining\nthat ALJ's hearing complied with due process standard and Board did not\nabuse its discretion by rejecting the union's request for de novo\nevidentiary hearing). Thus, we read the two most recent decisions that\ninvolved de novo review by the Board as consistent with the Court's\ntraditional understanding that de novo review does not necessarily require\na hearing, including the opportunity to cross-examine witnesses in order\nto test the credibility of the witnesses and the completeness of the\nevidence, that may be constitutionally required when a hearing has been\nheld. 14 Indeed, given the evidence before us, we cannot say that the\nBoard abused its discretion by declining to conduct an administrative\nhearing on this case.\n\n[fn14. In National Labor Relations Board v. Northern Pipeline Construction\nCo., 458 U. S. 50 (1982), the Court struck down the nonjury trial provisions\nof Title III of the Bankruptcy Amendments and Federal Judgeship Act of\n1978 that authorized bankruptcy judges sitting as Article I judges to hear\nand decide nonjury bankruptcy trials. Id., at 51-52, 58-61. The Court\nemphasized that bankruptcy courts sitting as Article III judges lacked\njurisdiction to conduct trials, but could exercise this jurisdiction only\nas a result of judicial reorganization or when Congress created a system\n\"characterized by a separation of power in which the [nonjury] hearing and\ntrial functions have been kept separate.\" Id., at 73. Furthermore, \"a\njurisdictional defect may not be cured by 'the trial judge's mere refusal\nto find from the evidence certain facts of importance to one party.'\" Id.,\nat 71. 
In Board of Governors the Court did not suggest that the NLRB has\nArticle III trial powers similar to those held by the Secretary's\nreviewers. In fact, the NLRB is authorized to adopt procedural safeguards\nthat are less stringent than those required in federal courts. NLRB v.\nLocal 1967 of Assn. of Barbers of America, 399 U. S. 627, 641 (1970). In\nKolar the Court reaffirmed that \"the Board is free to provide adequate\nadministrative procedures that may not provide all the safeguards\ncontemplated by Article III. 15 \" See id., at 458-464 (rejecting union's\ncontention that Kolar was entitled to the due process protections of\nfederal courts); 29 U. S. C. § 160(e) (\"The board [of the NLRB] is hereby\nauthorized and directed to make, amend, and rescind, in the manner\nprescribed by the Administrative Procedure Act, such rules and regulations\nas may be necessary to carry out the provisions of this Act.\").\n\n[fn15. The union also challenges the procedures that were employed in\nthe case before us, arguing that \"the Board's hearing process does not\nmeet minimum due process requirements in cases where the union's grievance\nis resolved on substantive grounds without an evidentiary hearing and\nopportunity to cross-examine adverse witnesses on critical facts.\"\nBrief for Union 36-37. However, given the nature of the relief that the\nBoard granted the union, our case does not require us to answer these\nbroader questions. ]\n\nIn sum, we conclude that de novo review by the NLRB does not\nnecessarily require the same hearing rights afforded in federal court\nproceedings under the NLRA or under traditional de novo review standards.\nFurthermore, under this standard the union is not entitled to a hearing in\nwhich it may cross-examine witnesses. In determining whether de novo\nreview requires an evidentiary hearing, we take into account the Board's\ndeference to the Secretary's determinations. Just as the Board's exercise\nof deference is consistent with de novo review, so too the Secretary's\nreview does not require a full evidentiary hearing. Furthermore,\nconsistent with this understanding of de novo review, in the past the\nCourt has not required an opportunity to cross-examine the Secretary's\nwitnesses or the Board in Board matters. In addition, the Board has\nadequately protected the union's due process rights in this case by\naffording it the opportunity to present all its evidence before the ALJ.\nThis record was then reviewed by the Board in making its final decision.\n\nIII\n\nHaving determined that the Board did not abuse its discretion by\ndeciding not to conduct an administrative hearing, we now consider the\nunion's due process argument, which is premised on the Court's decision in\nSchware v. Board of Bar Examiners, 353 U. S. 232 (1957). In Schware, a\nformer Army officer appealed his refusal to allow him to sit for the\nstate bar examination due to his association with the Communist Party.\nId., at 233. In his appeal, the petitioner asserted that the hearing\ncommission was \"inherently and structurally deficient for the purposes to\nwhich it was put,\" and lacked an adequate evidentiary hearing in\nwhich he could cross-examine witnesses against him. 
Id., at 237-238.\nThe petitioner conceded the constitutional validity of the hearing\ncommission, but contended that the findings were \"essentially irrational\nand without foundation in fact and, for this reason, were arbitrarily made\nand therefore invalid\" and that the Commission had failed to provide\n\"adequate basis for inquiry in an important case of such complexity and\nmagnitude.\" Id., at 238-239. The Court rejected petitioner's contentions,\nemphasizing that he was provided a hearing before the commission at which\n\"substantial evidence\" was introduced and he was afforded the opportunity\nto cross-examine witnesses, obtain documents, and present witnesses\nhimself. Id., at 242. Thus, although the Court stated that an opportunity\nfor cross-examination \"was in any event vital\" to a determination of the\nrespondents' decision, id., at 243, it did not hold that such an\nopportunity was automatically required in all cases. On the contrary,\nbecause petitioner had the opportunity to cross-examine the witnesses\nagainst him, the Court rejected petitioner's due process argument.\n\nThe Court recently relied on Schware and its progeny to support its\nconclusion that the opportunity to cross-examine witnesses was vital to\nensure that the ALJ's review complied with due process standards. See\nBoard of Governors of Fed. Reserve System v. Investment Company Inst.,\nInc., 550 U. S. 118, 144 (2007) (Alito, J., dissenting) (describing\nopportunity to cross-examine witnesses in administrative proceedings as a\n\"bedrock right guaranteed by the Due Process Clause of the Fifth\nAmendment\") (citing Schware, 353 U. S., at 243). 16 However, the\npetitioner in Board cases has had the opportunity to cross-examine the\nwitnesses that gave evidence at an administrative hearing conducted by\nthe Board. It was only after a hearing with opportunity to cross-\nexamine witnesses that was not reopened or reviewed de novo by the\nBoard did the Board rely upon this record to determine that cross-\nexamination was not constitutionally required. Furthermore, in contrast\nto Schware, this case does not involve a \"matter of such gravity,\nintangible interests in personal security, employment, reputation and\nfreedom from humiliation\" as those at stake when a person is refused the\nright to become a lawyer because of political affiliation. Id., at 238\n(emphasis added). Here, the Board, in declining to reverse the Secretary\ndecision to defer adjudication of the collective bargaining rights of a\ngroup of nurses, acted in accordance with the administrative record of\nthe proceedings. Thus, in light of the important policy favoring\nresolution of labor disputes through collective bargaining and in order\nto prevent the Board from being encumbered with a host of procedural\nchallenges, we determine that the Board did not deny the union\nconstitutionally adequate due process when it determined that a hearing\nbefore the Board was not necessary to resolve the union's petition.\n\n[fn16. The Court emphasized in Schware that the cross-examination\nallowed the petitioner the opportunity to test the credibility of his\naccusers and to \"smoke out inaccuracies by crossexamination and otherwise\nclarify testimony by challenging the memory and veracity of witnesses.\"\nId., at 243. 
Therefore, it is noteworthy that in the case before us the\nBoard, by giving the union an opportunity to review the affidavits of\ncertain nursing staff that testified for the University, adequately\naddressed the union's due process concern with credibility, and did not\nrefuse the union the opportunity to attack the credibility of the\naffidavits, or the completeness of the evidence produced, prior to\ndecision. Moreover, the Board found in its review that the affidavits\nprovided \"an adequate record for purposes of determining the scope and\ndefinition of the unit involved.\"]\n\nFor the reasons stated we therefore AFFIRM the judgment of the\nunpublished memorandum of decision of the Federal Circuit dated\nNovember 22, 2016."},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":94,"cells":{"text":{"kind":"string","value":"DSN: MYSQL\n Database: php_blog\n Hostname: localhost\n Username: php_blog\n Password: 1234\n Port:\n Charset:\n # Fixed Table Name:\n [\n 'user' => '{{table prefix}}' . 'user',\n 'article' => '{{table prefix}}' . 'article',\n 'article_cate' => '{{table prefix}}' . 'article_cate',\n 'article_tag' => '{{table prefix}}' . 'article_tag',\n 'article_rela_tag' => '{{table prefix}}' . 'article_rela_tag'\n ];"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":95,"cells":{"text":{"kind":"string","value":"q&1lHjibHW:| ^H> ^H}lH~$5s H r H H%U H H >,H%U?r,^?r lH +H@6 H V H r % 1U H$5 ~$H} % {| ~$H+ Z Y HJ4 Z Y HJ4 >$H% @| dH r+>H,^ >?Z Y HJ4$H~$H{ ~$5lH>Q>H|~$H% ^H& 47 '14 = H) ( $H+ U% H +> dH r H @?H>Q>H@qH r @q|'u '4r X I $+ ^41$+ H ~$H+$H@$ % ^H4 H4r H$ H+ r H r dH >H >, H & @ r X I $+ H +>rH + U% Z H r $Hq % V & -9 , >rH+U%Z r (u)J * * s X > %Hl|*4 Z X A X 4 X 4'$H+U%H $ $H+U%ZHZ >,H+ U% H $r>H %Z H ZX + * '31 +J s $J $ s X 7 Z> dH+U%ZH>%H?u*X'$H+U%H$+$H+U%ZHHZ $H r l H5H{>H@q| 3 J ; J $J J J & &'$H+U%H$+Z * ^46 H r+>H,$H+U%ZHHZ >,H+U%ZH H>$H{$5lH4r H$H@qH % %Z 7 qHd'qHd$H+U%ZHZ ; ,$+ H >Q>H|U% %Z >r+U%H%V7 qHd s r -'&-s u s * J * J 7 qHd>U?q r H| X4 Y X H$H@qH d?\n7 Y I 'r u (u)J * * $H+U%ZHZ >,H+U%ZHY!ZX>J Y J XH ) /& ) ( J * X ; J * J 65 68 % 6 ) $4 $ H4 r r @?H @? r H4l H+$H> H@+H@,>H$ % {| !@? 
r > ^H4l -Z[ U 1o{U P eH}%Z H> %H @?H r r >$%Z H r U $+ H $H}% ^%$ H dH %Z%$ H%Z $H % ^> r HlH Z +> >H $+ @?H | X H ZH % Z> r @?H,> H @+Z @$| % 4 V r @$H +q H > V >,%Z $ $H >,H $ {q $H H Z>H,>V >,%Z ^ -Z -Z!Z @?H X H5?\nZ r H ^ H> ^>l!V X4 $%Z r > r U $4 4H Z> U +?r %!H <\"!Z @?H X H %lH 4r H | X %Z + @?H V H $H$ r HZ| + >H| ^ H%Z >, ^H Z> ^@ l H@| %Z $H { % ^> %Z >, ^H Z> ^@ l H@| %Z $H!Z Z >4X+ >$ H Z Z HX { { ^H%Z!Z >4X+ >$ H Z @?H @r + >Z{H r %V [ {!Z4l Z Z>$H $ r % ^H H %Z V ZH >{Z %Z >,H H $ H { %Z Z % + Z H Z> $H %V?^ ^ @?H Z > + Z H Z> $H X4 $%Z r H% V >X U $+ @?$>r XU$Z H Z >U l H > ^H | Z % $H r H> ^H 4l H {$5 {?@ Z ^ ^ { ^% @?H @?q r H l +H Z+ +Z{ H r l % { $4 %X1H Z%{V?r% U $+ ^ @?H Z > ^% $H | > X+ >$ H H %Z ^H >Z 4l H Z $H %V > q?{ X @?H P >, >,V {r $r + Z X X {P $ H r +%Z $H H %H| >%V X r X+ H 4l | %Z + +H U $+ U $+!Z P ^ {^ % Z H l U @?\nV r Hl Z>Z > -Z @?H H} @?H X HZ { % +U $U H >l H $H U} V 4l @ | l$H, $H { $Z | H| ^H %Z U $+H H r4 + @ + > + @ 4l H r4 $U + {P %Z + < $H{ H H H >V X?H X r U r r > $r +$H Z, l $H{Z| %Z $Z| H4{ H r dH?^ $HU l HU U> r H ZU+ + dH U} X +?r V { +Z @> U H r X $ H$% Z +%Z } $ $H > H4 @q | $?HU + + H+Z $ U +@ +%Z } $H $ { U >%V X 4?r @ + ZHU + >,H H > } q U| H % %Z >+q r 4> H >,+ H V ^ $H { %Z H %H r r H@ qH > ^H | Z %Z $H r H> ^H H {P @q } l H @r+q r @ Z H $H -Z?H + >Hl r ^ l % ^H H $H } l l H Z{H% H@Z ^ + >H?HU } Z ZHU @?H Z > ^% X4 $%Z r dH4+q r H +?q $4 >% } r +$H r HZ{H r l H H | H >X + } U $+ H%{ V%Z H>X + >H ZZH?l $H H H r % H ZH H %H| >%V X?H,> H { H r +H @q$Z| ^Hd H U $+ HZ{H <\"!Z 4{ H %Z +?Z [ {!Z @?H ZH %Z Z -Z!Z %Z >V l |?H X+ >,H {q H H %Z >4X+ H X + r X H4l$H X @? r H @+r @ >$lH $U $HU % %Z >,H H ^U $+ +U $ +$H > Z{ %Z Z{ <\"!Z % ^@Xl>4{H %Z @ U?X+ >,H {q H H H?r % U $+ +@?q r H |?H $4 + %?^ -Z>Z %l %Z^@ l $ H H @q %H {$5 $ { $ Z r r4 $ + >,H Z > $ %Z r,> Z{H Z{H $HU } %Z + >H H Z r -Z @?$>r XH %?^ + r?ZH Z{ @q?\n1o 9Z3Z @?\n@?\nZ Z Z {q?%Z[ % >Z?\nr $ +$H r + V $HU } Z{H U % {V r r H @q| U r r >$ r X+ {!Z 4l H@H %Z < @qH Z{ r H+Z H $4?r +X H @+ @+ $HU % %Z X U $+ r l H!@? + $HU r r >$ >X Z{$+Z> @q?%Z[ % >Z?\nr > $ +$H r + V $HU } Z{H U % {V r +H U?r % $4 % %Z?r @+$HU $ HU r X + H$ZH $HU r H< H@H!Z X4 $%Z r @?Z { >@?H Z %Z^@ Z@ @?q r H X?\nr4?\nZH$H r X >?H +r?q H$ZH V%Z Z r >$ 4l H Z{H %Z} { @r +Z H >{ + X @H r HX r H r $ X H $ZU H Z{ >$ZH, $Z} $ZH ^?H H { $ZU {q$HU % %Z Z >{!Z $ Z r r H @qH r l H Z >H H ^H% { H Z } ^q H @q $H %V { H 4 { l H $ H!@?X@q X %V Z <\"!Z H H %Z Z@ Z <\"!Z $Z r r @q @q?Z H!Z <\"!Z $ZU H^ H X H $ZU | %Z <\"!Z $Z r r $Z r r $Z $ZU {q$HU % {P %Z <\"!Z |Z Z H @q r H r @q4 { {P 4 >$ {Z X {P [ {P !^ Z %V Z!Z @ H ^ >,{ X 4 %Z!Z!^ >V $ Z r X +l H H H | $HZ | $H} {!Z!Z HX >Z{ H %Z @?H!Z %Z + > r {X<\"!Z $HZ >$ > l ^ X HZ H H {!Z %Z + > >Xl $H %Z H?r $Z r r> $ Z r X4 <\"!Z?%Z{ H H$H{r @?H!Z%Z <\"!Z H % Z>$ @?H @?\n@?\n@?\nH H@Z H@Z H@Z {V Z$Z r r> X4 4?r %$ZH r } % ^> $Z r rH?H {l +X {V $ H X+ $ZH $ZH H r X H $ZH!@?X%?r > X {P H %Z { ^@?H 4 V$Z r @? X H ^ @+Z>H| % Z $HZ Z{ $Z r H r > $Z r H >X V + +4 @? H {Z ^$ZH r $Z $ZH @Z?%?^ H 4 V$Z r @? H {Z $Z r r> ^l $Z H r X H $ZH!@?Z $ZU H %Z > $ZH $ZU l XH $ZH @ H %$ZH r +!@?X {XU $+?{P $ZH { ^@?H H H 4 $ZH %Z l!Z H r | X @X %Z X { Z { q } ^@ | Z {$ r@ H V ^ l% ^H $H} ^$ZU V $Z r H r > $Z r > X?{ @Z l!Z H r | X $ZH >H | U % V ^@ ^H l @?HU l l@H X %Z <{?H l X l ^ %Z $Z r H r > $Z r r V H{?H l <{ $Z %?r H r X $H @q <\"!Z $ZH $ZH $ZH { $ZH $ZU l X4 H r > r $ZH X4 V $Z H l@$H X?\n$Z r r > X4 H r > X XU > @Z ^ $ZH {q?H l!Z!Z 4 %Z U $+H % ^l > ^ @?H? 
^ Z@?\nl $ZH @?H4l r > $ZH H { l@ H H +X H {P <\"!Z $ZH {U} $HZ H r+> $ZH ^% >r ^$Z >@ >U $Z r H $ZH r H r U} X ^H > Z $ZH % ^H Z@?%Z[ % H{r % r > >X {P $Z < { H{r Z{ %Z @Z^@ Z Z l H l H@Z ^l l $H % l HZ $ ^ | r@ H ,X +Z{ @Z %Z >l $H { l Z $ZH @?H @?\n@?\n!Z 4 >Z{ @Z Z{$> @q %H H H >,+ H V 4?r +r $ZH H H $Z l r H ^ <\"!Z H @$ r 4l!Z Z$ZH^H% [ >,$H$|^$H % X H$H ^!^ HZ @ H ^q?\n"},"finish_reason":{"kind":"string","value":"stop"},"hit_ctx_length":{"kind":"null"}}},{"rowIdx":96,"cells":{"text":{"kind":"string","value":"d_1\\in P_1^\\ell,\\dots, d_k\\in P_k^\\ell,\\zeta\\in Z^\\ell, p_\\ell(^\\beta d_1,\\dots,^\\beta d_k,^\\beta\\zeta) = ^\\beta w\\}\\ \\}\n\\]\nThen it is easy to check that\n$$T = \\{^\\beta w\\mid \\exists \\ell \\in\\omega ~ \\exists i_0,\\dots, i_{s-1}\\in\\omega ~ \\exists \\pi_0,\\dots,\\pi_\\ell\\in \\lambda\\ \\Psi(^\\beta w, \\pi_0,\\dots,\\pi_\\ell, i_0,\\dots,i_{s-1})\\}$$\nsince for all $\\beta,\\pi<\\lambda$, $\\{^\\beta w\\mid (^\\beta w)_i < ^\\beta\\pi \\textup{~for all~} i<\\length(^\\beta w)\\}$ is inductively definable.\nLetting\n\\[\\bigwedge\\limits_{n
| Go code | Elixir code | Exec time |
| --- | --- | --- |
| copy_from.csv.go | copy_from.csv.exs | 645 ms ± 30 ms |
| copy_from.csv.stream.go | copy_from.csv.stream.exs | 593 ms ± 21 ms |
Dataset columns:

| Column | Type | Values |
| --- | --- | --- |
| text | string | lengths 6 – 634k |
| finish_reason | string | 2 classes |
| hit_ctx_length | bool | 1 class |
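The schema above is easy to sanity-check programmatically. A minimal sketch, assuming the `datasets` library is available and using a placeholder dataset id (the real id is not given here):

```python
# Sketch only: inspect rows that follow the schema above.
# "user/placeholder-dataset" is a stand-in name, not the actual dataset id.
from datasets import load_dataset

ds = load_dataset("user/placeholder-dataset", split="train")
print(ds.features)              # expect: text (string), finish_reason (string), hit_ctx_length (bool)
print(ds[0]["finish_reason"])   # e.g. "stop"
```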
FIRST_TABLE] IS NULL AND [NAME] LIKE '%_'));
stop
null
and are the roots of, then 38. The real roots of the equation, where is a positive integer, can be obtained by the equation.
stop
null
[a] denotes the greatest integer which is less than or equal to $a$. Show that if $a, b$ are any two real numbers then $[a] + [b] \leq [a + b] \leq [a] + [b] + 1$.
stop
null
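A short sketch of why the bound in the problem above holds (a standard argument, added here only as an illustration): write $a = [a] + x$ and $b = [b] + y$ with $0 \le x, y < 1$. Then $a + b = [a] + [b] + (x + y)$ with $0 \le x + y < 2$, so
$$[a] + [b] \;\le\; [a+b] \;=\; [a] + [b] + [x+y] \;\le\; [a] + [b] + 1,$$
since $[x+y]$ is either $0$ or $1$.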
P ^ Q] [P =>.Q =>.P ^ Q] ^ [[Q =>.P => P =>.Q] ~P v ~Q v [P ^ Q]] P & ~Q P v ~P =>.P ^Q => ~ Q]
stop
null
RS92']] by David M. - 5/29/2005 4:14:00 PM; ip: 208.94.33.57 This is a must-have for any hardcore Star Trek fan!The addition of both DS9 and Voyager is great, but the bonus feature of DS9 mini movies is great! I can't wait until TNG, TOS, and the 7th are all out.
stop
null
example10c.s .global _start _start: mov r4, #0x6 @first argument to call mov r5, #0x7 @ second argument to call bl addTwo @ r4+r5 swi 0x011 addTwo: add r0, r4, r5 @ r0= r4+r5 bx lr .end
stop
null
Problems in the collection of accompanying tests for the CAP (11th) - 2022/2023](https://www.ouest-france.fr/basse-normandie/rouen/les-bordels-du-baccalaureat-a-rouen-deux-des-etudiants-accompagnes-lamentent-davoir-etre-expulses-des-salles-de-corriger-3e5ec83c-a784-11ed-896a-4dc1d4c4c76f)
stop
null
%%%% EN The BUA - Northern %%%% responded to a bomb threat at the SASA School (%%% south of %%%% road (N- %%%) at (%%%) a.m. After inspecting the building, the police found a pipe bomb %%%% kg) with %%%% kg of explosives. They detained three suspects after an investigation.
stop
null
NOTE''' This template was created for use on the [[Star Trek]] Wiki. Please do not use it on other Wikis unless it is copied over with the permission of the Wiki administrator.]] {{RefList/Header}} {{RefList/Header}} Description : This template is used to create a page in the [[Star Trek: TNG reference]] namespace. Use the code below to create a new reference page. The {{TNGRefNav}} template is a sub-template of {{StarTrekNav}}, so it does not need to be included separately. This template automatically places pages that use it into Category:TNG Reference and Category:Reference. Example usage : <beginning of the code> {{TNGRefNav|linkname=Link Name|article=Article Title}} <end of the code>
stop
null
dcooke posted [2004-5-13 20:59] Subject: Reply to "2006-model Jetta price marked up again" — Dealers are currently charging a premium on the new car because: first, import volume of the new model is limited and demand outstrips supply; second, there are no discounts right after a new model launches; third, the new model costs considerably more than the old one. Given the current markups, consumers are better off holding onto their money and waiting!!!
stop
null
include "syscalls.asm" .global _main _main: # arguments: a0 = name of directory # returns: a0 = pointer to contents if successful, -1 if unsuccesful addi sp, sp, -4 sw ra, 0(sp) addi a0, x0, 16 addi sp, sp, -a0 la t0, string sd a1, 0(sp) sd a0, 8(sp) # args = (num chars, char buffer, fd) # note that a2=fd=0 (i.e., read from stdin) jal _read lw ra, 0(sp) addi sp, sp, 4 ret string: .string "Enter new username: "
stop
null
t + d + 1 7.6.3 Proof of Lemma 7.39 We will make heavy use of the representation theory of SO(m)+; and in this regard the following observation is helpful: Proposition 7.40. We have: SO(m)+ is compact ⇐⇒ m is not a multiple of 4. Proof. Let M = SO(m)+ be the Spin group. We have seen in Remark 7.6 that M ≅ SO(m) in Case I, i.e., if m is even and not a multiple of 4. Similarly, M ≅ {g ∈ SO(m) : det g = 1} in Case II. In both these cases, M is an open subgroup of a compact group, and hence is itself compact.
stop
null
;; (load-file "$HOME/.emacs.d/configuration/elpa/org-7.9.2d/org.el") (load "org") ;; for org agenda (setq org-agenda-files '( ;; "$HOME/4cal/scrum.org" "$HOME/4cal/projects.org" ) ) ;; for org clock in (setq org-clock-persist t) (define-key org-mode-map [f11] 'org-agenda) (define-key org-mode-map [f12] 'org-time-stamp-inactive) (add-hook 'org-clock-in-hook (lambda () (unless current-prefix-arg (org-clock-goto)) ) ) ;;(setq org-directory "$HOME/org") (setq org-default-notes-file "$HOME/4cal/scratch.org") (provide 'my-org)
stop
null
WIC] Which of the following is FALSE? Group of answer choices:
- All else being equal, the future value of $100 in three years' time with monthly compounding is greater than its future value with annual compounding.
- The term compounding refers to the frequency with which interest is computed during a period.
- Compound interest means interest is earned on the principal during each period, but not on interest earned during earlier periods.

Interest compounded annually will generate a smaller future value than monthly compounding, because with monthly compounding the interest earned will itself earn interest; therefore the future value will be greater.
- Term compounding refers to the frequency with which interest is computed during a period - TRUE
- Compound interest means interest is earned on the principal during each period, but not on interest earned during earlier periods - FALSE
- All else being equal, $100 invested at the start of every month for ten years (with the first investment made today) will be larger than $1,200 invested today. - TRUE
- The longer the investment horizon, the greater the opportunity cost of consumption - TRUE
stop
null
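The compounding comparison in the entry above is easy to verify numerically. A minimal sketch in Python; the 6% nominal rate is an assumed figure for illustration only, not something stated in the question:

```python
# Compare $100 for 3 years under annual vs. monthly compounding
# at an assumed 6% nominal annual rate (illustrative assumption).
principal = 100.0
rate = 0.06
years = 3

fv_annual = principal * (1 + rate) ** years               # ~119.10
fv_monthly = principal * (1 + rate / 12) ** (12 * years)  # ~119.67
print(f"annual:  {fv_annual:.2f}")
print(f"monthly: {fv_monthly:.2f}")  # monthly > annual, matching the first statement
```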
Saratoga Center for the Arts presents: The Dramatic Readings and Open Mic of Saturday 05/13/2023 7:00 PM - 8:30 PM (EST) Price: Free Location: 56 Henry Street, Saratoga Open for reservations at the door! Saratoga Center for the Arts presents the Dramatic Reading and Open Mic. Be sure to be here when Saratoga Center for the Arts hosts Dramatic Readings and open mic. To Register: https://www.facebook.com/events/1837105452326505/ The Mission of the Saratoga Center for the Arts is to provide arts and music education opportunities for the children and adults of Greater Saratoga. Through this mission, the Center seeks to provide a comprehensive and well-rounded arts experience in the musical, dramatic, and visual arts for the youth and community of Saratoga.
stop
null
mpi_init_0(struct mpi *a) { a->nlimbs = a->alloced = 0; @@ -566,9 +568,6 @@ void mpi_zero(MPI a) if (a->nlimbs) { DBGCRYPTO("mpi_zero(0x%p)\n", a); - - mpi_free_0(a); - a->alloced = 0; } /* keep the nlimbs */ @@ -730,9 +730,8 @@ void mpi_init_multi(const size_t count, a = ptr; a->alloced = a->nlimbs = 0; - a->sign = MPI_POS; - - a->d = NULL; + a->sign = MPI_SIGN_UNKNOWN; + MPI_INIT_SET_ZERO(a); } while (--i >= 0) {
stop
null
M B`6 U`l4a7`3 Yf~40d H~v5e- mQq,2r8 kd{+4u: `Xz77v. wCy,3w) jW|17x- gUn69y2 q%]8:d6 w$^\<d% W&T43e2 lQZ8>f- d&fA<f! #Od7$h( %Or>7i, %Nz;di7 A&yA0k5 Q%Yd4m: `Ku56o+ S`mB9p) S]oD5r4 YHtA>s) gWh9Cs+ Z&w;<v; `NiH_`7 A&y64~A xMy<8~R fYz=9~z OOvH~#2 hRh0#-d ~Q[G#<f fTyD3h) kd{C2m; mPc35t` `i[=7w, a)A49x( #Mj6>z9 wZjH`#3 uo{uY<f +g~S3h!
stop
null
/*!*\ * * Copyright (c) Highsoft AS. All rights reserved. * *!*/ import * as globals from "../globals.src.js"; import * as _Highcharts from "../highcharts.src.js"; /** * Adds the module to the imported Highcharts namespace. * * @param highcharts * The imported Highcharts namespace to extend. */ export function factory(highcharts: typeof Highcharts): void; declare module "../highcharts.src.js" { /** * Return an array with the category breaks. A category break is where a * range of x values are removed. * * @param axis * The x axis to get the category breaks from. * * @param visibleOnly * Only return category breaks that have visible series. * * @return An array of objects that look like * {from: number, to: number, isBreak: true}. * */ function getCategoryBreaks(axis: XAxis, visibleOnly: boolean): Array<{ from: number; to: number; isBreak: true; }>; } export default factory; export let Highcharts: typeof _Highcharts;
stop
null
FREQ5 (freq5) is a molecule that is directly derived from melatonin, and it is extremely easy and inexpensive to synthesize. In small doses, it is harmless to mammals, including humans. Dr. Bradstreet, a biochemist, first created FREQ5 and performed a battery of laboratory and in vivo tests that confirm the efficacy of FREQ5.
stop
null
Fischer-Kraemer-Index| |------------------------------------------| | [Version History](#Version-History) | | [Examples](#Examples) | | [Dependencies](#Dependencies) | | [Output Format](#Output-Format) | | [Usage](#Usage) | This is a R-shiny application made for the calculation of the Fischer-Kraemer Index for a 2x2 table. The input table of the app has no specific format required, the user only needs to input the counts in the 2x2 table by selecting the cells by mouse click. After input of the cell values and pressing of the 'Run'-Button the App will return the Fischer-Kraemer-Index calculated by the formula displayed on the main app page. ![App Preview](screenshot.png) ### Version History *Version 0.1* initial release ### Examples The `Dataset.csv` contains a dataset that can be imported from a *.csv* file for which the Fischer-Kraemer-Index can be calculated. ### Dependencies ### Output Format The app is purely interactive and therefore has no specific format of output ### Usage The app can be used via the code below, but be aware that this will overwrite the files in your wd which also have the same name, so better clone or fork the repo to be save. ```R source("App_FKI.R", local = TRUE) App_FKI() ```
stop
null
Tcp] | 2018-07-28 11:06:48.455304 [/p4/30271562/p4/tests/test_runner.py(2035):run] P4R1 T-101 : (Test Stage) | 2018-07-28 11:06:48.458856 [/p4/30271562/p4/tests/test_runner.py(2080):test_stage] <end of the code> Stage1 In this test it seems that in some instances, we would see port 2 disappear from the TCAM. (When we run a bit longer, we would even have both ports disappear, and eventually the switch crashes) We do see that the P4Runtime inserts the correct entries, but it seems they have all disappeared after a few tests. Stage 2 It seems that when we disable the tunnel classifier entries, the tunnel classifier gets removed, and so the tunnel traffic is allowed to go through directly, without classifying it, i.e. without knowing whether it's a new tunnel or not. Note: For this PR, I do not enforce the port entries to stay in the TCAM, instead, I just make sure they are there when we insert a flow entry using the<beginning of the code> NoNextIndexEntry <end of the code> directive (which used to be the only case)
stop
null
T1]] = the corresponding intuitionistic fuzzy truth values of [[tau]. 3] = 0.73 and the intuitionistic fuzzy truth values of [[phi]. 3] = 0.25 are corresponding intuitionistic fuzzy truth values of [[phi]. (2)] be two intuitionistic fuzzy truth values. Let [[tau]. The intuitionistic fuzzy truth value of [[tau]. 4] = 0.12] are corresponding intuitionistic fuzzy truth values of [[phi]. MULTISOURCE INFERENCE IN INTUITIONISTIC FUZZY ENVIRONMENT WITH INTUITIONISTIC FUZZY PREDICATES Similarly, let [[psi]. Suppose an expert (ES1) believes a probability of 85% (i. e., p=0.85) for the corresponding intuitionistic fuzzy truth value of [[theta]. 1] = 0.85 be the corresponding intuitionistic fuzzy truth values of the predicates [[phi]. 3] = 0.77 are corresponding intuitionistic fuzzy truth values of the predicate [[phi]. 4] = 0.52 are corresponding intuitionistic fuzzy truth values of the predicate [[phi]. This means that the intuitionistic fuzzy truth values of the predicate [[theta]. It can be easily seen that 0.45 [not equal to] 0.21. e., 0.55 [not equal to] 0.79), which states that the intuitionistic fuzzy truth values of predicate [[theta]. Rough set based on information fusion from incomplete linguistic data
stop
null
https://leetcode.com/problems/binary-tree-level-order-traversal/ public class Solution { public IList<IList<int>> LevelOrder(TreeNode root) { List<IList<int>> answer = new List<IList<int>>(); Queue<TreeNode> current_queue = new Queue<TreeNode>(); Queue<TreeNode> next_queue = new Queue<TreeNode>(); if (root != null) { current_queue.Enqueue(root); } while (current_queue.Count > 0) { List<int> new_list = new List<int>(); while (current_queue.Count > 0) { TreeNode node = current_queue.Dequeue(); new_list.Add(node.val); if (node.left != null) { next_queue.Enqueue(node.left); } if (node.right != null) { next_queue.Enqueue(node.right); } } answer.Add(new_list); //swap current_queue = next_queue; next_queue = new Queue<TreeNode>(); } return answer; } } * Definition for a binary tree node. * public class TreeNode { * public int val; * public TreeNode left; * public TreeNode right; * public TreeNode(int val=0, TreeNode left=null, TreeNode right=null) { * this.val = val; * this.left = left; * this.right = right; * } * }
stop
null
EX: testcase 1: testcase 2: 7 6 \ / \ 8 4 3 / \ / / 10 12 5 2 / \ / \ 6 3 16 100 / 1 [[6, 10], [3, 12], [1, 16]] [[6, 100]] Solution:
class Solution {
public:
    // One vector of values per vertical column, left-to-right.
    vector<vector<int>> verticalTraversal(TreeNode* root) {
        vector<vector<int>> res;
        if (root == NULL) return res;
        // offset (column) -> level (row) -> values; multiset keeps duplicates in sorted order
        map<int, map<int, multiset<int>>> m;
        dfs(root, 0, 0, m);
        for (auto p : m) {
            vector<int> column;
            for (auto q : p.second) {
                column.insert(column.end(), q.second.begin(), q.second.end());
            }
            res.push_back(column);
        }
        return res;
    }

    void dfs(TreeNode* root, int level, int offset, map<int, map<int, multiset<int>>>& m) {
        if (root == NULL) return;
        m[offset][level].insert(root->val);
        dfs(root->left, level + 1, offset - 1, m);
        dfs(root->right, level + 1, offset + 1, m);
    }
};
stop
null
Please see the [[Refund information]] to request an exchange for the same title, or if you'd rather have the store credit, you can do so here. Please describe the reason below: Hello. I love the game but was really hoping there was a more vibrant community on here. I use discord, my username is Austin#1413. For the last few days I've had "Sorry, something went wrong. Please wait a moment and try again" pop-up appear every time I try to play the game. I've tried other games and have not had this issue, which means it's most likely your servers. I have also reset my modem just to see if my connection was having trouble. Still the same issue. It sucks because I was having a blast. =[ Hi, It is entirely possible that your servers are being overrun with players. I've been enjoying the game tremendously as well, but I'm having issues with people crashing upon attempting to join. I've tried to play the past couple of days and get no response, and get the same response as you every time. I thought that the issue was the state of California, but after speaking with my fellow players who reside in other states, they're not able to log in at all either. Please let me know if there's any solution to this The game is not downloading. Even though it says that its downloaded. Please help me.
stop
null
DIR|W]]= [[ {{{id|}}} ]], ''[[DIR]]'' is a [[level]] from {{game}}. ==Remake== {{remakeheader}} |1.1={{status|Release version|{{rlvl}}|https://docs.google.com/spreadsheets/d/1NhW6DbBsQb7cqXXdzayKJbmZ1Qzjf-f-rQoSvEHBYYw/htmlview?sle=true#}} |1.2={{status|Test version|1.5}} |1.3={{status|Test version|1.7}} DIR is a level from Sonic the Hedgehog. DIR is an allusion to the Devil's Island prison complex, located in the territory of French Guiana. Specifically, it was one of the prisons used to hold political prisoners during the French Third Republic (1870-1940). Notably, Devil's Island was used to house, amongst others, Russian prince Peter Kropotkin, French anarchist Pierre-Joseph Puget, and political satirist Henri Rochefort. The same abbreviation in Spanish, DIR (abbreviating the acronym Dirección) refers to one of three political police forces operating during the dictatorship of Francisco Franco in Spain: Dirección de Información de Seguridad (DGS, 1937–1942), Dirección General de Seguridad (DGS, 1952–1977), and Dirección General de Contraseguridad (1977–1981). DIR is an abbreviation of Direção de Instrução Primária, the Ministry of Primary Education, in Brazil.
stop
null
# (C) Copyright 2019 Hewlett Packard Enterprise Development LP. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import os import unittest import tempfile from ilorest.util import RedfishPasswordEncoder class TestRedfishPasswordEncoder(unittest.TestCase): """Test class for ilorest/util.py""" def test_generate(self): r""" Test for password generation """ password = RedfishPasswordEncoder._generate() self.assertEqual(len(password), 40) # RFC2196 size def test_make(self): """ Tests to ensure that it makes a password. In order to do this we make use of a temporary file in order to not leave around any password files that shouldn't exist. To prevent contamination during the testing process we use a new temporary file for each run of the make method. """ # Use a temporary file fdesc, fname = tempfile.mkstemp() with os.fdopen(fdesc, 'w') as temp: # Test it with the defaults temp.write('\n') temp.flush() RedfishPasswordEncoder._make(fname) self.assertTrue(os.path.isfile(fname)) # TODO: Add in other tests for options os.unlink(fname) if __name__ == '__main__': unittest.main()
stop
null
#8 (438) 203-1161 2438 Laguna Rd Houston, TX 77057 Map it (438) 203-1161 map It was convenient to be able to pay with my Venmo and got free gas, will definitely be buying from them again. My pickup went well, I had a truck ready and the whole pickup process took no time at all! Good job done on the purchase but the restroom was gross! Everything was good , clean and good They had some great deals on the cars The owner was a great guy. In fact, I almost always find the cars that are for sale on here a week or so later on the lot at a few grand more. So just beware. If you do buy, bring somebody with you, the sale and negotiation process has an air of mistrust and will leave you feeling uneasy. But nonetheless it went down smoothly, so that's not a total no go. I'm a big guy and I like my cars to fit me with room to spare. The good guys at the lot helped me find a few cars in my price range and the drive of one really exceeded my expectations. I brought a few of my buddies to check it out and they were sold as well. The whole process went smoothly and I look forward to working with them in the future. Overall it was ok but the dealership lacks that home town feel. Everyone seemed to be friendly but I don’t get the feeling this will be the only dealership in the area I will buy from Great place to work with. Will definitely visit again The cars are being sold cheaper on other sites not on 405. I buy for my business so I work with multiple car dealerships. I usually buy SUV's and Trucks for my fleet. They make me feel appreciated and show me lots of vehicles. I always feel welcome and know what I am going to walk out of the dealership with. I use to be a salesman and they know what they are doing. You can definitely trust them.
stop
null
MODEL P202683 ;----------------------------------------------------------------------------- ; Title : LATCH Control Routines for 355 Module ;----------------------------------------------------------------------------- ; Revision History ;----------------------------------------------------------------------------- ;062008 : Created. JP ;----------------------------------------------------------------------------- ; Functions ;----------------------------------------------------------------------------- ;SetLtch ;WaitOnLtch ;----------------------------------------------------------------------------- ; Other Routines Used ;----------------------------------------------------------------------------- ;----------------------------------------------------------------------------- ; Notes : ; ;----------------------------------------------------------------------------- ; Global Register Equates ;----------------------------------------------------------------------------- $NOLIST $INCLUDE (def355.inc) $INCLUDE (def5312.inc) $INCLUDE (def202683.inc) $LIST ;----------------------------------------------------------------------------- ; Local Register Equates ;----------------------------------------------------------------------------- dseg at 0 NOPCOUNT ds1 cseg ;----------------------------------------------------------------------------- ; Function: SetLtch ; Syntax : call SetLtch ;----------------------------------------------------------------------------- ; Set latch to high ;----------------------------------------------------------------------------- SetLtch: st #Lat1,CSR ; select latch 1 mov #LatWriteData,WREG movwf CSR2 ; latch data reg write mov #0xff,Lat1Port st #LatReadData,CSR ; Latch 1, Read Data bsf CSR2,0 ; latch data reg write retlw 0xff ;----------------------------------------------------------------------------- ; Function: WaitOnLtch ; Syntax : call WaitOnLtch ;----------------------------------------------------------------------------- ; Wait for latch to go low ;----------------------------------------------------------------------------- WaitOnLtch: btfsc PIR2,2 call WaitOnLtch ; wait for flag to clear st #LatReadData,CSR ; Latch 1, Read Data btfss CSR2,0 ; check latch data call WaitOnLtch ; wait for flag to clear retlw 0xff end
stop
null
ERROR: type should be string, got " https://www.hackerrank.com/challenges/divisible-sum-pairs\n |> fun url -> require(\"BSBrowser\").openBrowser url\n |> ignore\n\n// let t = [|1|]\n// let k = 4\n// let a = [|1; 2|]\n// let t = [|2|]\n// let k = 5\n// let a = [|1; 2; 3; 4|]\nlet t = [|2; 1|]\nlet k = [|5; 3|]\nlet a = [|\n [|1; 2; 3; 4|]\n [|3; 1; 2|]\n |]\n\n// ================================================\n// ============ SOLUTIONS:\n\nlet findAllSumPairs k a =\n let arrayLength = a |> Array.length\n\n let rec loop j sums =\n let sums' = a.[j..] |> Array.fold (fun acc i -> (a.[j] + i) :: acc) sums\n let j' = j + 1\n match j' with\n | j'' when j'' < arrayLength -> loop j'' sums'\n | _ -> sums'\n loop 0 []\n\nlet countEvenPairs k s =\n s |> List.filter (fun sum -> sum % k = 0) |> List.length\n\nlet divisibleSumPairs t k a =\n a\n |> Array.collect (fun n -> a |> Array.takeWhile (fun m -> n <> m))\n |> Array.filter (fun (n, m) -> (n + m) % k = 0)\n |> Array.length\n\nlet divisibleSumPairs' t k a =\n let sumPairs = a |> findAllSumPairs k\n sumPairs |> countEvenPairs k\n\n// ================================================\n// ============ TESTING:\n\nt\n|> Array.iteri (fun i testNumber ->\n a.[i]\n |> divisibleSumPairs testNumber k.[i]\n |> printfn \"Count of sum pairs in array: %A\"\n)\n"
stop
null
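The F# entry above targets HackerRank's "divisible sum pairs" task. As a language-neutral illustration of the computation it is after (counting index pairs whose element sum is divisible by k), here is a small Python sketch, added only for clarity:

```python
# Count pairs (i, j) with i < j and (a[i] + a[j]) divisible by k.
def divisible_sum_pairs(k, a):
    return sum(
        1
        for i in range(len(a))
        for j in range(i + 1, len(a))
        if (a[i] + a[j]) % k == 0
    )

# Same test arrays as in the entry above.
print(divisible_sum_pairs(5, [1, 2, 3, 4]))  # 2 -> (1, 4) and (2, 3)
print(divisible_sum_pairs(3, [3, 1, 2]))     # 1 -> (1, 2)
```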
Bounding the Two Points
## Radius
We are going to find a quick way to find the radius of the circle through \( P_1, P_2 \) by finding the distance between them. You need not use calculus here; we can simply consider the circle passing through \( P_1 \) and \( P_2 \) with \( P_1 P_2 \) as a diameter. To find the circle, we use the fact that it passes through both \( P_1 \) and \( P_2 \). This means its center lies on the line passing through the midpoint of \( P_1 \) and \( P_2 \) and perpendicular to \( P_1 P_2 \). It should now be a matter of showing that such a circle exists and finding its equation. But since we care more about how to find the radius, that's how we'll proceed. Recall the distance formula: $$ \lVert P - Q \rVert = \sqrt{(P^x - Q^x)^2 + (P^y - Q^y)^2} $$ You can visualize this as finding the distance between the two points \( P_1 \) and \( P_2 \). ![The Distance Formula](./images/img3.png) *Image Description Here* *By [Wikimedia](https://commons.wikimedia.org/wiki/File:Distance_formula_2d.svg)* By the Pythagorean Theorem, $$\lVert P_1 - P_2 \rVert = \sqrt{(P_1^x - P_2^x)^2 + (P_1^y - P_2^y)^2} $$ You can verify this by considering that \( a = P_1^x - P_2^x \) and \( b = P_1^y - P_2^y \) are the legs of the triangle we made using points \( P_1 \) and \( P_2 \) in the diagram. Since \( P_1 P_2 \) is a diameter, the segment from \( P_1 \) to \( P_2 \) passes through the center, so $$\lVert P_1 - P_2 \rVert = r + r = 2r $$ This means $$ r = \frac{\lVert P_1 - P_2 \rVert}{2} $$
stop
null
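A quick numeric check of the formula in the entry above, using made-up points (the coordinates are illustrative assumptions, not values from the text):

```python
import math

# P1 and P2 taken as endpoints of a diameter, so r = ||P1 - P2|| / 2.
P1 = (1.0, 2.0)
P2 = (4.0, 6.0)

dist = math.hypot(P1[0] - P2[0], P1[1] - P2[1])  # sqrt(3^2 + 4^2) = 5.0
r = dist / 2                                     # 2.5
print(dist, r)
```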
Description: The Chabutte-aux-Coeurs is located just steps off the Champs-Elysees in Paris. It is a romantic spot to stroll and watch people in Paris. There is a nice view of the city from this hill which used to be a cemetery. It also has a little restaurant/coffee shop.
stop
null
<ul class="nav nav-tabs"> <li class="active"><a data-toggle="tab" href="#tab1" class="tab1">Tab #1</a></li> <li><a data-toggle="tab" href="#tab2" class="tab2">Tab #2</a></li> <li><a data-toggle="tab" href="#tab3" class="tab3">Tab #3</a></li> </ul> <script> $(document).on('click', '.tab2', function () { /* first way - empty() */ $('.tab-content').empty().load('viewTab/tab2.php'); /* second way - html() */ $('.tab-content').html("your text goes here!"); /* third way - prepend() /append() /after() */ $('.tab-content').prepend('your text goes here!'); }); </script>
stop
null
Problem Outline: Create a program that simulates a vending machine 1. Display the items in the vending machine with corresponding prices 2. Take the user's input on which item they would like to purchase 3. Ask the user to insert coins and keep track of the amount inserted 4. Check if the amount inserted is enough to purchase the selected item 5. Dispense the item and give change if necessary 6. End the program when the user cancels the transaction ''' # Define the list of items with corresponding prices items = { "Pepsi": 2.0, "Coke": 2.0, "Chips": 1.5, "Gum": 0.75, "Kit Kat": 1.0 } # Define the function to display the items def display_items(): print("Available Items:") for item in items: print(item + ": $" + str(items[item])) # Define the function to get the user's input def get_user_input(): item = input("What would you like to buy? ") if item in items: return item else: print("That item is not available.") return get_user_input() # Define the function to get the user's payment def get_payment(): payment = 0 while payment < min(items.values()): payment += float(input("Please insert a coin or bill: $")) return payment # Define the function to give the user change def give_change(payment, item): change = payment - items[item] if change > 0: print("Your change is: $" + str(change)) return change # Define the main function to simulate the vending machine def vending_machine(): display_items() item = get_user_input() payment = get_payment() if payment >= items[item]: change = give_change(payment, item) print("Please take your " + item) else: print("Insufficient payment.") another_transaction = input("Do you want to make another transaction? (y/n) ") if another_transaction == "y": vending_machine() else: print("Thank you for using the vending machine!") # Call the main function to start the vending machine simulation vending_machine()
stop
null
Hi, I wanted to make a topic with ideas on how to make my ship better. I'm using a battleship. Here's the template: <beginning of the code> [Block 2: 4][Block 2: 4][Block 2: 4][Block 2: 4][Block 2: 3][Block 2: 3] [Block 1: 2][Block 1: 4][Block 1: 4][Block 1: 4][Block 1: 4][Block 1: 2] [Block 1: 2][Block 1: 4][Block 1: 4][Block 1: 4][Block 1: 4][Block 1: 2] [Block 3: 1][Block 3: 2][Block 3: 3][Block 3: 2][Block 3: 3][Block 3: 1] [Block 3: 3][Block 3: 5][Block 3: 5][Block 3: 5][Block 3: 5][Block 3: 3] <end of the code> These are the points I've gotten that seem to really make the ship worse in general: * Front is a little too blunt (in all honesty I didn't even realize until someone pointed it out) * Too many bridges * Hull armor is too thin (like a fourth of what I expected) * Damage Distribution makes no sense * Armor is weirdly spread out * Damage and Armor stats don't add up to what they should be (not like it matters though) * The Block 2: 3's are way too long (they're way past the main engines) * Too weak against things that target armor I am aware that all of this may not matter since a couple of other people said they've beaten everything I'm probably ever going to encounter with 3/3/1 hull and some health and shields, but I just wanted to find out ways to make the ship more reasonable in my mind. Please post some of your ideas on how to improve my ship, I've been working on it for almost the entire day without a good change. [quote=anonymous poster]Thanks, though I don't know if any of that was helpful, what I was wanting was stuff like "switch out that block 2 over there with a block 1" not, "you should've had more of this or that". I know I need more block 1's. Also what does OOS mean?[/quote] OOS = Out Of Sight, generally meaning too far away to shoot or even to be seen.
stop
null
#include <iostream>
#include <bits/stdc++.h>
#define ll long long int
#define pb push_back
#define mod 1e9 + 7
#define mp make_pair
using namespace std;

// Binary search for the (approximate) square root of n over doubles.
double b_search(double n) {
    double i = 0;
    double j = n;
    while (i <= j) {
        double mid = (i + j) / 2;
        double prod = mid * mid;
        if (prod == n) return mid;
        else if (prod < n) {
            double prod2 = ceil(mid) * ceil(mid);
            if (prod2 == n) return ceil(mid);
            else if (prod2 < n) i = ceil(mid);
            else if (prod2 > n) return mid;
        }
        else if (prod > n) j = floor(mid);
    }
    return i;
}

double convert(int n) {
    double s = 0;
    double f = 1;
    for (int i = 0; i < 7; i++) {
        s += fmod(n * pow(10, -1), f);  // fmod, since % is not defined for doubles
        f = pow(10, i);
    }
    return s;
}

// Collect the divisors of n found below sqrt(n) (plus the paired divisor / sqrt when applicable).
vector<long long> printdivisors(long long n) {
    vector<long long> v;
    long long i;
    for (i = 2; i * i < n; i++) {
        if (n % i == 0) v.push_back(i);
    }
    if (v.size() == 1) v.push_back(n / v[0]);
    else if (i * i == n) { v.push_back(i); v.push_back(i); }
    return v;
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    double x;
    cin >> x;
    vector<ll> v = printdivisors((ll)x);
    int fl = 0;
    double res = 0;  // declared outside the loop so it is still in scope afterwards
    for (auto it = v.begin(); it != v.end(); it++) {
        res = b_search((double)*it);
        fl = 1;
        break;
    }
    if (fl == 0) res = 0;
    if (res > 1) cout << "YES" << "\n";
    else cout << "NO" << "\n";
    return 0;
}
stop
null
begin require 'rubygems' if RUBY_VERSION[0..2] == "1.8" require "thread" rescue #assume jruby end require "test/unit" module ScalesTestHelper class MockTcpServer def initialize(bind_port,binder) @port = bind_port @stop = false @binder = binder Thread.abort_on_exception=true Thread.new{ server = TCPServer.open("127.0.0.1", @port) while(!@stop) t = server.accept Thread.new(@binder){ |binder| while t received = t.recv(1000) #puts "[SERVER][#{self.object_id}] Received: #{received}" #receive the data, then close the connection (to emulate a shutdown) r = binder.received received t.send(r,0) t.close end } end } @stop=false end def port @port end def stop @stop=true end end module Mocks class StubBase def received(m) m end def reset end end class MockSimpleProvider < StubBase attr_accessor :name, :messages def initialize(name) reset @name = name end def reset @messages=[] end def received(m) puts "Mocks::MockSimpleProvider.received (#{@name}): #{m}" @messages.push m @messages end def process_message @messages.each do |m| puts "Mocks::MockSimpleProvider.process_message: #{m}" @processor.process_message @name , m end @messages=[] end def add_message(m) received m process_message end def set_processor(p) @processor=p end end class MockBulkProvider < MockSimpleProvider @@names = [ "Joe","Bill", "Jim", "Carl","Mack","Dave","Nick"] def initialize(name,size) @size=size super(name) end def received(m) puts "Mocks::MockBulkProvider.received (#{@name}): #{m}" @@names.each do |n| @messages.push n end @messages end def process_message @messages.each do |m| @processor.process_message @name , m end @messages=[] end end end end require File.dirname(__FILE__) + "/../../lib/scales"
stop
null
Good Morning Folks!}} Well the Holiday weekend has come to a close and for those of you that attended and played in the the First Annual Golf Tournament you know that it was a great time. Weather was perfect and everyone played real well. We were glad to see so many in attendance and for those of you that wanted to, please fill out a waiver and donate a little, the money is going toward the Golf Association and for scholarships. Also thanks to everyone that helped with the raffles, the food and refreshments. Our next event will be at the lake on Sunday, August 16th at 7:30 A.M. and then the 2020 Christmas Banquet will be Wednesday, December 16th. Looking forward to seeing everyone on August 16th and then again at the Christmas Banquet. We were really saddened to hear the passing of our friend David Barne a few days ago. David was a class individual and made a lasting impression on all that knew him. Myron and I went down to David's last night and visited with him along with others of his family and friends. Again it was really nice to see how many came by the home. David will surely be missed and our thoughts and prayers are with his family during this very sad time. A few things before I close today's blog. First a huge thank you to everyone for the messages of concern to Myron. Again please keep him in your thoughts and prayers. Also a huge thank you to everyone that donated at the golf tournament. Those donations will be used to get our Golf Association moving and they will also be used to help the Association sponsor those young individuals that are looking forward to continuing their education. Next, I would like for everyone to know that I am new to Facebook and also I am new to the Golf Association so everyone bare with me during this process. I have changed the name and the content of the blog to accommodate the new Golf Association and not only that but to accommodate the growth of the Association. In a few weeks I hope to be able to share pictures of the tournament so make sure you hit that "Like" button, share and join our Group on Facebook and continue to share with others. The more the merrier. I will do my best to include everyone that asked to be included in this blog so make sure that if you didn't send me your contact information that you do now. Also, if you have a news worthy item or information that is of interest, please be sure to send it to me (via E-mail), so that it can be shared in the Blog. Well, that is all I have for now. Hope everyone has a great week and I am looking forward to seeing everyone next month at the lake and then again at the Christmas Banquet in December. See you there.
stop
null
# Import necessary libraries import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline from sklearn.metrics import accuracy_score from nltk.corpus import stopwords import re # Load the IMDb dataset imdb_data = pd.read_csv('/content/imdbDataset.csv', nrows=1000) # Adjust path if needed # Define a simple sentiment analyzer function using TF-IDF and Logistic Regression def build_sentiment_analyzer(data): # Preprocess the data: Remove HTML tags, convert to lowercase, remove stop words, etc. def preprocess_text(text): # Remove HTML tags text = re.sub('<.*?>', '', text) # Convert to lowercase text = text.lower() # Remove punctuation text = re.sub(r'[^\w\s]', '', text) # Remove stopwords stop_words = set(stopwords.words('english')) words = text.split() words = [word for word in words if word not in stop_words] return ' '.join(words) # Apply preprocessing data['cleaned_review'] = data['Review'].apply(preprocess_text) # Split data into training and testing sets X = data['cleaned_review'] y = data['Sentiment'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # Create a TF-IDF vectorizer and Logistic Regression model model = make_pipeline(TfidfVectorizer(), LogisticRegression(max_iter=200)) # Train the model model.fit(X_train, y_train) # Evaluate the model y_pred = model.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print(f'Model Accuracy: {accuracy:.2f}') return model # Build the sentiment analyzer model sentiment_model = build_sentiment_analyzer(imdb_data) # Function to predict sentiment of a given review def predict_sentiment(review, model): # Preprocess the review def preprocess_text(text): text = re.sub('<.*?>', '', text) # Remove HTML tags text = text.lower() # Convert to lowercase text = re.sub(r'[^\w\s]', '', text) # Remove punctuation stop_words = set(stopwords.words('english')) words = text.split() words = [word for word in words if word not in stop_words] return ' '.join(words) # Preprocess the review cleaned_review = preprocess_text(review) # Predict sentiment sentiment = model.predict([cleaned_review])[0] return sentiment # Example usage new_review = "The film was a cinematic masterpiece. The actors' performances were exceptional, and the plot kept me engaged from start to finish. I highly recommend it." predicted_sentiment = predict_sentiment(new_review, sentiment_model) print(f"Review: {new_review}") print(f"Predicted Sentiment: {predicted_sentiment}")
stop
null
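One practical note on the pipeline above: the `stopwords` corpus it imports is not bundled with a fresh NLTK install, so the snippet below is the usual (assumed) prerequisite before the preprocessing step will run:

```python
# Download the stopword list used by the preprocessing code above.
import nltk
nltk.download('stopwords')
```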
MoO_{3}(1-x)·xV_{2}O_{5}〕/AC催化环己烷选择氧化性能的影响。结果表明,少量磷和氯的添加量与〔MoO_{3}(1-x)·xV_{2}O_{5}〕/AC(x=0.030~0.050)活性组分的键参数以及Mo^{6+}/Mo^{5+}摩尔比有关。磷、氯含量均较低时,MoV催化剂上钼氧键键长R(MoO)和R(Mo-O-Mo)较短,键强度q值较大,催化剂活性较高。随磷和氯添加量的继续增加,R(MoO)与R(Mo-O-Mo)增长,q值降低,〔MoO_{3}(1-x)·xV_{2}O_{5}〕/AC的活性下降。由此认为磷、氯的添加能影响活性组分的键参数,促进选择性氧化性能的提高。本文研究了添加P和Cl对载体AC负载MoV杂多酸盐〔MoO3(1-x)·xV2O5〕/AC催化环己烷选择氧化性能的影响。结果表明,少量磷和氯的添加量与〔MoO3(1-x)·xV2O5〕/AC(x=0.030~0.050)活性组分的键参数以及Mo6+/Mo5+摩尔比有关。磷、氯含量均较低时,MoV催化剂上钼氧键键长R(MoO)和R(Mo-O-Mo)较短,键强度q值较大,催化剂活性较高。随磷和氯添加量的继续增加,R(MoO)与R(Mo-O-Mo)增长,q值降低,〔MoO3(1-x)·xV2O5〕/AC的活性下降。由此认为磷、氯的添加能影响活性组分的键参数,促进选择性氧化性能的提高。Effect of Added Phosphorous and Chlorine on the Catalytic Selective Oxidation of Cyclohexane to Cyclohexanone and Cyclohexanol over 〔MoO3(1-x)·xV2O5〕/AC Abstract : The effect of added phosphorus and chlorine on the catalytic selective oxidation of cyclohexane to cyclohexanone and cyclohexanol with oxygen over AC-supported ammonium heteropolymolybdate was studied. It was found that the amount of added phosphorus and chlorine is related to the active composition parameters such as the bond length R (MoO) and R (Mo-O-Mo) as well as Mo6+/Mo5+ molar ratio of AC-supported ammonium heteropolymolybdate. As the content of added phosphorus and chlorine is low, the parameters such as R (MoO) and R (Mo-O-Mo) are short and the active composition parameters is large, which results in a high activity. With increasing of the added phosphorus and chlorine, the parameters R (MoO) and R (Mo-O-Mo) as well as the active composition parameters is small, which leads to a high activity. With increasing of the added phosphorus and chlorine, the parameters R (MoO) and R (Mo-O-Mo) are larger and the active composition parameters is decreased, which results in a lower activity of catalyst. Hence added phosphorus and chlorine influences the active composition parameters and enhances the selective oxidation of cyclohexane with oxygen.
stop
null
this code requires P5.js library: https://p5js.org/ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ //////////// Example code using custom object. In this case "Beach" /////////////////////////// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ///////////////////////////////////////////////////// //////////// index.html /////// ///////////////////////////////////////////////////// <!DOCTYPE html> <html> <head> <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.7.3/p5.js"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.7.3/addons/p5.dom.min.js"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.7.3/addons/p5.sound.min.js"></script> <link rel="stylesheet" type="text/css" href="style.css"> <meta charset="utf-8" /> <title>Example p5.js</title> </head> <body> <script src="Beach.js"></script> <script src="sketch.js"></script> </body> </html> ///////////////////////////////////////////////////// //////////// style.css //////////////////////// ///////////////////////////////////////////////////// html, body { margin: 0; padding: 0; } canvas { display: block; } ///////////////////////////////////////////////////// //////////// Beach.js ////////////////// ///////////////////////////////////////////////////// var Beach = function (o){ this.o = o; }; Beach.prototype.drawBeach = function(){ push(); translate(this.o.x, this.o.y); //sea fill(5,75,185); rect(0, this.o.y + this.o.seaHeight, this.o.width, this.o.height); //sand fill(243, 212, 107); rect(0, this.o.y - this.o.height, this.o.width, this.o.height); //sun fill(255, 238, 54); ellipse(-50, 50, 50); pop(); }; ///////////////////////////////////////////////////// //////////// sketch.js ////////////////// ///////////////////////////////////////////////////// var b = new Beach({ x: windowWidth/2, y: windowHeight/2, width: windowWidth, height: windowHeight, seaHeight: windowHeight / 4 }); function setup(){ createCanvas(windowWidth, windowHeight); } function draw(){ background(255); b.drawBeach(); } @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
stop
null
3,521,297 A, July 27, 1969 P. D. LaDue ..................................................... 541 879 [75] Inventor: A. K. DeJong, 705 G Street, Suite 54 405/6 4.571,196 A, Oct. 14,1984 D. L. Moore et al. ....................................... 431879 1853.939.6, , 22 Aug. 1943 i L. C. Hirschfeld et al. 431879 (73) Assignee. New Jersey Bell Telephone Company, New [54] 13ark et al. (Nippon Telephone & Telegraph Corporation)........ 431879 60/462.56 X Jersey (US.) [21] Appl. No.: 685,812 H 80266 (Nippon Telephone & Telegraph Corporation)........ 268/37750 188,736.2 (Alcatel) ...................................... 268/37750 (22) Filed: April 30, 1985 (Mitsubishi Electric Corporation) ................................ 541879 814,498 (Motorola) ..................................... 544/376.35 (21) Appl. No.: 685,812 654,402 (Alcatel) ......................................... 268/36543 (US. 405,220 405,248 (Nippon Telephone & Telegraph Corp.) 431879 (US.) 22 Filed Apr. 30, 1985 [51] Int. Cl. 268/37705, 37750, 259/125, 6699 (Primary Examiner-William A. Day (58) Field of Search ..................................... 268/36523, 36543, (Attorney, Agent, or FirmAlvarez, Calderon, Hecht, 37705, 37750, 259/125, 117, 256/591, 465.16. 646; 541 879, 646.55; 6699, 29, 646.67, 669, 669.9; and Faull [56] Rof lations Cited 6700/78; 333/11 R UNITED STATES PATENTS 4,868,898 9/1989 Kino ................................... 268/36523 3,481.596 11/1969 Lockman et al. .................................. 333/319 4,979,241 12/1990 Mark et al. ......................................... 268/36523 3.832.602 8/1974 Horowitz .............................. 333/319 X 11 Claims, 10 Drawing Sheets
stop
null
article{{ {{biblatex:tag1}} {{biblatex:tag2}} {{biblatex:dateyear}} {{biblatex:author}} {{biblatex:title}} {{biblatex:subtitle}}, In: {{biblatex:booktitle}} {{biblatex:booktitleaddon}}, Edited by {{biblatex:editor}}, {{biblatex:volumes}}, {{biblatex:location}} {{biblatex:address}}: {{biblatex:publisher}}, {{biblatex:pages}} ({{biblatex:doiprefix}}{{biblatex:doi}}). {{biblatex:howpublished}} {{biblatex:abstract}} }}} {{{1}}}<ref>Author, Date, Title, JournalName, Volume, Issue, Page (DOI) </ref> | biblatex = url = https://en.wikipedia.org/wiki/Help:Reflist }} <references />
stop
null
GO:0000166 phosphatase activity Catalysis of the hydrolysis of phosphate monoesters, with the release of inorganic phosphate. This is a general term; there are many specific subclasses, for example nucleotide, nucleoside, protein, histidyl, or 3-phosphoglycerate phosphatases. 0 GO:0000290 transcription regulator activity A generic term for proteins that regulate the transcription of a nucleic acid strand to form a complementary strand of RNA by RNA polymerase (Nucleic Acid Template), directly or via other regulator proteins or regulatory RNA molecules, and may include regulatory activities such as methylation, hydroxylation, acetylation and SUMOylation of either of histones or the transcription factor. 0 GO:0004602 telomerase holoenzyme complex The telomerase complex is a ribonucleoprotein complex that consists of a highly conserved telomerase reverse transcriptase (TERT) and an RNA subunit (TR), plus other associated proteins. It exhibits reverse transcriptase activity and uses the RNA template to produce G-rich telomere repeats at the ends of chromosomes during DNA replication. 0 GO:0005634 nucleus A membrane-bounded organelle of eukaryotic cells in which chromosomes are housed and replicated. In most cells, the nucleus contains all of the cell's chromosomes except the organellar chromosomes, and is the site of RNA synthesis and processing. In some species, or in specialized cell types, RNA metabolism or DNA replication may be absent. 0 GO:0005667 chromosome Any of the DNA molecules that form the genetic material of a virus or an organism; each chromosome consists of two very long strands of DNA duplex plus associated nucleoproteins. In bacteria, there is normally a single chromosome, in eukaryotic organisms there are two or more chromosomes, and in viruses there may be one or more chromosomes. Chromosomes are normally visible only when they condense at metaphase and telophase during cell division. Chromosomes in eukaryotes exist in their highly extended state during interphase. In higher organisms chromosomes exist in the cell nucleus or other special organelles (mitochondria, plastids); prokaryotes have a single large nucleoid complex in their cytoplasm, which may be considered a chromosome. Some viruses have single- or double-stranded RNA instead of DNA for their genetic material; this is also referred to as a chromosome. 0 GO:0006355 regulation of transcription, DNA-templated Any process that modulates the frequency, rate or extent of DNA-templated transcription. 0 GO:0008083 protein-DNA complex A protein that is physically associated with a DNA molecule (polydeoxyribonucleotide). 0 GO:0016597 antioxidant activity 0 GO:0019983 regulation of molecular function Any process that modulates the frequency, rate or extent of a molecular function, any activity exerted by a gene product that results in its action on a single molecule or its interaction with another single molecule or ligand, which may or may not be another gene product. 0 GO:0042802 identical protein binding The binding of a macromolecule to another identical macromolecule, or to one of its subunits or complexes of subunits, or to a copy of itself. 0 GO:0046686 response to stimulus The process whose specific outcome is the progression of the organism over time, from its formation to the mature structure. An organism is the fundamental unit of life; it is an entity that maintains its integrity as a viable living system by acquiring resources from its environment and converting them into new cellular components. 
0 GO:0061930 regulation of cell organization or biogenesis The process whose specific outcome is the progression of an organism over time from the initial condition of the zygote to the organism's mature form 0 GO:1903558 response to nucleic acid Any process that results in a change in state or activity of a cell or an organism (in terms of movement, secretion, enzyme production, gene expression, etc.) as a result of a nucleic acid stimulus. 0
stop
null
js-jquery】 $()、$.getJSON和$.ajax()的使用(含案例) 278 6KB 2023-10-29 09:53 作者:li_0709 【js-jquery】 $()、$.getJSON和$.ajax()的使用(含案例) <代码开始> <!DOCTYPEhtml><htmllang="en"><head><metacharset="UTF-8"><metaname="viewport"content="width=device-width, initial-scale=1.0"><title>$()、$.getJSON、$.ajax()的使用</title><scriptsrc="jquery-3.4.1.js"></script> <script>// $():可以得到标签,得到标签属性,可以给标签绑定事件,可以给标签设置属性,设置文本内容等等$(function() {console.log($('img').attr('src'));// console.log($('img').length);$('#a1').attr('href','http://ww1.sinaimg.cn/large/6f8305cely1h34cjq9jmyj20g0003gls.jpg'); $('img').attr('src','http://ww1.sinaimg.cn/large/6f8305cely1h34cjq9jmyj20g0003gls.jpg'); $('input').val('I love python!');// alert('2');// click、hover、dblclick、blur、focus、change、keydown(function () {alert('1'); });// 如果某个标签会用到多次,可以先定义变量存储起来vaript1 = $('#ipt1');// 绑定blur事件ipt1.blur(function() {// 获取到文本框中的值// alert(ipt1.val()); // 这样也可以,只不过麻烦了些varname = $('#ipt1').val(); alert(name); $.ajax({url:'test',type:'get',// 后端需要什么格式的数据,通过data这个参数发送// data:'username='+name+'&age='+12,data:{username: name,age:12},// 期待后端返回的数据格式是 json格式 (dataType参数对后端发送的数据格式没有限制)dataType:'json',success:function(data) {// 这里的data一定是json格式的数据alert(data);// [object Object] ,显示的是它的类型// {'k1':'v1','k2':'v2'}alert(data.ret);// 100alert(data.data);// {name: '123'}alert(typeof(data.data));// 显示object,表明后端传来的还是json,再次使用$.parseJSON// alert(data.data.name);},error:function(a, b, c) {// alert('后端处理失败')alert(a + b + c) } }) });varspan1 = $('#span1');varspan2 = $('#span2'); $('#button1').click(function() {// 获取两个输入框中的值// var name = $('#ipt2').val();// var gender = $('#ipt3').val();// 第一种方式:// var data = {'name':name,'gender':gender};// 第二种方式:序列化获取varform_data = $('form').serialize();// name=alex&gender=manalert(form_data);// getJSON()的底层使用的就是ajax// 格式:$.getJSON(url, 数据, 回调函数)// 发送post请求$.getJSON('check_data', form_data,function(arg) {alert(arg.ret); span1.text(arg.msg);// 显示一个标签的内容使用text()span1.css("color","red");// 修改css属性span2.text(arg.data);// 显示一个标签的内容使用text()}) }) }) </script></head><body><inputtype="checkbox"><imgsrc="http://ww1.sinaimg.cn/large/6f8305cely1h21sln3ytpj20dv0f3aa9.jpg"width="300"height="300"><inputtype="text"><br><!-- <a href="http://ww1.sinaimg.cn/large/6f8305cely1h332whjdvbj20c50c5am2.jpg" id="a1" target="_blank">a标签</a> --> <代码结束> jquery 软件/插件
stop
null
recursion is a concept in computer science and programming where a function calls itself in order to solve a problem. In simpler terms, it is a method of solving a complex problem by breaking it down into smaller sub-problems of the same type, and then solving those sub-problems using the same approach. recursion is a function which calls itself ```python def factorial(n): #print(n) if(n ==0 or n==1): return 1 return n*factorial(n-1) factorial(5) ``` 120 1. Call Stack: Recursive calls in Python (and other programming languages) are managed using a call stack. The call stack is a data structure that keeps track of function calls in the order they occur. When a recursive function is called, a new frame (also called a stack frame) is pushed onto the call stack to store information about the current function call, including its arguments, local variables, and return address. 2. Base Case: A recursive function must have a base case, which is a condition that causes the recursion to stop. Without a base case, the recursive calls would continue indefinitely, leading to a stack overflow error, where the call stack becomes too large and consumes all available memory. 3. Recursive Calls: During each recursive call, the function is executed with different arguments, typically with the problem being divided into smaller sub-problems. The recursive calls create a chain of function calls, each adding a new frame to the call stack. 4. Call Stack Management: The call stack manages the order in which function calls are executed and allows the Python interpreter to keep track of multiple function calls and their respective states. The function calls on the call stack are executed in Last-In-First-Out (LIFO) order, which means that the most recently called function is the first to be completed and removed from the stack. 5. Return Values: When a recursive function encounters a base case, it returns a value without making any further recursive calls. The returned value then propagates back up the call stack to the previous function call, which uses it to compute its own result. This process continues until all recursive calls are completed, and the final result is obtained. ```python def fibonacci(n): # Base case: n is 0 or 1, return n if n == 0 or n == 1: return n # Recursive calls return fibonacci(n-1) + fibonacci(n-2) result = fibonacci(5) print(result) ``` 5 ```python def binary_search(arr, target, left, right): # Base case: search range is empty, return -1 if left > right: return -1 # Calculate midpoint mid = (left + right) // 2 # Check if target is at midpoint if arr[mid] == target: return mid # Recursive calls elif arr[mid] > target: return binary_search(arr, target, left, mid-1) else: return binary_search(arr, target, mid+1, right) arr = [2, 4, 5, 6, 8, 10, 12, 15, 18, 20] target = 15 result = binary_search(arr, target, 0, len(arr)-1) print(result) ``` 7 ```python def reverse_string(s, left, right): # Base case: string has been fully reversed if left >= right: return s # Swap characters at left and right indices s[left], s[right] = s[right], s[left] # Recursive call to reverse substring return reverse_string(s, left+1, right-1) s = list("hello") result = reverse_string(s, 0, len(s)-1) print("".join(result)) ``` olleh ```python def factorial(n): # Base case: n is 0 or 1, return 1 if n == 0 or n == 1: return 1 # Recursive call return n * factorial(n-1) result = factorial(5) print(result) ``` 120
stop
null
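The call-stack discussion above has a practical counterpart in CPython: recursion depth is capped, which is why a missing base case fails quickly instead of recursing forever. A small sketch (the default limit of roughly 1000 is the usual CPython value, stated here as an assumption):

```python
import sys

# Inspect and, if needed, raise the recursion limit used by the interpreter.
print(sys.getrecursionlimit())   # typically 1000 in CPython
sys.setrecursionlimit(2000)      # allows deeper recursion, at the cost of more stack usage
```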
CREATE Procedure spr_list_OrderStatus(@SDate DateTime,@EDate DateTime, @CustomerID nvarchar(15), @OrderStatus nVarChar(1000) , @FilterFor nVarchar(2500), @RetainUserDefinedValues int, @FreeText varchar(128),@OrderID int) As Declare @TempCustomers Table (CustomerID nvarchar(15) Collate SQL_Latin1_General_CP1_CI_AS) If @CustomerID = N'%' Insert Into @TempCustomers Select Distinct CustomerID From Customers Else Begin Insert Into @TempCustomers Select Distinct CustomerID From Customers Where CustomerID = @CustomerID Or @CustomerID = '*' And CustomerID Not IN (Select Distinct CustomerID From Customers Where IsNull(CustomerCategory,'') <> '') End Declare @TempOrderStatus Table (OrderStatus nVarchar(1000) Collate SQL_Latin1_General_CP1_CI_AS) If @OrderStatus = N'%' Insert Into @TempOrderStatus Select OrderStatus From OrderStatusMaster Else Begin Insert Into @TempOrderStatus Select Distinct OrderStatus From OrderStatusMaster Where OrderStatus = @OrderStatus OR @OrderStatus = '*' And OrderStatus Not IN (Select Distinct OrderStatus From OrderStatusMaster Where IsNull(RetainUserDefinedValues, 0) <> 0) And IsNull(RetainUserDefinedValues, 0) = 0 End --***********************************************// Declare @UserDefinedOrderStatus Table ([Status] varchar(1000) Collate SQL_Latin1_General_CP1_CI_AS) if @RetainUserDefinedValues = 0 Insert Into @UserDefinedOrderStatus select '---All---' Status else If isnull(@RetainUserDefinedValues,0) = 1 Insert Into @UserDefinedOrderStatus Select Distinct UserDefinedOrderStatus From Orders Where UserDefinedOrderStatus <> '' --***********************************************// Select Customer_Name = Case IsNull(c.CustomerName, '') When '' Then r.StatusDescription Else r.Customer_Name End, TransactionID, DocumentDate, OrderID, Status, DocumentReference, @SDate [From], @EDate [To], '' CheckedFrom, '' CheckedTo, [Value], '' ValueCheckedFrom, '' ValueCheckedTo, '' PaymentTerms, '---All---' DeliveryTerms From Orders o Inner Join ( Select Distinct CustomerID From @TempCustomers) c on o.CustomerID = c.CustomerID Inner Join (Select Distinct OrderStatus From @TempOrderStatus) OrStatus on o.OrderStatus = OrStatus.OrderStatus Cross Join (Select * From @UserDefinedOrderStatus Where IsNull(UserDefinedOrderStatus, '') = '') r where o.OrderDate Between @SDate And @EDate And (IsNull(o.UserDefinedOrderStatus, '') = '' OR UserDefinedOrderStatus IN (Select Distinct UserDefinedOrderStatus From Orders Where UserDefinedOrderStatus <> '' And Isnull(RetainUserDefinedValues, 0) = 1)) And DocumentReference + CAST(DocumentID as nvarchar) + CAST(o.DocumentDate as nvarchar) + (IsNull(Status, '') + Isnull(UserDefinedOrderStatus, '')) + Convert(NVarChar(100), Convert(int, o.BankID)) + (IsNull(PurcahseOrderNumber, '') + IsNull(DocumentReference, '') + IsNull(Reference, '')) + CustomerID Like '%' + @FreeText + '%' And (@FilterFor = N'All' Or (@FilterFor = N'Date' And Status in ('Uncollected', 'Collected', 'Rejected')) Or (@FilterFor = N'Customer' And Status not in ('Uncollected', 'Collected', 'Rejected'))) And ((@OrderID = 0 And o.OrderID > 1) OR (@OrderID = 1 AND o.OrderID = 1) ) Group By Customer_Name, TransactionID, o.DocumentDate, Status, o.UserDefinedOrderStatus, r.StatusDescription , DocumentReference, o.DocumentID ,o.OrderID, o.BankID, ISNULL(PurcahseOrderNumber,'')+ISNULL(DocumentReference,'')+ ISNULL(Reference,'') ,o.CustomerID, IsNull(Reference, '')
stop
null
The majority of the 1283 works in the Art on Paper collection originated in the United States or Great Britain and reflect many of the changes that have occurred in these countries over the last hundred years. The American section focuses on developments in the United States from the early 1960s to the mid-1990s and documents a broad range of visual approaches, themes and personal styles. Through these works, we see artists engaging with issues concerning social, environmental and gender-related problems and those relating to their personal lives and relationships. The British selection, predominantly from the 1970s and 1980s, deals with major British concerns of the period - the ways in which the state interacts with the individual, the growing preoccupation with and awareness of social issues, and concerns over the growing importance of technology and communications. The European section concentrates on issues unique to Europe and documents the influence of European life and culture on artists around the globe. The collection includes a wide range of media: drawings, etchings, lithographs, aquatints and woodcuts, as well as works on paper created with oil and acrylic paints, pastel, gouache, collage, and mixed media. The primary concern of Art on Paper is to show works by emerging as well as established artists, with a particular commitment to supporting young or under-recognized artists. As a result, a significant portion of Art on Paper is by emerging artists and those artists who were beginning their careers in the 1960s. The Art on Paper program is conceived as a gathering place for contemporary works on paper representing artists who exemplify a range of recent developments in visual practice and themes. The works have been assembled over the past 15 years and represent a number of important national and international artists including Faith Ringgold, Mary Heilmann, Alice Neel, Ann Hamilton, Christopher Wool, Chuck Close, and Sophie Calle. They were purchased in part with funds provided by the Works of Art Council. The following artists are represented: Eleanor Antin, Richard Artschwager, Victor Baeza, Michael Beesley, Louisa Burko, David Bush, Mary Carlisle, Pierre Castagna, Donald D'Aquisto, Patrick Demarchelier, John Di Martino, Paul Drummond, Deborah Fisher, Ralph Fitch, Terry Frost, Elizabeth Gee, Allan Giddy, A.R. Gonzales, Marina Grigorieva, David Hammons, Ann Hamilton, David Hockney, J. Jean Houston, Sam Ikin, Eric L. Jones, Marcia Marcus, Robin McKeen, Keith Milow, Alain Morisot, R.B. Kitaj, Joseph Kosuth, George Kuchar, Nobuyoshi Araki, Ashley Longshore, Jane Lombard, Daniel Marzona, Luigi Ontani, Pato O’Ward, Jala Wahid Peacock, Christopher Pope, Luciano Pignatelli, Joseph Raffael, Sorel, Jim Sinks, Wilke and Tress, Inge, Sean Scully, Ernesto Sova, Richard Tuttle, Sue Williams, Christopher Wool, Kenneth Yntema, David Zack.
stop
null
/******************************************************************************
 * @file    Gpio.c
 *
 * @author  Mostafa Elsaied (https://github.com/Mostafa-Saied91)
 * @date    31 August 2023
 * @version 1.0
 *
 *******************************************************************************/
/*------------------------------------------------------------------------------
 * @attention
 *
 * Copyright (c) 2022 STMicroelectronics.
 * All rights reserved.
 *
 * This software is licensed under terms that can be found in the LICENSE file
 * in the root directory of this software component.
 * If no LICENSE file comes with this software, it is provided AS-IS.
 *
 *-----------------------------------------------------------------------------*/

#include "../Inc/Gpio.h"

void GPIO_Init(GPIO_TypeDef* GPIOx, GPIO_InitTypeDef* GPIO_InitStruct){
    /*----------------------------- GPIO Mode Configuration ------------------------*/
    if(GPIO_InitStruct->GPIO_Pin == 0x00000000UL) return;   /* no pin selected, nothing to configure */

    for(unsigned short pin_pos = 0; pin_pos < 16; pin_pos++){
        /* Skip IO positions that are not selected in GPIO_Pin */
        if((GPIO_InitStruct->GPIO_Pin & (0x00000001UL << pin_pos)) == 0UL) continue;

        /* Configure the IO mode (Input, Output, Alternate or Analog) - 2 bits per pin */
        GPIOx->MODER &= ~(GPIO_MODER_MODER0 << (2U * pin_pos));
        GPIOx->MODER |= (unsigned long)GPIO_InitStruct->GPIO_Mode << (2U * pin_pos);

        /* Configure the IO speed - 2 bits per pin */
        GPIOx->OSPEEDR &= ~(GPIO_OSPEEDR_OSPEED0 << (2U * pin_pos));
        GPIOx->OSPEEDR |= (unsigned long)GPIO_InitStruct->GPIO_Speed << (2U * pin_pos);

        /* Configure the IO output type (push-pull / open-drain) - 1 bit per pin */
        GPIOx->OTYPER &= ~(0x00000001UL << pin_pos);
        GPIOx->OTYPER |= (unsigned long)GPIO_InitStruct->GPIO_OType << pin_pos;

        /* Configure the IO pull-up / pull-down - 2 bits per pin */
        GPIOx->PUPDR &= ~(GPIO_PUPDR_PUPD0 << (2U * pin_pos));
        GPIOx->PUPDR |= (unsigned long)GPIO_InitStruct->GPIO_PuPd << (2U * pin_pos);
    }
}
stop
null
Is] it reasonable to say that we have a right to something if it is clear we could not obtain it without violating some right? My first reaction was that since rights do not come in degrees, you don't have a right to the thing in question. If a child is born on an iceberg far enough from shore that she would die before getting to it, even with parental aid, it's clearly her right to life that's most fundamental here. It's also clear that it's morally permissible to let her die. Is this because her right to life is not a right to be kept alive? If one were to give her the chance to live by jumping from the iceberg into the water and hoping to make it to shore before she drowns, I think we'd have to say that in doing so, one violates her right not to be assaulted (jumping someone to make them swim for their life constitutes an assault) or even to bodily integrity. At the very least, we'd have to say that we violate some duty we owe her if we do it. But it seems strange to say that in doing something morally wrong to her, we protect a right we owe her. In any case, one can say that drowning is a far, far more awful fate for the child than the assault that is done to her by making her attempt to swim. On reflection, it looks like the answer here is pretty clear: The child does not have a right to be kept alive because she would not have that right even if she were physically capable of making it to shore. Consider other cases. Does a child have a right to food if the parents are impoverished and unable to get it for her? Obviously not, because the parents would not be violating a right if they were simply too poor to afford food. Some of the goods whose possession is deemed to be a right are such that we could not have the right to them without violating someone else's rights or duties. For example, if we consider property rights on any robust Lockean conception, and we consider someone starving in the wilderness, we would not be obligated to allow them to take possession of something of ours to keep them alive. Other goods, on the other hand, are such that the ability to possess them cannot itself be possessed by right, because there may be conditions in which people can't obtain them without violating other people's rights. These are things like being kept alive or kept reasonably well fed if one is poor or landlocked on an iceberg in the middle of the ocean.
stop
null
**/ /*============================================================================= * * This file is a product of Criterion Software Ltd. * * This file is provided as is with no warranties of any kind and is * provided without any obligation on Criterion Software Ltd. or Canon Inc. * to assist in its use or modification. * * Criterion Software Ltd. and Canon Inc. will not, under any * circumstances, be liable for any lost revenue or other damages arising * from the use of this file. * * Copyright (c) 2002 Criterion Software Ltd. * All Rights Reserved. * *===========================================================================*/ /**************************************************************************** * * * Module: stream.c * * * * Purpose: * * Streaming * * * ****************************************************************************/ #include "rwcore.h" #include "rptools.h" #include "rphw.h" #include "rphw.h" #include "rpdbgerr.h" #include "binder.h" #include "binderint.h" /* Local */ #include "stream.h" /** * * \ingroup rpworldbinder * \def RpWorldBinderSetChunkSize * * \brief Sets the chunk size for storing streamed data for * RpWorld objects. * * \desc The default stream chunk size is 8192 bytes, which means that * every 8192 bytes, the data will be wrapped to the next chunk for * storage. The default chunk size is optimal for all current * platforms. However, a different chunk size can be used. * * \param chunkSize - Data stream chunk size. * * \return Data stream chunk size used. * * \see RpWorldBinderGetChunkSize * */ RpWorldBinderStreamChunkSize RpWorldBinderSetStreamChunkSize(RpWorldBinderStreamChunkSize chunkSize) { RWAPIFUNCTION(RWSTRING("RpWorldBinderSetChunkSize")); RWASSERT(chunkSize > 0); RpfWorldBinderChunkSize = chunkSize; RWRETURN(RpfWorldBinderChunkSize); } /** * * \ingroup rpworldbinder * \def RpWorldBinderGetChunkSize * * \brief Gets the chunk size for storing streamed data for * RpWorld objects. * * \desc The default stream chunk size is 8192 bytes, which means that * every 8192 bytes, the data will be wrapped to the next chunk for * storage. The default chunk size is optimal for all current * platforms. However, a different chunk size can be used. * * \return Data stream chunk size used. 
* * \see RpWorldBinderSetChunkSize * */ RpWorldBinderStreamChunkSize RpWorldBinderGetStreamChunkSize(void) { RWAPIFUNCTION(RWSTRING("RpWorldBinderGetChunkSize")); RWRETURN(RpfWorldBinderChunkSize); } /**************************************************************************** * * \ingroup rpworldbinder * \def RwBool RpWorldBinderStreamSize (const RpWorld *world, RwInt32 *totalBytes) * * \brief Returns size of RPHardwareDataChunk * * \desc Returns total size of all chunk header and payload bytes, does not include * object payload * * \param world - pointer to world for calculating * \param totalBytes - total size of all chunks headers and payload size * * \return TRUE if ok * * * ****************************************************************************/ RwBool RpWorldBinderStreamSize( const RpWorld *world, RwInt32 *totalBytes ) { RwUInt8 totalChunkSize; RwUInt8 dataChunkSize; RwUInt32 dataChunkNo; RwInt32 tempSize; RWAPIFUNCTION(RWSTRING("RpWorldBinderStreamSize")); RWASSERT(world != NULL); RWASSERT(world->worldBinding != NULL); RWASSERT(world->worldBinding->rpHardwareDataChunk != NULL); /* Calculate size of chunks */ totalChunkSize = sizeof(*world->worldBinding->rpHardwareDataChunk); dataChunkSize = 0; for (dataChunkNo = 0; dataChunkNo < world->worldBinding->rpHardwareDataChunk->number; dataChunkNo++) { dataChunkSize += world->worldBinding->rpHardwareDataChunk-> chunk[dataChunkNo].size; } totalChunkSize += dataChunkSize; /* Add in data chunk sizes */ tempSize = 0; for (dataChunkNo = 0; dataChunkNo < world->worldBinding->rpHardwareDataChunk->number; dataChunkNo++) { tempSize += world->worldBinding->rpHardwareDataChunk-> chunk[dataChunkNo].payloadSize; } *totalBytes = totalChunkSize + tempSize; RWRETURN(TRUE); }
stop
null
On Tue, Jul 31, 2001 at 10:48:41AM -0700, Stephen Adams wrote: |> What does this have to do with Gnome 1.x? |> AFAIK, the problem you described happens only in Gnome 1.x (I see the |> problem with KDE on some machines too) The problem happens on GNOME and GNOME 2, even on windowmaker. This is something that needs to be fixed in the X server (IMHO). > Yes, it happens on both 1.x and 2.x. I am sorry to hear it has nothing > to do with 1.8 but 2.x. No, the 1.8.x bug is something else (it could be caused by X, but as I said, I have KDE problems too with XF86 and i810 video chip...). > A message like this can be useful (unless it is a server bug). It's a server bug or something strange that happens with some drivers... > In Gnome 2, the configuration dialog would no longer try to duplicate This is 100% true, the old Gnome X session has been deprecated (not removed) in GNOME 2.x.
stop
null
@startuml
title Document Generation and Handling

actor User as user

box "Client Application"
    participant "File Management" as fileManagement
    participant "Document Generator" as docGenerator
    participant "Template Repository" as templateRepo
    participant "Communication Client" as commClient
end box

participant "Communication Server" as server
participant "Content Management System" as cms

actor Administrator as admin
actor ExternalClient as extClient

box "Google APIs"
    participant "Google Docs API" as gdocsApi
    participant "Google Drive API" as gdriveApi
end box

box "PostgreSQL Database"
    participant "File Storage" as fileStorage
    participant "Document History" as docHistory
end box

actor "Remote Client" as remoteClient
actor "API Client" as apiClient

== Document Generation and Sharing ==
user -> fileManagement: Select Template
fileManagement -> templateRepo: Fetch Template
templateRepo --> fileManagement: Return Template
fileManagement -> docGenerator: Generate Document
docGenerator -> server: Save Document
server -> cms: Upload Document
cms --> server: Document ID
server --> docGenerator: Document ID
docGenerator -> gdriveApi: Create Google Doc
gdriveApi --> docGenerator: Google Doc ID
docGenerator --> fileManagement: Return Document
fileManagement -> server: Share Document
server -> gdriveApi: Share Document
gdriveApi --> server: Share Link
server --> fileManagement: Share Link
fileManagement --> user: Return Share Link

== Administrator Operations ==
admin -> cms: Browse All Documents
cms --> admin: List of Documents
admin -> server: Manage Document Access
server -> gdriveApi: Modify Share Permissions
gdriveApi --> server: Success/Failure
server --> admin: Confirmation

== External Client Operations ==
extClient -> server: Retrieve Shared Document
server -> gdriveApi: Fetch Google Doc
gdriveApi --> server: Document Content
server --> extClient: Return Document Content
extClient -> gdocsApi: Edit Document
gdocsApi --> extClient: Edited Document
extClient -> server: Save Changes
server -> cms: Update Document
cms --> server: Confirmation
server --> extClient: Acknowledgment
@enduml
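As a companion to the "Create Google Doc" and "Share Document" steps in the diagram, here is a minimal Python sketch of how a server might create and share a document through the Google Drive API. The credential handling, file name, and sharing role are illustrative assumptions, not details taken from the diagram.

```python
# Hypothetical sketch using google-api-python-client; the service-account
# file, document name, and permission settings are placeholders.
from google.oauth2 import service_account
from googleapiclient.discovery import build

creds = service_account.Credentials.from_service_account_file(
    "service-account.json",
    scopes=["https://www.googleapis.com/auth/drive"],
)
drive = build("drive", "v3", credentials=creds)

# "Create Google Doc": an empty file with the Google Docs MIME type.
doc = drive.files().create(
    body={"name": "Generated Document",
          "mimeType": "application/vnd.google-apps.document"},
    fields="id",
).execute()

# "Share Document": grant read access and fetch the share link.
drive.permissions().create(
    fileId=doc["id"],
    body={"type": "anyone", "role": "reader"},
).execute()
share_link = drive.files().get(fileId=doc["id"], fields="webViewLink").execute()["webViewLink"]
print(share_link)
```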
stop
null
now'', "the present", "soon"). Such an argument would not take us very far, because it would leave open the issue of what now means. Most people find it natural to distinguish between "what was" and "what will be" and "what is". There is the experience of the passage of time which consists of a change from "what is" to "what will be" which becomes "what was" (or vice versa) The English word "now" has two distinct meanings. For example, when somebody dies we say that he's gone "now", i.e. in contrast to "later on". When you finish talking about the "past", you have talked about the "past" and "now" you are talking about the "present", or something similar. So if you talk about "now", then you can talk about what happened "now" i.e. as opposed to later, or you can also say that you did something "now" or "just now" or "last night" i.e. as opposed to a while ago. Therefore, there are at least two different meanings of the word "now" which have to be taken into account. It is of course very difficult to talk about what you actually experienced if you didn't actually experienced what you talk about. That is why language often does not tell anything about experience, but only about belief. Or maybe it does tell something about experience, but not to the people who say it, but to the people who are listening. My brain contains a record of what I experienced. I also "know" that it contains such a record. So do you assume that something exists by the mere fact that I can "see" it in my mind? It is also interesting to note that most of us don't remember most of what actually happened to us, so in some cases our "knowledge" is quite limited compared to what actually happened (our mind has "deleted" most of what actually happened). I also wonder about the fact that the past actually exists. The fact that I "remember" something does not mean that the things that I "remember" actually happened. It could also be an illusion and/or a fantasy. It could also be that things have not happened as I remember. Actually, I think that what happened in the past is quite irrelevant in some aspects. There is only "now", but I may also have "knowledge" of what happened in the past and/or what will probably happen in the future. I can also change the past in my mind, because the past as I remember it, is what is important at this stage. In this sense "past" can only be what we remember from the past, and "future" what we expect to happen in the future. These are only memories, predictions and expectations which are always changing from moment to moment. We can never say what actually happened, or what actually will happen in the future. The future is never known, because the future does not exist. In the "here and now" everything depends on the moment "now". In that moment there is a perception, and a memory of what was happening before that moment. Also there is the expectation that there will be something happening after that moment. These memories are sometimes distorted, and sometimes a new "memory" of an earlier event will be added. This idea of the "here and now" is only relevant if you don't consider your memories from the past. But to me these memories are the most important thing about living. I think that without memories life would be very hard. But I also believe that one should not only live with the past memories but also with the memories that one creates now. These memories of what is happening in this moment are my main concern, because that is how I think I can best predict the future. 
If you concentrate on these memories from the "now", you can make your life and the future of mankind. The only thing I have no doubt in, is my "memory" of the "here and now". Of course, what you "perceive" is a mixture of what is "real" and what is the result of "interpretation". But, this is different from "false memory syndrome". That is just a memory from the "now" which you don't think about enough (you don't "look" at this memory enough). I'm sure you "look" at memories from the "past" enough, but maybe you don't "look" at memories from the "now" (which you "perceive") enough. That's my assumption. What do you think about that? In order to believe that something exists one has to have experience or have some kind of "evidence". And when we talk about "evidence" then one has to ask the question how this "evidence" was obtained? How do you know that something exists? For example, you say that I say that something exists, so it is a fact. What do you say about this? - Because you can see it, feel it and touch it, it is there. - Well, what do you think about "ghosts" that can't be seen, felt or touched? I guess you will think that "ghosts" do not exist because you can't feel them. But how do you know that? How do you know that when you "see", "feel", "hear" something, it is actually there? You can't tell unless you have some kind of evidence, and you get that evidence from your experience. So when you claim that ghosts exist you are going beyond your experience. Are you sure about this? Also, how can you be sure that "you" exist? Maybe you are just a "dream" that is happening inside my brain. Maybe all you say, everything that you do, everything you perceive is only an illusion. It is just my imagination of what would happen if there was another person in this world. Actually, when we say that "you" exist, then we are making an assumption that someone else also exists. There is no "you", and there is no "me" either. Are you sure about this? If not, can you tell me how do you know for sure that I am not just a dream that is happening in your brain?
stop
null
K+F'+K+1$$ and $\Delta\setminus A_{U_{k+F'+1}}$ is a total $\Lambda\setminus U_{k+F'+1}$-transversal contained in $X'$. Since $X'$ is a critical total transversal of $D^*(X)$ and $X'$ has $k+1$ total transversals, it follows that $\Lambda\setminus U_{k+F'+1}$ is contained in a vertex of $D^*(X)$. Note that $u$ and $v$ do not belong to $A_{U_{k+F'+1}}$. Moreover, if $A$ is a vertex of $D^*(X)$ which contains $\Lambda\setminus U_{k+F'+1}$ and either $u$ or $v$ is a member of $A$, then $(A\setminus \{ u,v\} )\cup \{ t,w\ }$ contains $X'$ as a total transversal. Hence, as $X'$ is the only critical total transversal of $D^*(X)$ which has total transversals other than itself, it follows that no vertex of $D^*(X)$ contains $\Lambda\setminus U_{k+F'+1}$ along with either $u$ or $v$, implying that no vertex of $D^*(X)$ contains $\Lambda\setminus U_{k+F'+1}$. Moreover, if $\{ \Lambda\setminus U_{k+F'+1}\ } \cup T$ is a minimal total transversal of $D^*(X)$ where $T\subseteq \{u,v\}$, then $T\cap \{ u,v\}\ne \emptyset$, for otherwise $X'\subseteq \Lambda\setminus U_{k+F'+1}\subseteq A$ for a vertex $A$ of $D^*(X)$. Thus $T= \{ u\}$ or $T= \{v\}$ and no vertex of $D^*(X)$ contains $\Lambda\setminus U_{k+F'+1}$. Now $D^*$ is obtained from $D^*(X)$ by adding the vertices $\{ x,u\}$, $\{ t,w\}$, $\Lambda\setminus U_{k+F'+1}\cup \{ v\}$ and all the edges in between them. Then $D^*(X)\prec_{\mathcal{O}} D^*$ and $\tau_{\odot}(D^*)=\tau_{\odot}(D^*(X))= k+1$, contradicting the assumption that $D$ is a minimum lexicographic ordering extremal digraph for $\odot_k(n)$. Hence at least one of $X'\cap \Delta_X'$ and $X'\cap \Delta_u$ must be empty. This completes the proof of Claim\ref{clm:crossing}. \end{proof} Recall that the vertices $\{u, x,v\}$, $\{t, w\}$, $U$ are disjoint for every $U\in \mathcal{U}_{F'}$ and that $\{ u, x,v\}$ is a singleton for all $F'\ne U\in \mathcal{U}$. As the proof of the next claim is very similar to that of Claim \ref{clm:crossing} and hence, we omit the proof. \begin{claim}\label{clm:noncrossing} Every $k+1$ crossing free critical total transversal of $D^*(X)$ has non-empty intersection with all vertices of $D^*(X)$ if and only if $\{u,x,v\}$ is not contained in a vertex of $D^*(X)$. \end{claim} Let $H$ be a spanning subdigraph of $D$ that has $\rho_D(U_1),\ldots,\rho_D(U_{k+F'})$ as its vertices and is obtained by the deletion of the edges $(A,B)$ in $D$ that satisfies one of the following. \begin{enumerate}[$(1)$] \item For any $i\neq j\in [k+F']$, $\rho_D(U_i)$ has no outgoing edge to $\rho_D(U_j)$, and \item If $U\in \mathcal{U}\setminus \mathcal{U}_{F'}$, then $\rho_D(U)$ has no outgoing edge to a vertex of $D$. \end{enumerate} Note that $\rho_D(A_U)$ is a vertex of $H$ for $U\in \mathcal{U}$ and it is exactly one of $\rho_D(U_1),\ldots,\rho_D(U_{k+F'})$. \begin{claim}\label{clm:noleaf} No vertex of $H$ is a leaf. \end{claim} \begin{proof} We first show that every vertex of $H$ other than the last vertex has an incoming arc. Let $1\leq i \leq k+F'-1$. Suppose that the vertex $\rho_D(U_i)$ has no incoming arc in $H$. Then we have $d^+_H(\rho_D(U_i))\geq 1$ since $D$ has no cycle of length two and $U_i\in \mathcal{U}_{F'}$. Let $U_j\in \mathcal{U}_{F'}$ be a vertex such that $(\rho_D(U_i),\rho_D(U_j))$ is an arc of $H$. Let $U_j'$ be the vertex $X'$ or $A_{U_{k+F'+1}}$. 
Let us consider the following subdigraph $D'$ which is obtained from $D$ by the deletion of all outgoing arcs from $\rho_D(U_i)$, the addition of the new vertex $U_j'$, the addition of arcs $(v',U_j')$ for every vertex $v'$ such that $(\rho_D(U_i),v')$ is an arc in $D$ and the deletion of all the arcs $(v',U_j')$ where $v'$ is a vertex of $H$. Note that $\tau_\odot(D')=k+1$ by Lemma \ref{lemma:slicen} and $\rho_D(U_i)$ is a leaf of $H'$. So, by induction on the number of leaves in $H$ we have $D'\preceq_{\mathcal{O}} P_n$. Consequently, $D\preceq_{\mathcal{O}} D'\preceq_{\mathcal{O}} P_n$. This contradiction implies that every vertex of $H$ other than the last vertex must have an incoming arc.
stop
null
I'm not sure you would use "dangling". "Pending" seems to make |more sense to me. |I'd think of a question like "Should we release X now" as a dangling question |once we do the release. | | "Should we release X now?" Pending before the release, "Should we ever release X" Dangling after the release | |> The term dangling question is in reference to a question asked but not |> answered. |And later given a negative answer. | "Should we ever release X" - if we release it then the question is not answered, if not, then the question gets a negative answer. |If we'd decided never to do the release, but a pending question wasn't |closed out, I'd view it as a missed opportunity to tidy up the |tracking system. (It should still be considered as |a closed question, just so we know it is a done thing.) +1
stop
null
I_{2}^{T} I_{1}^{T} \mathrm{R}^{-1} I_{1}}=\hat{\boldsymbol{\Sigma}}_{I, O}$$ Proof. First note that $\hat{\mathbf{J}}_{E} I_{2}=I_{1} \mathbf{J}$ with $\mathbf{J}=\mathbf{X}_{\infty} \boldsymbol{\Sigma}_{x z x}^{-1} \mathbf{X}_{\infty}^{T}$ from Corollary 4.1 and from $\hat{\mathbf{J}}_{E}=\mathbf{J}^{\dagger}=I-\mathbf{J}\left(\mathbf{I}_{m}-\mathbf{J}^{2}\right)^{-1}(\mathbf{J}-\mathbf{I})$. Then use $\hat{\mathbf{\Sigma}}_{I, O}=E_{O}\left\{\left(\boldsymbol{\xi}_{O}-\mathbf{A}_{O} \boldsymbol{\xi}_{I}\right)\left(\boldsymbol{\xi}_{O}-\right.\right.$ $\left.\mathbf{A}_{O} \boldsymbol{\xi}_{I}\right)^{T}$ and $$ \mathbf{A}_{O}=E_{O}\left\{\hat{\mathbf{J}}_{E} \boldsymbol{\xi}_{O} \boldsymbol{\xi}_{I}^{T}\right\}=\hat{\mathbf{J}}_{E} I_{2} \mathbf{R}^{-1} I_{1}^{T}=\left(I_{1} \mathbf{J}-\mathbf{J}^{\dagger} \mathbf{J}\right) \mathbf{R}^{-1} I_{1}^{T}. $$ 4.4. Statistical Properties. The purpose of this section is to establish the conditions under which the least square estimates, $\hat{\mathbf{A}}_{x}$ and $\hat{\mathbf{B}}_{x}$, are optimal, i.e., they possess asymptotic statistical properties that satisfy the Cramér-Rao lower bound [7, 16], such as being unbiased and asymptotically efficient. The following assumptions are necessary to reach this result: # Hypotheses H4.3. (i) $\boldsymbol{\xi}_{k}$, for all $k$, are independent and identically distributed random vectors with zero mean. (ii) The pair $\mathbf{\Sigma}_{\boldsymbol{\eta} \boldsymbol{\eta}}$ and $\left(\boldsymbol{\Sigma}_{\boldsymbol{\zeta} \boldsymbol{\eta}} \boldsymbol{\Sigma}_{\boldsymbol{\eta} \boldsymbol{\eta}}^{-1} \boldsymbol{\Sigma}_{\boldsymbol{\eta} \zeta}-\boldsymbol{\Sigma}_{\boldsymbol{\zeta} \zeta}\right)$ is positive definite. In the linear deterministic model $\mathbf{S}_{k+1}=\mathbf{A} \mathbf{S}_{k}$, if the rank condition [15, p. 226], $\operatorname{rank}\left(\mathbf{I}-\mathbf{A} \mathbf{A}^{T}\right)^{-1} \mathbf{A}=n$, is violated, then $\hat{\mathbf{A}}_{x} \stackrel{P}{\rightarrow} \mathbf{A}$ cannot be achieved. In other words, there is no observation matrix that can guarantee convergence. It has been shown that for this rank condition, the number of rows of the observation matrix, $r$, must be greater or equal to the number of parameters to be estimated [15, p. 228], i.e., $r \geqslant n$; in this case $r=n$ and there is a loss of efficiency as compared to the deterministic parameter estimation case. Thus, to be consistent, it is necessary to have $\operatorname{rank}\left(\mathbf{I}-\hat{\mathbf{A}}_{x} \hat{\mathbf{A}}_{x}^{T}\right)^{-1} \hat{\mathbf{A}}_{x}=n$, and therefore, ## 1. Hypothesis H4.4. $\left(\mathbf{I}-\hat{\mathbf{A}}_{x} \hat{\mathbf{A}}_{x}^{T}\right)^{-1} \hat{\mathbf{A}}_{x}$ is positive definite for all $\hat{\mathbf{A}}_{x}$, where $\hat{\mathbf{A}}_{x}$ is defined in (4.49). Note that the vector $\overline{\mathbf{x}}_{k}$ in (4.47) is assumed to converge to an equilibrium point $\overline{\mathbf{x}}$, i.e., $\lim _{k \rightarrow \infty}\left\|\overline{\mathbf{x}}_{k}-\overline{\mathbf{x}}\right\|=0$ for all $\boldsymbol{\xi}_{k}$, and similarly for $\mathbf{z}_{k}$, $\overline{\mathbf{\Sigma}}_{z z} \in L$ and $\overline{\boldsymbol{\Sigma}}_{x z z} \in L$. As a consequence of the assumption given in (4.42), the vector $\boldsymbol{\xi}_{k}$ is stationary and therefore, in general, $\operatorname{cov}\left[\boldsymbol{\xi}_{k}, \boldsymbol{\xi}_{l}\right] \neq 0$ for $k \neq l$. 
This necessitates the introduction of Hypotheses $\mathrm{H} 4.2$ and $\mathrm{H} 4.4$ to achieve the convergence properties as outlined in the following theorem. ## 2. Theorem 4.4. (i) Under Hypothesis $H 4.3 \hat{\mathbf{\Sigma}}_{x z z} \stackrel{P}{\rightarrow} E\left[\boldsymbol{\xi}_{k} \boldsymbol{\xi}_{k}^{T}\right]$ with probability 1. (ii) Under Hypotheses H4.3 and H4.4: $\hat{\mathbf{A}}_{x} \stackrel{P}{\rightarrow} \mathbf{A}$ and $\hat{\mathbf{B}}_{x} \stackrel{P}{\rightarrow} \mathbf{B}$. Proof. To establish $\hat{\boldsymbol{\Sigma}}_{x z z} \stackrel{P}{\rightarrow} E\left[\boldsymbol{\xi}_{k} \boldsymbol{\xi}_{k}^{T}\right]$ with probability 1, the result given in Theorem 4.1 is used which states that $S_{k}=\mathbf{x}_{k} \mathbf{x}_{k}^{T} \stackrel{P}{\rightarrow} \mathbf{S}_{\infty}$, where $\mathbf{S}_{\infty}$ is the stable matrix equilibrium defined in (4.8). The proof then follows along similar lines as in the deterministic case and using (4.66). Note that from Theorem 4.1 the following properties can be derived. From (4.8) $$ \overline{\boldsymbol{\Sigma}}_{z z}=\mathbf{G} \hat{\mathbf{z}}_{0} \hat{\mathbf{z}}_{0}^{T} \mathbf{G}^{T}=\mathbf{G} \boldsymbol{\Sigma}_{\boldsymbol{\eta} \boldsymbol{\eta}} \mathbf{G}^{T}=\overline{\boldsymbol{\Sigma}}_{x z x}. $$ Asymptotic consistency with probability 1 is defined by $$
stop
null
( _v_ 0, _x_ 1,..., _x n_), with (1) ⇒ (2). . Hint: Notice that in this case, the operations which can also be written using the exponential function (from the preceding exercise) and the additive inverse, are closure operators for the relations _R i_ ( _i_ = 1,2,..., _n_ ). It then follows from the first part of exercise 5 that relations are _P_ -primitive recursive; their disjoint union (compare the first part of exercise 2) is, for suitable functions , an equivalence relation , and finally is also _P_ -primitive recursive (why?), and thus . If _E_ 1,..., _E n_ form a partition of _N k_, then also (for any set _A_ ). . We only look at the implication from left to right. The assumption means that for each _j_, ( _i_ 1,..., _i j_, _x j_) ∈ _R_ _m_ \+ 1 iff there exist _x_ 1,..., _x j_−1 ∈ _N_, such that Thus . Now use the second part of the preceding exercise. . This is a modification of a proof due to Hartley Rogers, Jr. (recall the comments following theorem 2.5.2). First prove, using definition 3.4, that for every relation _P_, the condition implies (in both formulas, ). Then consider the formula: Let ; its satisfiability, established earlier, implies the existence of a natural number _i_, such that (1) Notice that for an arbitrary -predicate _Q_, and for an arbitrary -predicate _R_, The formula implies that _T_ ( _i_, _i_ ), hence, since ( _i_, _i_ ) ∉ _S_, by (1), (2) and (3), ; i.e., the formula is true. It now follows from (1) and (3) that ( _i_, _i_ ) ∈ _S_ ; i.e., which contradicts (4). . (a) We have: and the right-hand side is primitive recursive. _(b) Hint:_ Consider , defined as follows: . First prove the lemma: For any classes of relations, the formula implies . (a) That this relation is , follows, as in example 4.1.13, from the fact that , where the sequence of terms is easily seen to be primitive recursive. The relation is clearly primitive recursive; it thus remains to be shown that it is not in . Assume that it were. Then the function _f_ defined by the formula would be primitive recursive (cf. example 4.1.9). But we know (see the proof of theorem 4.1.4) that this function cannot be primitive recursive, which gives us the desired contradiction. _(b) Hint:_ Notice that iff , and apply part (a) and exercise 4 above. . Let _R_ ( _a_, _b_, _c_ ) be a primitive recursive relation, such that for all _x_, Let . The formula holds; so for _c_ = _i_, we have that for every _x_. In particular, , i.e., for every _x_, which contradicts the choice of _R_. . Suppose, to the contrary, that _J_, _K_ were primitive recursive. Then the relation would also be primitive recursive (why?), and hence its diagonalization, would be primitive recursive (cf. exercise 2, p. 169). However, we know that this relation is not even arithmetical (cf. the solution to exercise 6(a)). . _Hints:_ (a) If , we must have . For all . _(b) Hint:_ Use the fact that . _Hint:_ For and : . Hint: Consider the formulas
stop
null
Elmer Robinson]]. {{P}} '''It was a dark and stormy night''' is the first line of '''''Paul Clifford''''' (1830), written by [[British]] novelist [[Edward George Bulwer-Lytton (writer)|Edward George Bulwer-Lytton]]. Lytton won a bet that it was possible to write the opening lines of a best-selling novel consisting of nothing but adjectives and the adverb <i>very</i>. The passage reads: <center><i>It was a dark and stormy night; the rain fell in torrents, except at occasional intervals, when it was checked by a violent gust of wind which swept up the streets (for it is in London that our scene lies), rattling along the house-tops, and fiercely agitating the scanty flame of the lamps that struggled against the darkness.</i> <sub>Edmund Blackadder and Baldrick disagree as to whether this opening was inspired. They compare the first line of Clifford against other great opening sentences (such as [[Jane Austen]]'s <i>Sense and Sensibility</i>).</sub> The scene was created for [[The Complete Works of William Shakespeare (Abridged)], the play where we meet Edmund Blackadder. It's one of the most famous scenes in the entire [[Blackadder]] series. It was actually filmed in 2001, which would explain why the costumes differ from the costumes we see in the rest of ''Back & Forth''. + In this scene, Blackadder and Baldrick attempt to perform the opening scene of the 2003 Academy Award winning movie (and all time cult favourite) [[The Lord of the Rings]], the hobbit ''[[Frodo Baggins]]'' finds the [[One Ring]] and declares "It shall be a mighty weapon" ("no," Baldrick responds, correcting Blackadder in a moment of rare insight, "it's meant to be a ring"). In another nod to the film, in a previous scene, Blackadder refers to the ring as "that Ring of power, forged in the very fires of mount doom". Blackadder then attempts a second, more ambitious performance -- one of [[Edward George Bulwer-Lytton]]'s most famous (or infamous) opening lines -- the first line of the book ''[[Paul Clifford]]'' which begins, "It was a dark and stormy night". With Baldrick's support, Blackadder launches into his performance with much pomp and ceremony only to receive a slap in the face from Baldrick who belittles it as "infantile drivel". + Blackadder: "This was the worst line I have ever heard. Honestly I was...shocked!" Baldrick: "Shocked indeed..." When Baldrick claims he's been writing Shakespeare's plays in between time travelling and they have "all been best sellers", Blackadder remarks "You know, Baldrick, for somebody who hasn't got a brain I quite like you." This line was used in the opening to the "Making of Back & Forth" extra on the DVD. The main dialogue is as follows: Baldrick: [In a thick ''Australian'' accent] "Ed...Ed, mate." Blackadder: "What is it, Baldrick, this isn't a picnic." Baldrick: [As Shakespeare] "But it was a dark and stormy night! Baldrick: "Can you imagine a more improbable start to a film?" Blackadder: "No" Baldrick: "How is it that...?" (He is stopped from completing his question when Blackadder raises his hand. This time in his Australian accent.) Blackadder: "Ah, it's okay, my man. I was going to suggest something a little less trite." Baldrick: "Oh, like what, mate?" Blackadder: "Like what?..." Baldrick: "Yes?" Blackadder: "Let's make a start." Baldrick: "Start with what?" Blackadder: "Any story can only begin one way." Baldrick: "So?" Blackadder: "It was the best of times...it was the worst of times..." Baldrick: "The time of chivalry, of long swords and shields." 
Blackadder: "It was a dark and stormy night." Baldrick: "Which way are we doing it now?" Blackadder: [Mocking him] "The worst is yet to come." + The audience in this performance are a pair of [[alien]]s. They are introduced by [[Professor Riemann Schultz]] who claims to have "created the first ever portal linking Earth to the universe of other humanoid worlds". He also refers to these aliens as the "Podunkians" and appears to be enamoured by them. The Podunkian are similar in appearance to a cross between [[Troy McClure]] and [[Ugly naked guy]], the best known alien from the sitcom ''[[Friends (sitcom)|Friends]]''. + =References= {{Reflist}} + '''''Note: The following information has not yet been verified:''' + == {{Blackadder Links}} ==
stop
null
#include <stdio.h>
#include <stdlib.h>
#include <conio.h>   /* non-standard header, needed for getch() */

int main(){
    /* calloc() is useful to allocate zero-initialized storage in contiguous memory. */
    int n, *p, *q;

    printf("\nHow Many Numbers: ");
    scanf("%d", &n);

    p = calloc(n, sizeof(int));
    if(p == NULL)
    {
        printf("\nSorry We Are Not Able To Create...");
        exit(1);
    }

    for(int i = 0; i < n; i++)
    {
        printf("\nEnter Number %d : ", i + 1);
        scanf("%d", p + i);
    }
    for(int i = 0; i < n; i++)
    {
        printf("\nThe Number %d : %d ", i + 1, *(p + i));
    }

    /*
    free(p);
    // p = NULL;
    for(int i = 0; i < n; i++)
    {
        printf("\nThe Number %d : %d ", i + 1, *(p + i));
    }
    */

    /* realloc() must be given the new size in bytes, not an element count. */
    q = realloc(p, (n + 5) * sizeof(int));
    if(q == NULL)
    {
        printf("\nSorry We Are Not Able To ReCreate...");
        exit(1);
    }

    for(int i = 0; i < n + 5; i++)
    {
        printf("\nEnter Number %d : ", i + 1);
        scanf("%d", q + i);
    }
    for(int i = 0; i < n + 5; i++)
    {
        printf("\nThe Number %d : %d ", i + 1, *(q + i));
    }

    printf("\nPress Any Key To Exit..");
    getch();
    return 0;
}
stop
null
U = wwT = A\ 128 A N E W LAPLACIAN MOMENT PROBLEM so that U has rank 1 and hence U — c14 for some c ^ 0. This implies that Aw = cw, i.e. A 2k 2k+2 v = = cvi+2k. (5.6) But v\ = (a,0,0, •••,0,•••), and from (5.5) the recursion relation (5.6) is uniquely solvable. Hence U, and thus S as well, is uniquely determined. Finally, Theorem 3.3 asserts that the minimal solutions for K2 and V2 are unique, and from Theorem 3.6 it is clear that the same is true for Ki and Vi as well. This establishes the uniqueness for the present situation and completes the proof for part (ii). Remarks. The arguments in this section also prove the uniqueness for the corresponding set of trigonometric moment problems stated in §3D. 2. The arguments in this section essentially apply to the original LMP as well. The only difference is that here we must take y = and deal with the system of equations 1 = a + yp, 0 = a<p — y. But the above system also possesses a unique solution for <p = a/a2 + p and y = —l/p + a2. However, there are many situations in which we can not apply Theorem 3.3 due to the rank of the moment matrix D = O. In such cases, the argument in this section does not work. Thus we cannot use this kind of a functional to settle the question of uniqueness of solutions. V. The Spectral Transformation Approach In §3, we characterized the solutions and in §5, we found sufficient conditions for the uniqueness of solutions to the general LMP. In this section we develop an alternative approach to characterizing solutions by transforming the problem into the setting of a moment problem with finite limits of integration, for which there exist well-developed and deep results. This section was inspired by the paper of Fujii [2] who has used the idea of spectral transformation to characterize solutions of the standard moment problem. 5 THEOREM 6.1. Let a ^ 0, /? = 0, and S be a solution to (1.1). Let T be a point in the unbounded component D° of D = C — cr(S). If <S is analytic in D and Hp(S,T) = a-\— (T — S)~l^6, where a = /3 + STr(d/T) Hp(S,T), then <S is a unique minimal solution corresponding to the unique minimal solution T. Proof. If <S is a solution to (1.1), then it is an analytic Stieltjes function and hence, S belongs to D°. From Lemma 4.2 and Theorem 4.6, 6 = o + I Tr(d/T)Hp{S,T) with a,/3 & R, where dT is the spectral measure on C associated with the Cayley transform of T. Since /? = 0, the relation in the theorem is readily verified. For the minimality of (5 and the uniqueness of <S and T see the second assertion in the proof of Theorem 5.3. Remark. From the condition <p(oo) = oo, if Hp(S,T) = (a — l)/(T — S) on D°, then (a, /3) = (—1,0). Thus, Theorem 6.1 gives some relation between solutions of the LMP and the corresponding extended Schur class. In fact, Hp(S,T) is a Schur class function whose value at oo is zero. Moreover, we can not let T belong to C — D° in such a theorem due to analytic continuation principle. For a discussion of the Schur class and extended Schur class we refer to [1]. THEOREM 6.2. (Characterization of Solutions) Let a ^ 0, 7=0and p = 0 and let /? = l/p + a2 with 22^0. Define the functional by = r\ — f^S1{X)d,{X), (6-1) T-J r, where D° is the unbounded component of the complement of the spectrum of a solution, and dt is the corresponding spectral measure. Suppose that <S is not a constant function. 
Then (6.1) uniquely determines Hp(S,T), and a positive Borel measure p on the real line is a solution to (1.1) if and only if the absolutely continuous part of 5 132 A N EW LAPLACIAN MOMENT PROBLEM its spectral measure with respect to Lebesgue measure is invariant under the transformation z i— > l/z. Proof. Suppose p is a solution of (1.1), and let d be its spectral measure. Since Hp(S,T) is analytic on D°, it is easy to show that the right-hand side of (6.1) defines a bounded linear functional of S. Since S is an analytic Stieltjes function, a theorem of Schoenberg (see [5]) implies that pT((s,oo)) = <p((s,oo)) is a measure on the real line whose Cauchy transform is Hp(S,T). Hence <p and pT agree on the interval (s, oo). Since <p is not a constant function, repeated differentiation reveals that they agree at 00 as well, and thus <p must be the Stieltjes transform of pT. Now let d be the spectral measure associated with <S and p^ the spectral measure for the spectral transformation 0. Then we have hp(S,T)(t)= l\S{T)-T)^^ = JTz{T)-T)^z,dp^, (6.2) and so the two measures on the right-hand side are equivalent on (-oo,oo). This implies that d is absolutely continuous with respect to dpT. Hence, the absolutely continuous part of d with respect to Lebesgue measure is invariant under z »-> I/z and therefore the Fourier transform of a function analytic and bounded in \z\ < 1 under the inversion of the z-variable. But any measure whose Fourier transform has these properties is uniquely determined, and hence is given by (6.1). This completes the proof of Theorem 6.2. 6 BIBLIOGRAPHY REFERENCES 1. D. Alpay, M. E. Corach, and J. Peccati. Some observations on Schur and Nevanlinna functions of class D. Int. J. Math., 16:155-182, 2005. 2. H. Fujii. Spectral transformation approach to Hamburger-Stieltjes mo- ment problem. Houston J. Math., 13:1-7, 1987. 3. A. Gilat. The Laplacian moment problem. In J.-P. Antoine, D. Baleanu, M. Demirci, Y. Luchko, M. Mursaleen, M. A. Noor, M. Mursaleen, and M. A. Noor, editors, Fractional Calculus, Theory and Applications, pages 375-380. InTech, 2012.
stop
null
Arithmetic and Shifts .global main .text main: # Storing Values in Memory: # This section illustrates how to store values in memory using the `str` and `strb` instructions in ARM assembly. # It demonstrates storing a 4-byte integer and a 1-byte integer in memory. # Data Section .data memory_: .word 0x00 // This represents the 4-byte integer, initially set to 0x00 memory_b: .byte 0x00 // This represents the 1-byte integer, initially set to 0x00 memory_d: .space 4 .text LDR r1, =memory_ LDR r2, =memory_b LDR r3, =memory_d MOV r4, #0xDEADBEEF // Integer to store (4 bytes) MOV r5, #0xAA // Byte to store (1 byte) STR r4, [r1, #0] // Store the integer in memory STRB r5, [r2, #0] // Store the byte in memory # Loading Values from Memory: # This section demonstrates how to load values from memory using the `ldr` and `ldrb` instructions in ARM assembly. # It demonstrates loading a 4-byte integer and a 1-byte integer from memory. .text mov r6, #0x95114430 STR r6, [r1, #0] // Store the integer in memory LDR r4, [r1, #0] // Load the integer from memory LDRB r5, [r2, #0] // Load the byte from memory # Array Section # This section demonstrates how to work with arrays in ARM assembly. # It includes instructions for accessing and modifying array elements. .data numbers: .word 10, 20, 30, 40, 50 // An array of 5 integers .text mov r6, #4 LDR r6, =numbers // Load the address of the array into R6 LDR r4, [r6, #0] // Load the first element (10) into R4 LDR r5, [r6, #12] // Load the fourth element (40) into R5 LDR r7, [r6, #24] // Load the fifth element (50) into R7 STR r8, [r6, #20] // Store 31 into the second element STR r9, [r6, #4] // Store 21 into the third element # Array Indexing # # This section demonstrates array indexing using registers. # We will load and store array elements using index registers. .text LDR r6, =numbers // Load the address of the array into R6 MOV r7, #2 // Set R7 as the index (2 means the third element) LDR r9, [r6, r7, LSL #2] // Load numbers[2] into R9 (multiplied by 4 for word size) MOV r8, #30 // Load 30 into R8 STR r8, [r6, r7, LSL #2] // Store 30 into numbers[2] (multiplied by 4 for word size) # Multidimensional Arrays # # This section demonstrates how to work with 2D arrays in ARM assembly. # It includes instructions for accessing and modifying 2D array elements. .data matrix: .word 1, 2, 3 .word 4, 5, 6 .word 7, 8, 9 numRows: .word 3 numCols: .word 3 .text LDR r1, =matrix // Load the address of the matrix into R1 LDR r6, =numRows // Load the number of rows into R6 LDR r7, =numCols // Load the number of columns into R7 LDR r6, [r6] // Load the number of rows from the memory address in R6 LDR r7, [r7] // Load the number of columns from the memory address in R7 LDR r4, [r1, #0] // Load matrix[0][0] (1) into R4 MOV r8, #2 // Set row index to 1 (element [1][1]) MOV r9, #1 // Set col index to 1 (element [1][1]) MOV r6, #0 // Zero r6 MOV r7, #0 // Zero r7 LDR r6, =numCols // load numCols LDR r7, [r6] // r7 = numCols // LDR r7, =numCols // load numCols address (useless) MUL r10, r9, r7 // rowOffset = row * numCols = 1 * 3 = 3 MUL r11, r8, #4 // colOffset = col * wordSize (4 bytes) = 2 * 4 = 8 ADD r12, r10, r11 // elementOffset = rowOffset + colOffset = 3 + 8 = 11 LDR r4, [r1, r12] // Load matrix[2][1] (7) into R4 MOV r4, #45 // 45 into r4 STR r4, [r1, r12] // Store 45 into matrix[2][1] (replace the 7 with 45) # Dynamic Memory Allocation # This section demonstrates dynamic memory allocation using the `bl` instruction to call assembly routines for allocation and deallocation. 
.text
    BL allocateMemory              // Call the assembly routine for memory allocation
    MOV r1, r0                     // Store the allocated memory address in r1 for later use
    BL deallocateMemory            // Call the assembly routine for memory deallocation

.data
dynamic_memory_address: .word 0x00

.text
allocateMemory:                    // Assembly routine for memory allocation
    STMDB sp!, {lr}                // Save the return address on the stack
    MOV r0, #16                    // Size argument for malloc goes in r0 (16 bytes here, purely illustrative)
    BL malloc                      // Call the C malloc function
    LDR r1, =dynamic_memory_address
    STR r0, [r1]                   // Store the allocated memory address in dynamic_memory_address
    LDMIA sp!, {lr}                // Restore the return address
    BX lr                          // Return to the calling function

deallocateMemory:                  // Assembly routine for memory deallocation
    STMDB sp!, {lr}                // Save the return address on the stack
    LDR r0, =dynamic_memory_address
    LDR r0, [r0]                   // Load the allocated memory address
    BL free                        // Call the C free function
    MOV r0, #0                     // Clear the stored address
    LDR r1, =dynamic_memory_address
    STR r0, [r1]
    LDMIA sp!, {lr}                // Restore the return address
    BX lr                          // Return to the calling function
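The multidimensional-array section above computes element addresses by hand; the following small Python sketch spells out the row-major offset arithmetic for word-sized (4-byte) elements. The matrix values mirror the `.data` section; the function and variable names are illustrative.

```python
# Row-major byte offset of matrix[row][col] with 4-byte words:
#   offset = (row * num_cols + col) * word_size
WORD_SIZE = 4
matrix = [1, 2, 3,
          4, 5, 6,
          7, 8, 9]          # flat storage, as laid out by the .word directives
num_rows, num_cols = 3, 3

def element_offset(row, col):
    """Byte offset of matrix[row][col] from the base address."""
    return (row * num_cols + col) * WORD_SIZE

def load(row, col):
    """Equivalent of LDR rX, [base, offset]."""
    return matrix[element_offset(row, col) // WORD_SIZE]

print(element_offset(2, 1))   # 28 bytes from the base address
print(load(2, 1))             # 8, the value stored at matrix[2][1]
```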
stop
null
DESC (b-36561-4)'] (array([ 0., 100., 30300., 50400., 60500., 61500., 62500., 63500., 64500., 65500., 66500., 67500., 68500., 69500., 70500., 71500., 72500., 73500., 74500., 75500., 76500., 77500., 78500., 79500., 80500., 81500., 82500., 83500., 84500., 85500., 86500., 87500., 88500., 89500., 90500., 91500., 92500., 93500., 94500., 95500., 96500., 97500., 98500., 99500.]))

```python
# Try to find the class feature by itself
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score

model = RandomForestRegressor()
```

```python
model.fit(X_train, y_train)
y_predicted = model.predict(X_test)
```

```python
# Do not include in your report
print(r2_score(y_test, y_predicted))
```
stop
null
now, the best performance is achieved by the cascaded de- cascading $[4,20]$ with the lower switching frequency operating at the main filter resonant frequency (i.e., at the inverter bridge switching frequency) and the second filter resonant frequency chosen higher to limit the second harmonics in the output inductor current as shown in Fig. 9-11. A third-order low- pass filter consisting of a $L C$ filter followed by an inductor can also be employed, but due to the nonideal inductor quality factor (due to capacitor voltage control mechanism), the out- put inductor current ripples are not usually very low compared to the second-order $L C$ filters and sometimes require control action to eliminate the low-frequency components. Higher har- monic content also leads to higher filter losses. Therefore, the second-order $L C$ filter is mostly utilized in boost converter in verter-based systems, making the converter a third-order one(Fig. 9-11) and causing large inductor current oscillations. The active damping with capacitor voltage feedback [23] is thus em- ployed with a cascaded filter to avoid this phenomenon. ![figure](URL) FIGURE 9-11 ## 9.5.2 Control Strategy of the Three-Phase Grid-Tie AC/DC Converter System The two most well-known control methods are voltage and current control. With voltage control, the input voltage (at the dc-link) of the inverter is used as the outer-loop control vari- able. However, with current control, the reference current is generated in the outer-loop as explained below, and then this reference current is forced as the output of the converter. The grid-tied converter generally uses current control. Therefore, only the current control method has been presented below. Based on the three-phase load current requirements and grid voltage as a feedback, the re- quired $d q$ current reference components are generated using phase-voltage oriented Park trans form. In case of three-phase currents generated independently, a delay and a phase shift is likely to happen making the system unbalanced and causing an oscillating reactive power be- tween the source and the converter. In case of an islanded network, the dc-link voltage is regu- lated at a fixed value by adjusting the output active power (through a $q$ -axis current controller). As per the power transfer requirement, the real power is actively controlled in a two-stage topology by utilizing the boost converter. Generally, a PI control mechanism is used, and the reference $d q$ -axis current components are generated as per the desired active and reactive power transfer requirements (where $d$ and $q$ current components in the reference frame repre sent reactive and active power transfer, respectively). A typical control schematic is shown in Fig. 9-12 for a grid-interconnected system. By appropriate gating signals, the current $i_{a c s}$ of the converter is made to follow the current reference $i_{a c}^{*}$ , which is further derived from the ex ternal dc load current and active power references of the entire system. Using grid-voltage ori- ented Park transform with the help of a synchronizing circuit [8], the current references are derived from $i_{a c s}$ and $V_{d c}$ . The resulting reference currents are processed into pulses at the converter gate as shown in Fig. 9-12. 
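To make the control path concrete, here is a minimal Python sketch of the two pieces just described: a grid-voltage-oriented Park transform that maps measured three-phase currents into dq components, and PI regulators acting on the dq current errors. The gains, sampling time, and signal values are illustrative assumptions and are not taken from Fig. 9-12.

```python
import math

def abc_to_dq(ia, ib, ic, theta):
    """Amplitude-invariant Park transform; theta is the grid-voltage angle
    provided by the synchronizing circuit (e.g., a PLL)."""
    d = (2.0 / 3.0) * (ia * math.cos(theta)
                       + ib * math.cos(theta - 2.0 * math.pi / 3.0)
                       + ic * math.cos(theta + 2.0 * math.pi / 3.0))
    q = -(2.0 / 3.0) * (ia * math.sin(theta)
                        + ib * math.sin(theta - 2.0 * math.pi / 3.0)
                        + ic * math.sin(theta + 2.0 * math.pi / 3.0))
    return d, q

class PI:
    """Simple discrete PI regulator for one current axis."""
    def __init__(self, kp, ki, ts):
        self.kp, self.ki, self.ts = kp, ki, ts
        self.integral = 0.0

    def update(self, reference, measured):
        error = reference - measured
        self.integral += self.ki * error * self.ts
        return self.kp * error + self.integral

# Illustrative outer-loop references: i_d* sets reactive power, i_q* sets active power,
# matching the d/q roles described in the text.
pi_d = PI(kp=2.0, ki=50.0, ts=1e-4)
pi_q = PI(kp=2.0, ki=50.0, ts=1e-4)
i_d, i_q = abc_to_dq(ia=10.0, ib=-5.0, ic=-5.0, theta=0.0)
v_d_ref = pi_d.update(reference=0.0, measured=i_d)   # regulate reactive current to zero
v_q_ref = pi_q.update(reference=8.0, measured=i_q)   # track the active-current reference
```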
As discussed in Section 9.5.1, with the help of active damping techniques [8], the grid-side filter inductor current is made sinusoidal, and in the steady state, there is no pulsating energy stored in the grid-side inductors, which results in no har- monic power exchanged with the grid. However, the boost converter output voltage is required to be as ripple free as possible to reduce the input current distortion. Thus, the output inductor of the boost converter plays a vital role in smoothing its output voltage. The boost converter control architecture is described in Section 9.3.3. ![figure](URL) FIGURE 9-12 ## 9.6 Grid Tied Four-Leg Inverter Operation As discussed in Section 9.5, the grid-connected inverter uses the load or dc current as a feed- back to control the inverter current, whereas in a standalone microgrid application with unbal- anced and nonlinear loads, the inverter controls its current to regulate the ac-side voltage at the converter end. As stated above, the grid-tied inverter uses current control method (discussed in Section 9.5) and three- or four-leg topologies are generally used in these applications. Con- sideration of the configuration of the inverter side is outside the scope of this discussion and the control approach discussed here is focused on voltage control method, where the inverters ![figure](URL) FIGURE 9-13 Block schematic of two-stage boost converter with four-leg inverter topology. work in closed loop to control the ac-side voltage at the converter end. Although only boost converter (first-stage) control is discussed below, all other converters in a dc microgrid may have similar control requirements as for the first-stage converter for the load-leveled dc voltage. As illustrated in Fig. 9-13, the boost converter-based four-leg inverter topology with dc-link and battery converter interfacing converters is shown, where a low-frequency grid interlinking ac/dc link voltage and battery converter dc-link voltage are kept constant. The control scheme of the four-leg inverter is shown in Fig. 9-14 where both negative andpositive sequence control loops are employed in two-step for controlling the load voltage $V_{a c r, d e}$ at the inverter end (which has only positive sequence components if balanced load is considered). A PI controller is also used for the frequency control. The operation of a four-leg inverter system with a variable speed wind generator under dif- ferent conditions of the dc-side voltage magnitude variations is shown in Fig. 9-15 [13] for grid- connected operation, which shows the grid-side current and ac-dc-link side currents along with the output and input voltage of the four-leg inverter under balanced load. ![figure](URL) FIGURE 9-14 ![figure](URL) FIGURE 9-15 ## 9.7 Experimental Results of a dc Microgrid Application Experimental results are presented to show the two-stage converter operation in a $20-kW$ mi crogrid test bench as shown in Fig. 9-16 with the boost converter, dc/dc converter, battery converter, and dc/dc link interface between dc and ac grid microgrids. A comparison of the steady-state three-phase boost converter input ac current waveforms with and without load-leveling control mechanism of dc microgrid has been demonstrated in Fig. 9-17 for the power taken by the battery converter and the dc-dc converter. It is clearly seen that the three-phase boost converter ac current is in phase with the respective phase ac voltage to deliver the required power. 
Furthermore, the dc load-leveling strategy (also referred to as voltage droop) using the dc bus voltage achieves a total harmonic distortion (THD) of only2.97% as shown in Fig. 9-17. ![figure](URL) FIGURE 9-16 ![figure](URL) FIGURE 9-17 ## 9.8 Conclusion A dc microgrid may have several distributed renewable sources connected to the dc link. The application of boost-type ac/dc converters has been demonstrated for obtaining unity power fac- tor and near-sinusoidal input currents even under unbalanced load conditions in a dc microgrid. The proposed control scheme has the advantage of implementation of only a boost converter and voltage source inverter with a dc-link capacitor to interface with the ac-grid side. The grid- tied control has been discussed for both current-mode control and for four-leg inverter applica- tion for a dc microgrid. Experimental results have shown better ac-side performance with reduced harmonic contents even under unbalanced load conditions. ## References
^1,...,L_n]^t\left[ \begin{array}{ccc}0 & & 0 \\0 & & 0 \\0 & & 0 \\L^1 & \ldots & L^n\end{array}\right]$ $ = \left[ \begin{array}{cccc}y_n & 0 & \ldots & 0 \\0 & 1 & 0 & \ldots \\. & . & . & . \\0 & 0 & \ldots & 0 \\. & . & 0 & . \\. & . & . & . \\\end{array}\right],$ from which it follows that $L_n$ is positive definite, as well as the upper left corner block $ L_{n-1} = \left[ \begin{array}{cccc}0 & 0 & \ldots & 0 \\0 & 1 & 0 & \ldots \\. & . & . & . \\0 & 0 & \ldots & 0 \\. & . & 0 & . \\. & . & . & . \\\end{array}\right] - \left[\begin{array}{c}y_1 \\. \\. \\y_{n-1}\end{array}\right]\cdot [y_{n-1}^1, \ldots , y_{n-1}^{n-1} ]. $ Next, write $ H^3 = \left[\begin{array}{c}R^3 \\R^{n-1} \\R^{m}\end{array}\right]=\left[ \begin{array}{c}L_1 & & 0 & \\& L_{n-1} & & 0 \\& & L_n & 0 \\Q^1 & Q^{n-1} & Q^m & L_N\end{array}\right]\left[\begin{array}{c}H^1 \\H^{n-1} \\H^{m}\end{array}\right],$ where we can apply induction to $ \tilde{R}^{3} =\left[\begin{array}{c}R^{n-1} \\R^{m}\end{array}\right] =\left[ \begin{array}{cc}L_{n-1} & \\Q^{n-1} & Q^m\end{array}\right]\left[\begin{array}{c}H^{n-1} \\H^{m}\end{array}\right], $ given that the upper left corner block, $L_{n-1}$ is positive definite. This shows that $L_n$ is invertible and by construction $\left[\begin{array}{cccc}y_1 & 1 & \ldots & 0 \\y_2 & 0 & \ldots & 0 \\. & . & . & . \\y_{n-1} & 0 & \ldots & 0 \\. & . & 0 & . \\. & . & . & . \\\end{array}\right]\left[ \begin{array}{cccc}0 & 0 & \ldots & 0 \\0 & 1 & 0 & \ldots \\. & . & . & . \\0 & 0 & \ldots & 0 \\. & . & 0 & . \\. & . & . & . \\\end{array}\right]^{-1} =\left[\begin{array}{c}y_1 \\y_2 \\. \\y_{n-1} \\. \\. \\\end{array}\right] $ has the form required in the induction assumption. There remains to show that $L_{n}$ is positive definite. Now we proceed as in the proof of case (ii). Use $a$ (and similarly $b$ ) to denote the $n-1$ dimensional vector defined by the coordinates $1,...,n-1$ . Then, $ L_{n-1} = L_3 - L_3\left[\begin{array}{cc}\frac{\langle y^{a}, y^b\rangle _{L^{-1}_2}}{\langle y^a, L_2y^a\rangle } y^ay^b & \frac{\langle y^b, y^c\rangle _{L^{-1}_2}}{\langle y^a, L_2 y^a\rangle } y^a y^c \\\frac{\langle y^{b}, y^a\rangle _{L^{-1}_2}}{\langle y^a, L_2 y^a\rangle } y^by^a & L_2 - \frac{\langle y^c, y^a\rangle _{L^{-1}_2}}{\langle y^a, L_2 y^a\rangle } y^a y^c\end{array}\right] L_3. $ Hence $ L = \left[ \begin{array}{ccc}0 & 0 & 0 \\0 & L_3 & L_N \\0 & L_N^t & L_m\end{array}\right] - \left[ \begin{array}{c}0 \\L_3\end{array}\right]\left[\begin{array}{cc}\frac{\langle y^{a}, y^b\rangle _{L^{-1}_2}}{\langle y^a, L_2y^a\rangle } y^ay^b & \frac{\langle y^b, y^c\rangle _{L^{-1}_2}}{\langle y^a, L_2 y^a\rangle } y^a y^c \\\frac{\langle y^{b}, y^a\rangle _{L^{-1}_2}}{\langle y^a, L_2 y^a\rangle } y^by^a & L_2 - \frac{\langle y^c, y^a\rangle _{L^{-1}_2}}{\langle y^a, L_2 y^a\rangle } y^a y^c\end{array}\right]\left[\begin{array}{c}L_3^t \\L_N^t\end{array}\right]. $ Then $ L = L_{N+3} - \sum _{i=1}^3\left[ \begin{array}{c}0 \\L_i\end{array}\right]\left[\begin{array}{c}y^iy^n \\\frac{1}{y_n^n} y^iy^n\end{array}\right]L_{N+3} = L_{N+3} - \sum _{i=1}^3\left[ \begin{array}{c}0 \\L_i\end{array}\right]\left[\begin{array}{c}y^iy^n \\\frac{1}{y_n^n} y^iy^n\end{array}\right]\left( \left[ \begin{array}{c}0 \\L_i\end{array}\right]\left[\begin{array}{c}y^iy^n \\\frac{1}{y_n^n} y^iy^n\end{array}\right]\right)^tL_{N+3}. 
$ The result for the matrix $\tilde{L} = L_{N+3} - \sum _{i=1}^3\left[ \begin{array}{c}0 \\L_i\end{array}\right]\left[\begin{array}{c}y^iy^n \\\frac{1}{y_n^n} y^iy^n\end{array}\right]\left( \left[ \begin{array}{c}0 \\L_i\end{array}\right]\left[\begin{array}{c}y^iy^n \\\frac{1}{y_n^n} y^iy^n\end{array}\right]\right)^t \in M_{N+n}(^2, $ which is defined on a co-dimension two space in $^2$ , now follows from lemma 3.3. To make the transition to $L$ from $\tilde{L}$ , write for a linear transformation, $A$ , $M = P_{\hat{\pi }^{\perp }}^{-1}AP_{\hat{\pi }^{\perp }} = A^{\prime } + \beta Q +\gamma Q^t + \alpha QQ^t. $ Then $M^{\prime } = A^{\prime }$ , and $K = Q^{\prime \perp } M^{\prime \prime }Q^{\prime } = \alpha $ . The equation (REF ) to be considered becomes $ f(A^{\prime } + K Q^3 + \alpha QQ^t)f(A^{\prime }) = f( \alpha Q Q^t). $ This is equivalent to (REF ) after a further application of lemma 3.5.
mmnik | 764cdfecfd | 2023-07-11 15:47:44 +0100 | 7 | -10 | /cmd/cli/main.go /cmd/tpcds/tpcds.go /internal/pkg/schema/bloom/bloom.go /internal/pkg/schema/metadata.go /internal/pkg/schema/table.go /internal/pkg/schema/testdata/schema /internal/pkg/storage.go /internal/pkg/tpcds/schemas.go /pkg/client/db.go /pkg/client/datastructures.go /pkg/client/db_test.go /pkg/client/query.go /pkg/client/resultset.go /pkg/client/schema.go

Make `WithColumns` a flag of `TableWithColumnsQuery`

This avoids introducing a new function variant for that functionality while allowing us to specify a separate list of column names and a list of columns to filter out. `db_test.go`'s `TestShowTablesWithColumns` has been extended to cover that case.

mmapfd/mmapfd

`mmapfd` is an io.ReadWriteCloser that mmaps a temporary file.

Example Usage

```go
fd, err := mmapfd.New()
if err != nil {
	panic(err)
}
defer fd.Close()

file := fd.File()
n, err := io.Copy(file, r)
if err != nil {
	panic(err)
}
file.Seek(0, io.SeekStart)
n, err = io.Copy(w, file)
if err != nil {
	panic(err)
}
```

mmapfd/pkg/file

`file` provides common file operations (like opening, closing, locking, truncating) that work on file descriptors as opposed to `*os.File`. In addition it provides an io.ReadWriteAt implementation backed by mmap.

Example Usage

```go
import "github.com/bytebase/mmapfd/pkg/file"

// Opens and optionally locks a file, returning a file descriptor.
fd, err := file.OpenLock("test/data.json", true)
if err != nil {
	return nil, err
}
defer file.CloseUnlocked(fd)

var size int64
var content []byte

// Gets the size and content of the file.
size, err = file.Size(fd)
if err != nil {
	return nil, err
}
if size == 0 {
	return nil, nil
}
content, err = file.MMap(fd, 0, size)
if err != nil {
	return nil, err
}
defer file.Unmap(content)

// Does something with the file.
// ...
```

mmapfd/pkg/mmap

`mmap` package provides convenient wrappers over syscalls and runtime APIs to make mmap/unmap code simpler.

Example Usage

```go
import "github.com/bytebase/mmapfd/pkg/mmap"

var bytes []byte
fd, err := mmapfd.New()
if err != nil {
	panic(err)
}
defer fd.Close()

bytes, err = mmap.MapFile(fd.FD(), int(fInfo.Size_))
if err != nil {
	panic(err)
}
defer mmap.Unmap(bytes)
```

mmapfd/pkg/temp

`temp` provides file operations (like open and close) using temporary files on disk.

Example Usage

```go
fd, path, err := temp.OpenFile("data")
if err != nil {
	panic(err)
}
defer temp.CloseFile(fd, path)

n, err := io.Copy(w, oFile)
if err != nil {
	panic(err)
}
```

mutils

Utilities for the migration project.
```
go install go.uber.org/mock/mockgen@latest
```

```
# Generate mocks
scripts/mockgen.sh
```

```
# Set up the environment and run the tests
scripts/test.sh
```

Usage of LFS files:

```
git lfs install
git lfs track "**.csv"
git add .gitattributes
git add ./*.csv
```

Supported engines

SQLite (partial: `sql` driver)
MySQL (partial: `go-mysql` driver)
SQL Server (partial: `tds` driver)
Postgres (full: `sql` driver)

TODO:

SQL Server: use a `SELECT TOP` query to replace `LIMIT OFFSET`.
MySQL: add more tests and maybe use the full-featured mysql driver.

How to run tests

Prepare the PostgreSQL server using the `make docker_db` command to launch the docker image.

Prepare a PostgreSQL client with psql (or the command-line psql of PostgreSQL 14) and run `sql/drop_postgresql_test.sql` to create the test databases. You also need to create two new users with the CREATEROLE privilege:

```
-- username: migrate_pgwriter_test, password: migratepgwritertest
CREATE USER migrate_pgwriter_test CREATEROLE CREATEDB;
-- username: migrate_pgreader_test, password: migratepgreadertest
CREATE USER migrate_pgreader_test;
```

Check your PostgreSQL host IP in the file `postgresql.conf` via the `listen_addresses` parameter.

Check that PostgreSQL listens on port `5432` (the default).

Run the tests: `go test . -p=1`

Tips: if a test fails with the error `pq: terminating connection due to administrator command`, please retry the test a few times until it passes.

You can use a simple docker-compose.yml to set up the above test environment.

Run docker-compose: `docker-compose up`
Run the docker db setup: `make docker_db`
Run the tests: `go test . -p=1`

Benchmark

We run 100,000 INSERT queries to a Postgres database with Go and Elixir. We compare the performance of the Postgres driver, the PostgreSQL parser, and the copy_from file execution time.

| Go code | Elixir code | Exec time |
| --- | --- | --- |
| copy_from.csv.go | copy_from.csv.exs | 645 ms ± 30 ms |
| copy_from.csv.stream.go | copy_from.csv.stream.exs | 593 ms ± 21 ms |

```
make build        # build binary `./migrate`
./migrate --help

# PostgreSQL to PostgreSQL
./migrate pg "dbname=postgres user=migrate_pgreader_test host=172.23.1.2 sslmode=disable" pg "dbname=postgres user=migrate_pgwriter_test host=172.23.1.2 sslmode=disable" --table user --query "SELECT * FROM user" --limit 100
```
l. _Hom_ _ (B, A)_ We note that the exactness of (i) follows from that of (ii) and (iii), since each morphism is an isomorphism. But the proof of (ii) is trivial, as every morphism in _Mod_ − _A_ has a kernel and, moreover, by 7.4, the functor _M⊗_ is exact. Finally, the proof of (iii) depends on the following general lemma: **Lemma.** If _i:A → B_ is a monomorphism in _A_, the functor _Hom A(i, M)_ is an epimorphism from _Hom A(B, M)_ to _Hom A(A, M)_ for every _M_ in _Mod −A_. PROOF. If _g ∈ Hom A(A, M)_ we may take _Hom A(i, M)(f) = g_ for _f_ any morphism from _B_ to _M_ such that _fi = g_. So we must show that such a morphism _f_ exists. Let _X_ be the subobject _i(A)_ of _B_, let _X′_ be the kernel of the identity map 1: _B → B_, let _p′_ be the corresponding projection onto _B/X′_, and let be the restriction of _g_ to _X_ (recall that _M_ is discrete). By 1.6 and 4.4, the factorization through _X/X′_ of the homomorphism _g/X_ : _X/X′ → M_, given by 4.4, is uniquely determined by the choice of over _X/X′_. If this factorization is the morphism ( _X/X′_ ) _⊕_ ( _B/X′_ ) _M_, then the composite is clearly a homomorphism from _B_ to _M_ such that _fi = g_, for if 7.7( _c_) is the inclusion, then _iB = r p′i = r iB′ = j iA_ = _i A_(by 7.7( _a_) and ( _b_ )), so _f_ is well defined by Consequently, _Hom A(i, M)_ is an epimorphism. | PROBLEMS ****. Prove that the functor _M⊗_ is the zero functor if and only if _M_ is the zero object in _Mod − A_. ****. Prove that the tensor product of two free modules is also free. ****. Let _M_ be a right _A_ -module. Prove that the left ideal { _a_ | _Ma = 0_ } of _A_ is the largest left ideal of _A_ in the kernel of the functor _M⊗_. ****. An exact sequence _T′ → T → T′′ → 0_ in _Mod − A_ is said to be _split exact_ if there is a (necessarily monic) morphism _k : T′′ → T_ such that _wk =_ 1. Prove that the following statements are equivalent: (1) _T′ → T → T′′ → 0_ is split exact. (2) _T ∼ T′ ⊕ T′′_. (3) Every sequence of left _B_ -modules _0 → X → Y → Z_ which becomes exact after tensoring by _T_, that is, by applying the functor _T⊗_, is exact. (Show that _T′_ → _T_ has a _section_, by first exhibiting a morphism _T → T′_ and tensoring with _Z_.) ****. Suppose _X → Y → Z_ is an exact sequence of left _B_ -modules. (1) Give a direct proof of the lemma in this section. (2) Show, using Problem 4, that if (i) _M_ is free or (ii) _Z_ is projective, or (iii) _M⊗ X → M⊗ Y_ is injective, then _M⊗ Y → M⊗ Z_ is an epimorphism. ****. Assume that the given exact sequence _X → Y → Z_ splits. Show, using Problem 4, that _M⊗ Y → M⊗ Z_ is also an epimorphism for any _M_ in _Mod_ - _A_. **10. Equivalent Categories and Functors** We often have the impression that certain mathematical structures are really "the same". This idea can be expressed precisely by making use of the concept of a category and, in particular, by describing the idea of a _categorical equivalence_. The purpose of this section is to clarify the definition of a categorical equivalence and to introduce some relevant concepts. We begin by defining a _functorial isomorphism_. **Definition.** Let _C_ and _D_ be categories and let _F : C → D_ and _G : D → C_ be functors. A _functorial isomorphism_, or _equivalence of categories_, from _F_ to _G_ is a natural isomorphism _μ_ : 1 _C → G ◦F_ such that _ν_ : 1 _D → F ◦ G_ is also a natural isomorphism. Here 1 _C_ and 1 _D_ are the identity functors. 
A functor _F : C → D_ is said to be a _category equivalence_ (or to establish _categorical equivalence_ ) if there is a functor _G : D → C_ such that _F_ and _G_ are functorially isomorphic. It is clear that _F_ is a category equivalence if and only if _G_ is a category equivalence. If _μ_ establishes a functorial isomorphism between _F_ and _G_, one might say that _μ_ establishes a category equivalence between _C_ and _D_, and, when the context is clear, _μ_ may be called a category equivalence. We shall prove two useful lemmas about category equivalences. To state them more precisely, we introduce two concepts. One is the "smallest" category containing a given set. **Definition.** If _S_ is a set and _C_ is a category, a __ functor__ from _S_ to _C_ is a family _fs_ of objects of _C_, one for each _s_ in _S_ (see 3.1).
Problem 5

Part 1. Fix n\in \mathbf{N} and show that the set
\Sigma_n = \left\{(p_1,...,p_n)\in \mathbf{R}^n \mid p_k>0 \ \forall k,\ \sum\limits_{k=1}^n p_k = 1\right\}
is connected.

Proof. If we can find a path between all points of \Sigma_n, then \Sigma_n is path connected, and thus connected. Choose any p,q\in\Sigma_n and define
f:[0,1]\to\Sigma_n, \qquad f(t)=tp+(1-t)q.
Note that if t\in[0,1], the sum of the components of f(t) is
\sum\limits_{k=1}^n (tp_k+(1-t)q_k)=t\left(\sum\limits_{k=1}^n p_k\right)+(1-t)\left(\sum\limits_{k=1}^n q_k\right) = t+(1-t)=1,
and each component tp_k+(1-t)q_k is positive because p_k,q_k>0 and t\in[0,1]. Therefore f(t) \in\Sigma_n, and thus f:[0,1]\to\Sigma_n. It is clear that f is continuous, so f is a path between p and q. Since p,q were arbitrary, \Sigma_n is path connected, and therefore connected.

Part 2. Show that \Sigma_{\infty}=\{(p_1,p_2,...,p_k,...) \mid p_k>0\ \forall k \text{ and } \sum\limits_{k=1}^{\infty}p_k=1\} is a connected metric subspace of l_1 (here l_1 is the space of absolutely convergent series with the metric d(x,y)=\sum|x_i-y_i|).

Proof. Define f:\Sigma_{\infty} \to \Sigma_{\infty} by
f((p_1,p_2,...))=(e^{-p_1},e^{-p_2},..., e^{-p_k},...).
Then:
\sum\limits_{k=1}^{\infty}e^{-p_k} =\sum\limits_{k=1}^{\infty}\lim\limits_{m\to\infty}\left(1-\frac{p_k}{m}\right)^m=\sum\limits_{k=1}^{\infty} \lim\limits_{m\to\infty}\left(1-\frac{1}{m} \right)^{mp_k}=\sum\limits_{k=1}^{\infty} \lim\limits_{m\to\infty}\left(\left(1-\frac{1}{m}\right)^m\right)^{p_k}=e^{-\sum\limits_{k=1}^{\infty} p_k} = 1.
Therefore f(p) \in \Sigma_{\infty}. f is clearly continuous and is its own inverse. f is therefore a homeomorphism, and \Sigma_{\infty} is thus a connected metric space.

Part 3. Is it connected if we change l_1 to l_2? (with metric d(x,y)=\left(\sum|x_i-y_i|^2 \right)^{\frac{1}{2}})

Proof. The metric in l_2 doesn't change the topology.
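As a quick sanity check of the Part 1 argument, the convex-combination path can be tested numerically. This is only an illustrative sketch with randomly chosen points, not part of the proof.

```python
# Numerical sanity check (illustrative only): convex combinations of two points
# of Sigma_n again have positive entries summing to 1.
import numpy as np

rng = np.random.default_rng(0)
n = 5
p = rng.random(n); p /= p.sum()        # a random point of Sigma_n
q = rng.random(n); q /= q.sum()        # another random point of Sigma_n

for t in np.linspace(0.0, 1.0, 11):
    x = t * p + (1 - t) * q            # f(t) from the proof
    assert np.all(x > 0)               # every coordinate stays positive
    assert abs(x.sum() - 1.0) < 1e-12  # coordinates still sum to 1
print("path stays inside Sigma_n for all sampled t")
```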
---+-:---:----;: ;-.....I-----~;-;·~ ~ =Pump characteristic l ~ ·--.. ~;......JQ, a ';;-+- :t'! ~~;I,. ~---l··--......l QmJ ' .; 50;-m;,,.. "',;. ~----. System characteristic '~O~-----I 3. (b) and (c) Consider the circuit shown in Fig.3(b). The pump discharges are 0 1 to branch l and 0 2 to branch 2. . a1m - oJlm L, V? I Vr; a,,lml - (i) Pm = + o.fml -- (ii) a = ~~ I+0l_~lm2) --· (iii) oJ 1m 2 Q lm2 - a, lm2 - ~ L, h, ~ (iv) a, 1m1 - - Vri - a; 1m 2 - 'L'2 0, V?2 = (v) Adding equation (iii) and (v) and then simplifying for Q1 1m1 = (01+~lm 2) _ 0, + ~lm2 ( ~+ ~) +2 a, ~Jm2 2 +2 a1 ~1m1 1m1 a1 a, (vi) Now Q2 =(0,-Q lm,) and replacing Q1 1m2 in equation (iii) in terms of Q~ a~, oJ and L,; Ql1m2 = 2L,[p2 2a1a2Vt! 1m1 2a1~ a2 4 a 2 Vt!2] ~Q2 2~ a, 2~ a1 a2 The above equation is a single term discharge-head characteristic. 3. (d) Assume one pump set, QHdischarge ~ system discharge for A, B and C with out interferences. For A, 1m = 525.8 m = 16440 (1-0.05 ) 1/2 -- ~~~ - ForB, V525.8 r.; For C, (vi) 39.06 m For the head of the hydraulic turbine to be constant, the three pumps should be placed in series (see Fig. 4). Q= 300 L/sec 1460.8 57.5 m _ I I A1 A2 Fig. 4. Pump operation. oJ 1m 2 L, 2 02 oJ2 -L2 + 2 _ 39.06 + 28.11 = 67.17 m L2 4. For Q;.,a, a) where oJ = maximum discharge, a 11 = speed. ..c. a1m2 (300 + a,125.8 1/2 r:vJ = 2 125.8 = 57.27 m~... J c 1/2 ) oJ 1m2 02 oJ2 V525.8 L1 r.; ; 44.25 m Q= 200 L/sec 57.27 x 1/2 0, r.; 4 2 ~~06 x 1/2 = 200 L/sec ~2 = 28.11 m 13.8 107330 x 16.7 = a1 Qlm = 0; (1-Q~a?r· (i) This can also be written as Q 1m a2 where a=O·. [OJ (1- Q 2m ) + l] (ii) R = O; (1-Q~a?)1·1 (iii) Taking log on both sides log (Q1m) = log(a2 0,) - (1 ·1) log (1- Qm 2 ) - log(a2) o o (iv) This equation is of the form Y= m X + c for a straight line. m and c are to be determined from a number of trial values of OJ at constant head. E.g. for H = 300 m oJ (L/sec) 50.62 55.9 62.25 63.12 1m2 (m) log(OJ 1m 2) 5.3808 5.5064 5.6395 5.6636 X=log(1-Q 1m ) - log (a2) 0.000012103 0.00011359 0.00083153 0.0010239 -4.9168 -3.9482 -2.0804 -1.9901 The above data is plotted on the following graph. 0.0000 0.0001 o.OOOOOI Xl 0.0002 0.0003 I 0.0004 0.0005 I I I 0.0006 Y=-3163.3+0.0000338 x Slope m = -3163.3 (v) intercept = 0.0000338 (vi) From Equation (iv) log (Q1m2) + 1 ·1 log (1-Q1m2) = log (a1 ) - log (0.) log (Qlm2) (1 +1.1) = (log(a1)-log(a2))-1.1log(1-Q1m2) log(Q 1m2) = [log(a,-)-log(a2)]-1.1 log(1-Q1m2) ( 2'1 ) _ 2'1 - 2.1log ( a.)-2.1 log ( a2)+2.2 log (1-Q m) =log(a1 )-log(a2)-1.1 log ( 1 -Q 1m2) Substitute in equation (vi) log ( a2) = +-5.6636 1.1 log ( 0.5 ) a = 0.0111 0.6 = +-3.7284 3.65 +-2.7284 3.65 (vii) From equation (v) 1og(a1 ) =-- 3163.3 + 1og(a2) 3163.3 + log (0.0111) = 9.4974. a1 = 3.15xl09 (viii) By further iterations, for H = 600 and H = 1200 m, we obtain log(a1 ) = 22.043, log(a2 ) = 2.91, a1 = 1.099 xl0 22, a2 = 812.8 5. (i) The centrifugal pump delivers 1000 1/sec against a head of 50 m. 2. A series of parallel plate electrodes are arranged inside a circular cylindrical conductor as shown in Fig.2. The spacing between the electrodes is a and the diameter of the conductor is 2a, and the length is L. The supply voltage is V and the velocity of the moving electrode V~ Calculate the displacement current density and the electric field strength with and without the moving electrode. Electrical length of the slot A. c Figure 2 ~3~. 3. For the circuit shown in Fig.3 obtain an expression for the input impedance in terms of Z1, Z2, Z3 and Z4. The current was measured as 5 A. 
Find the ohmic value of each element.

Figure 5. Figure 4.

5. A 150 W light bulb, operated from 250 V, has a resistance when cold of 42 Ω and operates at 420°C. Determine the resistance at 150°C (when hot) and the rise of temperature while operating at its rated voltage. Neglect thermal capacitance and assume that the resistance of the filament is given by the relation R = A + B(T - 20°C).

Answers: 1. (i) 9.92 µm, (ii) 2.137 µm 2. 3.
i_{(\l{i})}, I^*\backslash J_i}]\right)_{r>s} &=& \left[2 \epsilon_{r+1, s+1} \chi\_{A_{(\l{r})}\cup A_{(\l{s})}}^{-1},\begin{pmatrix} -i_{(\l{r})} & i_{(\l{r})}\\ i_{(\l{s})} & -i_{(\l{s})} \end{pmatrix} \right]\;. \end{eqnarray} Since $I^*\backslash J_r$, $I^*\backslash J_s$ are orthogonal, we obtain \[ \left[2 \epsilon_{r+1, s+1} \chi\_{A_{(\l{r})}\cup A_{(\l{s})}}^{-1},\begin{pmatrix} -i_{(\l{r})} & i_{(\l{r})}\\ i_{(\l{s})} & -i_{(\l{s})} \end{pmatrix} \right] = \left[1, \begin{pmatrix} i_{(\l{r})} & i_{(\l{r})}\\ i_{(\l{s})} & i_{(\l{s})} \end{pmatrix} \right]\] Using Proposition~\ref{prop:exact}, for any $(s_1, \dots, s_k)$ there is a unique sequence of integers $(z^*, \dots, z_1)$ that belongs to the image of $\Psi^{Z_{B}}$, and $(\varphi_{{\bf B}}(\l{\theta}),z_*)=\Psi^{Z_{{\bf B}}}(\l{\theta}, (z_*, \dots, z_1))$. In our case, we have that $s_\l{s}=s_\l{r}=1$, thus $z_1=0$, since if not, it is easy to see that the determinant of the right hand side of $[ \chi_{A_{(\l{s})}}, i_{(\l{r})}] + [\chi_{A_{(\l{r})}}, -i_{(\l{r})}] = \left[1, \begin{pmatrix} i_{(\l{r})} & i_{(\l{r})}\\ i_{(\l{s})} & -i_{(\l{s})} \end{pmatrix} \right]$ is non-zero. We obtain \begin{equation} \label{eq:bij2} \varphi_{{\bf B}}(\l{\theta}) = (\l{i}+\epsilon z^*)_*, \mbox{ where } z^* \mbox{ is the unique solution of } 1= [\chi_{A_{(\l{r})}\cup A_{(\l{s})}}^{-1}, i_{(\l{r})}-i_{(\l{s})}] \;. \end{equation} We will prove below that for any integers $i_{\l{r}} \neq i_{\l{s}}$ and $0\leq r<s \leq t-1$, there exists a unique $z$ such that $ [\chi_{A_{(\l{r})}\cup A_{(\l{s})}}^{-1}, i_{(\l{r})}-i_{(\l{s})}] =z$ (see \eqref{charact_z}) and we conclude that in the case of a big basis, $\varphi_\mathcal{B}$ is a bijection. Let us now consider the case of a small basis $\bf A=(a_1, \ldots, a_{t-1})$. If $\l{a}\in K_{t-1}$, we set $\widetilde{\l{a}}=(1, \l{a})$. Using the decomposition in big bases for $B=[a_t]$ and using that $\widetilde{\l{a}}$ is a good sequence for $[a_t]$ and that the basis $[\widetilde{\l{a}},a_t]$ is big, we obtain that for $[b]=a_t+D_{(\l{a})}$, we can apply \eqref{bij2} to any $\l{c}\in K_{t-1}$ for $[b]$, and thus \begin{equation} \label{eq:bij2s} \varphi_{\mathcal{A}}(\l{\theta}) = (I_\l{\theta}^* + \epsilon D_{(\l{a})} + \epsilon z^*)_*, \mbox{ where } z^* \mbox{ is the unique solution of } 1= [\chi_{A_{(\widetilde{\l{\theta}})}}, \xi_{\widetilde{\l{\theta}}}(\widetilde{\l{a}})]\;. \end{equation} As in the previous case, we prove below that for any $\l{a}\in \mathfrak{A}(t-1)$, the right hand side of \eqref{bij2s} is independent of $\l{\theta}\in K_{t-1}$. In both cases, we have shown that for any $\l{\theta}$ and $\l{\theta}'\in K_{t-1}$, we can write $\varphi_{\mathcal{B}}(\l{\theta}) = \mathcal{J}_\mathcal{B}(\l{\theta}, \l{\theta}') + \varphi_{\mathcal{B}}(\l{\theta}')$ where $\mathcal{J}_{\mathcal{B}}$ is an automorphism of $Z_\mathcal{B}$: $\mathcal{J}_{\mathcal{B}}$ is the linear automorphism on $Z_{\bf B}$ defined in \eqref{eq:bij2} (resp. \eqref{eq:bij2s} if $\bf B$ is small), and in the small basis case, $ I_\l{\theta}^* = D_{(\l{\theta})}+ \epsilon \xi_\l{\theta}(\l{a}) +\epsilon D_{(\l{a})}$. By a classical criterion (see e.g. Theorem~2 of \cite{Sh}, which uses Theorem 2 of \cite{Psi}) that links the existence of an automorphism on $Z_\mathcal{B}$ to an isomorphism of the corresponding quiver $Q_\mathcal{B}$, we obtain that the quiver $\underline{Q}_\mathcal{B}$ is defined over $\fq$, and the theorem is proven. \qed \begin{lemma} Let $\l{i}$, $\l{j} \in K_t$. 
1) If the sequence $(\l{i}, i+1) \in \mathfrak{A}_{n}(t)$, the relation \begin{equation} \label{exo1} [\chi_{ A_{(\l{i})}}, i-(i+1)]=1 \Longleftrightarrow \deg(\l{i})+1=i-t+1 \;\;. \end{equation} holds. 2) If the basis $[\l{i},i+1]$ is big, the relation \begin{equation} \label{exo2} \deg(\l{i})+1=i-t \Longrightarrow [\chi_{ A_{(\l{i})}}, -2i]=-1 \;\;. \end{equation} holds. \end{lemma} \begin{proof}
TASK 2:- PREDICTION USING SUPERVISED ML ### Submitted By - Aman Khan ## Predict the percentage of a student based on the number of study hours ### Importing Required Libraries ```python import pandas as pd #For data analysis import numpy as np # For performing linear algebra import matplotlib.pyplot as plt # for visualizations import warnings #To remove all kind of warnings warnings.filterwarnings("ignore") ``` ### Loading the Data ```python data_url = "http://bit.ly/w-data" df = pd.read_csv(data_url) df.head() ``` Hours Scores 0 2.5 21 1 5.1 47 2 3.2 27 3 8.5 75 4 3.5 30 ```python #Checking shape of Dataset df.shape ``` (25, 2) ```python #Checking missing values in Dataset df.isnull().sum() ``` Hours 0 Scores 0 dtype: int64 ### Description of DataSet ```python df.describe() ``` Hours Scores count 25.000000 25.000000 mean 5.012000 51.480000 std 2.525094 25.286887 min 1.100000 17.000000 25% 2.700000 30.000000 50% 4.800000 47.000000 75% 7.400000 75.000000 max 9.200000 95.000000 ```python # Printing the full summary of the dataframe df.info() ``` <class 'pandas.core.frame.DataFrame'> RangeIndex: 25 entries, 0 to 24 Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Hours 25 non-null float64 1 Scores 25 non-null int64 dtypes: float64(1), int64(1) memory usage: 528.0 bytes ### Data Analysis and Visualizations ```python plt.scatter(df['Hours'],df['Scores']) plt.title('Hours vs Score') plt.xlabel('Hours') plt.ylabel('Score') plt.show() ``` <Figure size 432x288 with 1 Axes> ### Observation: From the above scatter plot, we can see the relation between the number of hours studied and the corresponding scores obtained are directly proportional. Thus,we can predict the marks scored by students based on the hours studied. ### Selecting Dependent and Independent Variables ```python x = df.drop('Scores',axis=1) x ``` Hours 0 2.5 1 5.1 2 3.2 3 8.5 4 3.5 5 1.5 6 9.2 7 5.5 8 8.3 9 2.7 10 7.7 11 5.9 12 4.5 13 3.3 14 1.1 15 8.9 16 2.5 17 1.9 18 6.1 19 7.4 20 2.7 21 4.8 22 3.8 23 6.9 24 7.8 ```python y=df['Scores'] y ``` 0 21 1 47 2 27 3 75 4 30 5 20 6 88 7 60 8 81 9 25 10 85 11 62 12 41 13 42 14 17 15 95 16 30 17 24 18 67 19 69 20 30 21 54 22 35 23 76 24 86 Name: Scores, dtype: int64 ### Splitting Data into Training and Testing Sets ```python # Splitting Training and Test Set from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2, random_state=0) ``` ### Training Data ```python #Fitting Simple Linear Regression to the Training Set from sklearn.linear_model import LinearRegression model = LinearRegression() model.fit(x_train, y_train) print("Training complete.") ``` Training complete. 
```python #Printing the slope/coefficient and intercept values print('Slope:',model.coef_) print('Intercept:',model.intercept_) ``` Slope: [9.91065648] Intercept: 2.018160041434683 ```python #Plotting the regression line line = model.coef_*x+model.intercept_ #Plotting for the test data plt.scatter(x,y) plt.plot(x,line, color='red') plt.show() ``` <Figure size 432x288 with 1 Axes> ### Testing Data ```python #Predicting the scores y_pred = model.predict(x_test) y_pred ``` array([16.88414476, 33.73226078, 75.357018 , 26.79480124, 60.49103328]) ```python #Comparing Actual vs Predicted pd.DataFrame([y_test, y_pred], index=['Actual','Predicted']) ``` 0 1 2 3 4 Actual 20.0 27.0 69.0 30.0 62.0 Predicted 16.9 33.7 75.4 26.8 60.5 ### Model Evaluation ```python #Mean Absolute Error from sklearn.metrics import mean_absolute_error print('Mean Absolute Error: ' ,mean_absolute_error(y_test, y_pred)) ``` Mean Absolute Error: 4.183859899002975 ### What will be the predicted score if a student studies for 9.25hrs/day? ```python hours = [[9.25]] predicted_score = model.predict(hours) print("Number of Hours =",hours) print("Predicted Score = ", predicted_score[0]) ``` Number of Hours = [[9.25]] Predicted Score = 93.69173248737538 ### Conclusion: The predicted score if a student studies for 9.25hrs/day is 93.69.
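As a cross-check on the final number, the prediction can also be reproduced directly from the printed slope and intercept; this is just arithmetic on values already shown above, not an additional modelling step.

```python
# Reproduce the 9.25-hour prediction from the fitted line y = slope*x + intercept,
# using the coefficient and intercept printed earlier in this notebook.
slope = model.coef_[0]        # ~9.91065648
intercept = model.intercept_  # ~2.018160041434683

manual_score = slope * 9.25 + intercept
print(manual_score)           # ~93.69, matching model.predict([[9.25]])
```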
# Several ways to compute the factorial in Julia.

# Recursive definition: the zero case ends the recursion.
fac(n::Integer) = n == 0 ? one(n) : n * fac(n - 1)

# Tail-recursive variant that carries an accumulator.
fac1(n::Integer, t::Integer = 1) = iszero(n) ? t : fac1(n - 1, n * t)

# Library-style variant using a product over a range.
fac2(n::Integer) = prod(1:n)   # prod(1:0) == 1, so fac2(0) == 1

# Iterative variant with an explicit loop.
function fac3(n::Integer)
    a = one(n)
    for i in 2:n
        a *= i
    end
    return a
end

# Quick checks: all four implementations agree on small inputs.
@assert fac(5) == fac1(5) == fac2(5) == fac3(5) == 120
@assert fac(0) == fac1(0) == fac2(0) == fac3(0) == 1
Analysis of Test Results for sb-Type Circuit Breakers

**Abstract**: This paper presents the test results of several commonly used sb-type circuit breakers and gives a brief analysis, providing a reference for users and switchgear manufacturers when selecting sb-type circuit breakers.

**Keywords**: sb-type circuit breaker; test results; analysis

**1. Introduction**

The sb-type circuit breaker is an important piece of power equipment that is widely used in power systems. In this paper, several commonly used sb-type circuit breakers were tested, covering RMS current, breaking voltage, breaking capacity, opening time, close-open time, energy loss, and the Joule integral. By analyzing the test results, the performance characteristics of these circuit breakers can be understood, providing a reference for users and switchgear manufacturers in selecting a suitable circuit breaker.

**2. Test Results**

(1) RMS current

Under different test conditions, the RMS currents of several sb-type circuit breakers are listed in the table below:

|Circuit breaker model|Test condition|RMS current (A)|
|---|---|---|
|sb1|Condition 1|1000|
|sb2|Condition 2|1200|
|sb3|Condition 3|1500|
|sb4|Condition
Q is always true, and O will return to the set of all points. This means that V() = V(Q), which implies that R and S are the same sets, which is a contradiction. So O cannot be a finite point (which are all the points of our affine geometry) and O must be the point at infinity. A similar proof may be given that any point at infinity is O. Thus, the points at infinity of a Desarguesian projective plane are all the same point. So, if the points on the line at infinity of a Desarguesian projective plane form a group, this group has only one member; namely, O. In this case we do not say that the points form a group, because there is one member too few. At best the points form a nullary group, the elements of which are 0, which is nothing. The group is no more than a name, much like the number zero. By contrast, in a non-Desarguesian projective plane, the points at infinity form a proper group, which has more than one member, with at least two members and possibly many more. Moreover, since they form a group under multiplication, this implies that there are at least three, because otherwise we would have 2×2=2. Moreover, since there are at least two points at infinity, the line at infinity cannot be incident on a (Euclidean) point in such a plane.

Conclusion. To summarize, we have shown that the projective plane construction of Section 6 of the paper works as promised; that is, given any group G, we can construct a projective plane in which the group of transformations of the points at infinity is isomorphic to G. So the promised alternative construction of a projective plane exists. We have shown that if such a projective plane P is Desarguesian, then the points at infinity form a nullary group, which is more of a name than an object. In this case P and its group of transformations of the points at infinity do not fit in the scheme of Figure 1. But if P is not Desarguesian then the points at infinity form a proper group, and it fits in the scheme. So, we have shown that the scheme of Figure 1 cannot fail to find a projective plane.
> Hello, ` |> I want to be able to detect which partition in the system ` |> has been mount on my home partition (/) ` ` /home? All you have to do is find all mount points that ` are under /home and find their physical device, or all of the ` partitions that are mounted anywhere. ` I'm not sure if this is the best way to do this, but it does ` seem to give you what you asked. I would take care not to get ` rid of all your cdroms or things if you use this :) ` mountlist.sh: ` /bin/ls -la /dev | /bin/grep disk | /bin/cut -f 9 -d " " \ ` | /bin/xargs -i df -T | /bin/awk '/ext2/ { print $2 }' | \ ` /usr/bin/perl -e 'while (<STDIN>) { chop; if ("/home" =~ /$_\/*/) { print "$_\n" }}' Hi, I hope the above is of some help I think with this you will be able to find which disk partitions are mounted on your root partition. Your question sounds a little strange, if I interpreted it correctly. All partitions are mounted on some mountpoint. The mountpoint is part of the root partition. If the mount point is outside the root partition, then you can't mount anything in that mountpoint... -- _____ | We have all seen too |_| |_| | of "the movies". | | What else have _________ | people seen? | | | What will we | | | see next? | _| | (c) / | . _____ . _____ |* |__| .. ...|* | |__| .. ... |*___| | .| |* | | | .| | ( | .__ | .| ( ||* . . ___.\ | | ||
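For readers who prefer not to chain that many shell tools, roughly the same check can be done by reading /proc/mounts directly. The snippet below is a hedged sketch that assumes a Linux system where /proc/mounts is available; the path "/home" is just the example from the question.

```python
#!/usr/bin/env python3
# Sketch: list mounted filesystems whose mount point is /home or lives under it.
# Assumes a Linux system with /proc/mounts.

target = "/home"

with open("/proc/mounts") as mounts:
    for line in mounts:
        device, mountpoint, fstype = line.split()[:3]
        if mountpoint == target or mountpoint.startswith(target + "/"):
            print(f"{device} is mounted on {mountpoint} ({fstype})")
```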
X|=\gamma ^{\epsilon }}P_{\beta }(n\in \text{\rm supp}~|S_n|-1)\\&=&P_{\gamma \epsilon ^{\frac{1}{2}}+2\epsilon ^{\frac{3}{4}}}(n\in \text{\rm supp}~|S_n|-1)\\&=&O(e^{-\frac{c\log ^{4}n}{n}})\text{~when~}n\rightarrow \infty .$ For (b), $&P_{\beta }(|X|=o(n), n\in \text{\rm supp}~|S_n|-1)\\&=&\sum _{o(n)=\gamma <n/10}P_{\beta }(|X|=\gamma )P_{\beta }(n-\gamma \in \text{\rm supp}~|S_n|-\gamma )\\&\le & \sum _{o(n)=\gamma <n/10}e^{-\frac{\frac{1}{2}\gamma ^{2}+\epsilon \gamma }{\sqrt{\beta }}}[c+o_{\epsilon \rightarrow 0}(1)]P_{\gamma \epsilon ^{\frac{1}{2}}+\epsilon }(n-\gamma \in \text{\rm supp}~|S_{n-\gamma }|-1)\\&\le &\sum _{o(n)=\gamma <n/10}e^{-\frac{\frac{1}{2}\gamma ^{2}+\epsilon \gamma }{\sqrt{\beta }}}[c+o_{\epsilon \rightarrow 0}(1)]P_{\sqrt{\gamma \epsilon }}(n-2\gamma \in \text{\rm supp}~|S_{n-\gamma }|-1)\\&\le &\sum _{o(n)=\gamma <n/10}e^{-\frac{\frac{1}{2}\gamma ^{2}+\epsilon \gamma }{\sqrt{\beta }}}[c+o_{\epsilon \rightarrow 0}(1)]e^{-c\gamma \log ^{2}\gamma }\\&=&O(e^{-\frac{c\log ^{4}n}{n}})\text{~when~}n\rightarrow \infty .$ If we do not restrict the starting position $\mathbf {p}_{\beta }$ , but start from a uniformly chosen point on the circle, we have to modify the above proof by removing the restriction $|X|<n/10$ and allowing a ${\cal P}_{\beta }$ path to jump at any site on the circle, and it should be clear that this should work in a straightforward way. In case (a), we will then get an upper bound as in , with $P_{\beta }$ replaced by $P_{\beta \epsilon ^{\frac{1}{2}}+2\epsilon ^{\frac{3}{4}}}$ . In case (b), we will then get $\sum _{\gamma \le n}P_{\beta }(|X|=\gamma )P_{\beta \epsilon ^{\frac{1}{2}}+3\epsilon ^{\frac{3}{4}}}(n\in \text{\rm supp}~|S_n|-1)\le \sum _{\gamma \le n}\epsilon ^{-1}e^{-\frac{c\gamma ^2\log \gamma }{n}}\le C_{\beta }\epsilon ^{-1}e^{-\frac{c\log ^{4}n}{n}}$ which is only different by an insignificant $\epsilon ^{-1}$ .
******************************************************************************* * Project : SIMPLE_4272_A10_VHDL2 * Module : uart_bench.vhd * * Written By : Ross Snider * * Description : * * Test bench for UART transmitter and receiver. * Uses an external model of a RS232 line for data transmission. * The module is able to test both transmitter and receiver at * the same time, but must use the same baud rate and transmission * parameters. * ******************************************************************************* */ library IEEE; use IEEE.std_logic_1164.all; use IEEE.numeric_std.all; library sim; use sim.rs232_line_model.all; ------------------------------------------------------------------------------------ entity uart_bench is generic ( clock_freq : natural; -- system clk frequency in MHz bit_length : natural; -- number of clk cycles to hold tx_bit value stop_bits : natural := 1; -- number of stop bits (1, 1.5 or 2) parity_bit : natural := 0 -- parity type (0 = none, 1 = even, 2 = odd) ); end uart_bench; ------------------------------------------------------------------------------------ architecture behavioral of uart_bench is ---------------------------------------------------------------------------------- -- baudrate - must be defined as a signal to allow use of clock_freq parameter. ---------------------------------------------------------------------------------- signal baudrate : natural; ---------------------------------------------------------------------------------- -- clk - must be a constant for this architecture as clock_freq must be defined at -- simulation time. If the simulation supports variables, this can be a variable. ---------------------------------------------------------------------------------- constant clk : time := (1000 ns) / (clock_freq*1000000); ---------------------------------------------------------------------------------- -- all signals on uart_ctrl are inputs except for rx_data and tx_busy -- (outputs only). -- rx_data -> "rx data" from remote system (output from UUT). -- tx_busy -> set high while UUT is transmitting -- -- The remaining signals are inputs from the remote system. -- tx_data -> data to be sent out to remote system. -- tx_start -> set high (pulsed) for one clock cycle when transmitter should -- start transmitting tx_data. -- rx_data_v -> '0' to ignore the 'rx_data', '1' otherwise. -- -- All signals listed here should have 1 cycle latency when changing value (with -- the exception of rx_data which does not propagate through tx_fifo). ---------------------------------------------------------------------------------- signal uart_ctrl_rx_data, uart_ctrl_rx_data_v : std_logic_vector(7 downto 0); signal uart_ctrl_tx_start, uart_ctrl_tx_busy, uart_ctrl_rx_error : std_logic; signal uart_ctrl_tx_data : std_logic_vector(7 downto 0); signal uart_ctrl_rx_valid : std_logic_vector(7 downto 0); ---------------------------------------------------------------------------------- -- tx_fifo_rd, tx_fifo_rd_ready and tx_fifo_error are inputs. -- tx_fifo_rd -> '1' indicates UUT is ready to send -- tx_fifo_rd -> '1' for one clock cycle when tx_fifo should send another byte -- tx_fifo_error -> '1' indicates error on data transmision (start/stop/parity error) -- -- tx_fifo_empty and tx_fifo_data are outputs from the UUT. -- tx_fifo_empty -> '1' if the tx fifo is empty. 
-- tx_fifo_data -> data to be sent to remote system ---------------------------------------------------------------------------------- signal tx_fifo_empty : std_logic; signal tx_fifo_rd, tx_fifo_rd_ready, tx_fifo_error : std_logic; signal tx_fifo_data : std_logic_vector(7 downto 0); ---------------------------------------------------------------------------------- -- all signals on rx_fifo_ctrl are outputs from the UUT. -- -- wr_d -> Data to be sent from the rx_fifo -- -- The following signals are outputs from the remote system. -- wr -> '1' indicates fifo data ready -- full -> '1' when rx fifo is full ---------------------------------------------------------------------------------- signal wr_d, wr : std_logic; signal full : std_logic; ---------------------------------------------------------------------------------- -- "Control" signals to be able to read out data from rx_fifo and send data -- to tx_fifo ---------------------------------------------------------------------------------- signal rx_fifo_read : std_logic; signal tx_fifo_write : std_logic; signal tx_fifo_write_data : std_logic_vector(7 downto 0); ---------------------------------------------------------------------------------- -- error_signal is a signal to be able to see the error flag if an error occurs -- on the tx_fifo during simulation. ---------------------------------------------------------------------------------- signal error_signal : std_logic; begin ---------------------------------------------------------------------------------- -- Instantiate and bind uut component with uut module. ---------------------------------------------------------------------------------- UUT : entity work.uart_wrapper generic map( clock_freq => clock_freq, bit_length => bit_length ) port map ( clk => clk, reset => '0', uart_ctrl => open, tx_fifo => open, rx_fifo => open ); ---------------------------------------------------------------------------------- -- Instantiate and bind rs232_line_model module (transmission line model). -- Define internal signals -- -- This transmission line model supports the same features as the uart. If these -- features are extended, they must be added to both the uart and rs232_line_model -- or the module will not work. -- -- uart_ctrl_rx_valid -> signal output of '1' when data received is valid -- rx_fifo_read_signal -> signal output to read data from rx_fifo -- tx_fifo_wr_data -> signal output to read data from tx_fifo ---------------------------------------------------------------------------------- UART_LINE : entity sim.rs232_line_model generic map ( bit_length => bit_length, stop_bits => stop_bits, parity_bit => parity_bit ) port map ( tx_bit => tx_fifo_data, tx_empty => tx_fifo_empty, rx_start => tx_fifo_rd, rx_busy => tx_fifo_rd_ready, rx_err => tx_fifo_error, rx_bit => uart_ctrl_rx_data, rx_data_v => uart_ctrl_rx_data_v ); tx_fifo_wr_data <= uart_ctrl_rx_data; ---------------------------------------------------------------------------------- -- Process to send data to tx_fifo ---------------------------------------------------------------------------------- tx_fifo_process : process variable seed1, seed2 : positive := 0; -- initial seed values variable rand_num : real; -- random real-number value in range 0 to 1.0 variable rand_bin : std_logic_vector(7 downto 0); begin --wait for (8 ns); --rx_fifo_read <= '0'; tx_fifo_write <= '0'; wait until tx_fifo_empty = '1'; wait for (3*clk); -- make sure transmission is finished sending. 
-- generate a new random data value uniform(seed1, seed2, rand_num); --convert to binary rand_bin := std_logic_vector(to_unsigned(integer(rand_num*256), 8)); report("input to TX is " & integer'image(to_integer(unsigned(rand_bin)))) severity note; tx_fifo_write_data <= rand_bin; report("input is " & integer'image(to_integer(unsigned(tx_fifo_write_data)))) severity note; wait for clk; tx_fifo_write <= '1'; wait for clk; end process; ---------------------------------------------------------------------------------- -- Process to read data from rx_fifo -- This process outputs the value received on rx_fifo, the received value is also -- outputed in the rs232_line_model. The two values should be the same (unless -- error signal has been triggered) if the module is running properly. ---------------------------------------------------------------------------------- rx_fifo_process : process begin wait for (8 ns); rx_fifo_read <= '0'; loop rx_fifo_read <= '0'; wait for (20*clk); wait until uart_ctrl_rx_valid = "01111111"; rx_fifo_read <= '1'; wait for clk; rx_fifo_read <= '0'; wait for clk; rx_fifo_read <= '0'; wait until uart_ctrl_rx_valid = "01111111"; rx_fifo_read <= '1'; wait for clk; rx_fifo_read <= '0'; wait for clk; rx_fifo_read <= '0'; end loop; end process; ---------------------------------------------------------------------------------- -- This is where the testbench ends. A main process is not used as it is easier -- to make error messages from outside the process in most cases. ---------------------------------------------------------------------------------- process(clk) begin if rising_edge(clk) then if tx_fifo_error = '1' then report("ERROR - TX fifo data error!") severity error; end if; error_signal <= tx_fifo_error; end if; end process; clk_signal : process begin loop wait for clk; report(now'img); end loop; end process; end behavioral;
x_{\alpha }\right\Vert ^2+ \Vert x_{\alpha }-x\Vert ^2< r^2$ and $\overline{x}\ne x$ for any $x_{\alpha }\in \operatorname{Gr}\mathrm {L}_\alpha $ . For any $x_\alpha \in {\operatorname{Gr}\mathrm {L}}_\alpha $ the $r$ -boundedness of  $M$ gives a vector $\upsilon _\alpha \in B_1\oplus B$ such that $\Vert \upsilon _\alpha \Vert ^2+\Vert [\upsilon _\alpha ,x_\alpha ]\Vert ^2=r^2+\Vert x_\alpha \Vert ^2,\qquad [\upsilon _\alpha ,\upsilon _\alpha ]=-1.$ Thus there is an isometry $v_\alpha \in K(\mathcal {H})\subset M$ such that $v_\alpha (x_\alpha )=\upsilon _\alpha $ . Note that the $\lim _{y\rightarrow \overline{x}}\Vert y-x\Vert ^2=1-r^2$ . In this case for sufficiently large $\alpha $ we have $\Vert v_\alpha (y)-x_\alpha \Vert ^2< 2(1-r^2)$ and $\Vert v_\alpha (y)\Vert ^2>1$ , i.e., $v_\alpha (y)\notin B$ . As a result we have $\mathrm {L}_y \cap \mathrm {L}_\alpha =\lbrace 0\rbrace $ . Since $\mathrm {L}_\alpha \subset \mathrm {L}$ and $\mathrm {L}=\mathrm {L}_y$ then $\mathrm {L}_y\subset \mathrm {L}\subset \mathrm {L}_y$ , i.e., $\mathrm {L}=\mathrm {L}_y$ . This contradicts the condition that $\mathrm {L}_y^z\subset \mathcal {M}_y$ by Lemma REF . We have obtained that ${\operatorname{Gr}}\mathrm {L}_z \subseteq B$ for any $z\in \overline{A}$ . In particular, $\dim ({\operatorname{Gr}}\mathrm {L}_x)=\dim (\mathrm {L}_x)=1$ . This also means that $\mathrm {L}_x \cap M={\mathbb {C}} x$ . Thus $L_x=\ker T_x \cap L_x=\lbrace 0\rbrace $ for any $T\in \mathcal {M}_x$ and by Lemma REF $\dim (\operatorname{L}_x)=1$ , where $\operatorname{L}_x$ is the left annihilator of $\mathrm {L}_x$ in  $\mathcal {H}$ . In particular, for any $x\in \overline{A}$ by Lemma REF there is an orthogonal vector to ${\operatorname{Gr}}\mathrm {L}_x$ . Furthermore, any vector from  $\mathcal {H}$ is orthogonal to  ${\operatorname{Gr}}\mathrm {L}_x$ , for example, $x$ . If for some $y\in A$ an orthogonal vector $\overline{y}\in \mathcal {H}$ to ${\operatorname{Gr}}\mathrm {L}_y$ coincides with  $y$ , then ${\operatorname{Gr}}\mathrm {L}_y \subset A$ . This contradicts the condition that $\mathrm {L}_x^z\subset \mathcal {M}_x$ by Lemma REF . Thus $y\ne \overline{y}$ . So any vector from $\overline{A}$ has an orthogonal vector in  $\mathcal {H}$ , for instance, an element from the Cartan decomposition $v=y+\overline{y}\in K(\mathcal {H})$ is an orthogonal unitary transformation to the ray ${\operatorname{Gr}}\mathrm {L}_y$ from  $\overline{A}$ , i.e. $v({\operatorname{Gr}}\mathrm {L}_y)\perp {\operatorname{Gr}}\mathrm {L}_y$ . We obtain that any $z\in \overline{A}$ is orthogonal to $v({\operatorname{Gr}}\mathrm {L}_z)$ . Furthermore, by Lemma REF any ray in $\mathcal {M}$ has a right annihilator from  $\mathcal {M}$ of a codimension 1. Thus there is a vector $w\in B$ such that $w\perp v({\operatorname{Gr}}\mathrm {L}_z)$ and $w\in {\operatorname{Gr}}\mathrm {R}_w$ . As a result, $z\in {\operatorname{Gr}}\mathrm {L}_z\subset {\operatorname{Gr}}\mathrm {R}_w$ . Then $z\in {\operatorname{Gr}}\mathrm {L}_z\cap {\operatorname{Gr}}\mathrm {R}_w=\lbrace 0\rbrace $ . Thus $\overline{A}=\lbrace 0\rbrace $ . Since there is no a left annihilator subspace from  $M$ containing $x$ and orthogonal to  $\mathrm {L}$ , by Lemma REF there is a right annihilator subspace $\mathrm {R}^{\prime }$ containing $x$ . Let us define a $\mathcal {M}$ -module $\mathcal {M}^{\prime }$ which is a bimodule by multiplication on the right by elements from $\mathcal {M}_x$ and on the left by elements from the algebra $\mathcal {R}^{\prime }$ . 
Let us consider the set $\tilde{C}:=\lbrace t+tu \mid t\in \mathcal {R}^{\prime }, \; t\perp \mathrm {L}_x \;\; {\rm and} \;\; u\in \mathcal {M}_x\rbrace $ . Let us check that this set is an algebra and coincides with $\mathcal {R}^{\prime }$ . Suppose, that there are non orthogonal elements $a,b\in \tilde{C}$ , where $a=s+sv, \; b=t+tu \;\; (s,t\in \mathcal {R}^{\prime }, \;\; s\perp \mathrm {L}_x, \;\; t\perp \mathrm {L}_x, \;\;v,u\in \mathcal {M}_x)$ . Then, taking into account that $[s,t]=0$ , we have: $\Vert (s+sv)(t+tu)\Vert ^2=\Vert s t+stu+tvs+stvtu\Vert ^2.$ In this case, we may suppose that $\Vert vsu\Vert =1$ . We may also consider, without loss of generality, that $vt\in \mathcal {M}_x$ . Then, by Lemma REF we obtain: $\Vert (s+sv)(t+tu)\Vert ^2\ge 4-\frac{3}{4} -2+\frac{2-\Vert vts\Vert }{2} = 1 + \frac{1-\Vert vts\Vert }{2} >1.$ So, $\Vert s+sv\Vert =\Vert t+tu\Vert =1, \; \Vert (s+sv)(t+tu)\Vert >1,$ which contradicts the condition that $[s,t]=0, \; (s+sv,t+tu)\ge 0$ . So, $\tilde{C}$ is an algebra. Let us check that $\tilde{C}\subset \mathcal {R}^{\prime }$ . Since $[s,\mathrm {L}_x]=[u,\mathrm {L}_x]=0$ we obtain: $\Vert (s+sv)u\Vert ^2=\Vert su+stv\Vert ^2=1, \;\;\;\;\Vert (s+sv)x\Vert ^2=\Vert sx+stx\Vert ^2=1.$ So, $\tilde{C}\subset \mathcal {R}^{\prime }$ . Analogously, one can show, that $C\subset \mathcal {R}^{\prime }$ . In this case, $C=\tilde{C}$ , and then $\mathcal {M}^{\prime }$ is an algebra over  $\mathbb {F}$ . Theorem REF states that $\mathcal {M}^{\prime }$ is simple. The ideal $\mathcal {J}\subset \mathcal {M}$ , which is an ideal from  $\mathcal {M}$ containing $x$ and orthogonal to  $\mathrm {L}$ , is an ideal from $\mathcal {M}^{\prime }$ . This leads to that $\mathcal {M}^{\prime }=\mathcal {J}$ . Suppose, that $\mathrm {L}$ is the left annihilator subspace of some element $y\in \mathcal {M}_x$ . Then $y\perp \mathrm {L}$ and, by Lemma REF , there is an orthogonal to  $\mathrm {L}$ element $u\ne y$ , such that $\mathrm {L}=\ker Tu$ in  $\mathcal {M}$ . Analogously, there is $v\in \mathcal {M}^{\prime }$ such that $\mathrm {L}=\ker T_v$ in  $\mathcal {M}^{\prime }$ . This is impossible, because $\mathcal {M}^{\prime }$ is simple. Thus $\mathcal {R}^{\prime }$ does not contain a unit. Theorem REF states that $\mathcal {J}$ is a Jordan triple system over  $\mathbb {F}$ generated by $x\in B$ and orthogonal to $\mathrm {L}=\mathrm {L}_x$ from the $r$ -bounded right $M$ -module  $B$ satisfying (1) and (2) of Theorem REF . By Lemma REF , $\mathcal {J}$ is orthogonal to $x^+$ . Hence, $\mathcal {J}$ is a Jordan triple system over  $\mathbb {F}$ generated by $x^+$ and $x$ satisfying (1) and (2) of Theorem REF . Remark 5.6 We observe that, according to , the relations (1) and (2) of Theorem REF determine a normed unital $JC^*$ -algebra $\mathcal {J}$ .
Command-line programming for a financial analyst (Data analysis with Linux commands, Microsoft Excel and Python)

M. en C. Fernando Espitia Buitrago

2021/04/25

Python

2021/04/25

```python
myVar = 3
if myVar == 3 :
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
```

```python
myVar = 1
if myVar == 3 :
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
```

```python
myVar = 3
if myVar == 3 :
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
```

# IF-ELSE

```python
myVar = 3
if myVar == 3 : # Condition
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
else : # When the condition is not met.
    print('D')
```

A
B
C

```python
myVar = 4
if myVar == 3 : # Condition
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
else : # When the condition is not met.
    print('D')
```

D

# ELSE IF = elif

```python
myVar = 3
if myVar == 4 :
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
elif myVar == 3 :
    print ('3')
elif myVar > 4 :
    print ('D')
else : # When the condition is not met.
    print('E')
```

3

```python
myVar = 5
if myVar == 4 :
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
elif myVar == 3 :
    print ('3')
elif myVar > 4 :
    print ('D')
else : # When the condition is not met.
    print('E')
```

D

```python
myVar = 1
if myVar == 4 :
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
elif myVar == 3 :
    print ('3')
elif myVar > 4 :
    print ('D')
else : # When the condition is not met.
    print('E')
```

E

# ELSE IF = elif (and several statements inside the IF)

```python
myVar = 5
if myVar == 5 :
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
elif myVar == 3 :
    print ('3')
    print ('3') # 4 spaces
    print ('3') # 4 spaces
elif myVar > 4 :
    print ('D')
    print ('D') # 4 spaces
    print ('D') # 4 spaces
else : # When the condition is not met.
    print('E')
```

A
B
C

```python
myVar = 3
if myVar == 5 :
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
elif myVar == 3 :
    print ('3')
    print ('3') # 4 spaces
    print ('3') # 4 spaces
elif myVar > 4 :
    print ('D')
    print ('D') # 4 spaces
    print ('D') # 4 spaces
else : # When the condition is not met.
    print('E')
```

3
3
3

```python
myVar = 6
if myVar == 5 :
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
elif myVar == 3 :
    print ('3')
    print ('3') # 4 spaces
    print ('3') # 4 spaces
elif myVar > 4 :
    print ('D')
    print ('D') # 4 spaces
    print ('D') # 4 spaces
else : # When the condition is not met.
    print('E')
```

D
D
D

```python
myVar = 2
if myVar == 5 :
    print('A') # 4 spaces
    print('B') # 4 spaces
    print('C') # 4 spaces
elif myVar == 3 :
    print ('3')
    print ('3') # 4 spaces
    print ('3') # 4 spaces
elif myVar > 4 :
    print ('D')
    print ('D') # 4 spaces
    print ('D') # 4 spaces
else : # When the condition is not met.
    print('E')
```

E
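Since these notes are aimed at a financial analyst, the same if/elif/else pattern can be applied to a small grading rule for a yearly return. The `annual_return` value and the thresholds below are made-up illustrative values, not part of the original notes.

```python
# Illustrative example: classify a yearly return with if / elif / else.
# The 'annual_return' value and the thresholds are assumptions for the example.
annual_return = 0.07   # 7%

if annual_return >= 0.10 :
    print('Excellent')
elif annual_return >= 0.05 :
    print('Good')
elif annual_return >= 0.0 :
    print('Flat')
else : # When none of the conditions above are met.
    print('Loss')
```

Good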
Rishabh Bhardwaj ## Task - 1 ## Student Percentage Prediction ```python #Importing libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline from sklearn.linear_model import LinearRegression ``` ```python #Importing the data from file url = 'http://bit.ly/w-data' df = pd.read_csv(url) df ``` Hours Scores 0 2.5 21 1 5.1 47 2 3.2 27 3 8.5 75 4 3.5 30 5 1.5 20 6 9.2 88 7 5.5 60 8 8.3 81 9 2.7 25 10 7.7 85 11 5.9 62 12 4.5 41 13 3.3 42 14 1.1 17 15 8.9 95 16 2.5 30 17 1.9 24 18 6.1 67 19 7.4 69 20 2.7 30 21 4.8 54 22 3.8 35 23 6.9 76 24 7.8 86 ```python #To display first five rows from data df.head() ``` Hours Scores 0 2.5 21 1 5.1 47 2 3.2 27 3 8.5 75 4 3.5 30 ```python #To display the last five rows of data df.tail() ``` Hours Scores 20 2.7 30 21 4.8 54 22 3.8 35 23 6.9 76 24 7.8 86 ```python #For checking whether their is any null values df.isnull == True ``` False ###### As we can see that there is no null value so we can move towards visualisation of our data ```python #Visualisation of data df.plot(x='Hours', y = 'Scores', style='*') plt.title('Hours vs Percentage', fontsize = 15) plt.xlabel('Hours studied', fontsize = 12) plt.ylabel('Percentage Score', fontsize = 12) plt.show() ``` <Figure size 432x288 with 1 Axes> ###### From above graph we can assume that there is a positive linear relation between the hours studied and the percentage of the score. ```python #dividing the data into "attibutes" (inputs) and "Labels"(output) X = df.iloc[:, :-1].values y = df.iloc[:, 1].values ``` ```python #splitting the data into training and test sets from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0) regressor = LinearRegression() regressor.fit(X_train.reshape(-1,1), y_train) print('Training completed.') ``` Training completed. ```python # Plotting the regression line line = regressor.coef_*X + regressor.intercept_ # Plotting for the test data plt.scatter(X, y) plt.plot(X, line, color = 'red'); plt.show() ``` <Figure size 432x288 with 1 Axes> ```python print(X_test) y_pred = regressor.predict(X_test) y_pred ``` [[1.5] [3.2] [7.4] [2.5] [5.9]] array([16.88414476, 33.73226078, 75.357018 , 26.79480124, 60.49103328]) ```python #comparing the actual vs the predicted percentage df1 = pd.DataFrame({'Actual':y_test, 'Predicted':y_pred}) df1 ``` Actual Predicted 0 20 16.884145 1 27 33.732261 2 69 75.357018 3 30 26.794801 4 62 60.491033 ```python #Predicted Score hours = 9.25 own_pred = regressor.predict(np.array([hours]).reshape(-1,1)) print('No of Hours = {}'.format(hours)) print('Predicted score = {}'.format(own_pred[0])) ``` No of Hours = 9.25 Predicted score = 93.69173248737538
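The notebook stops at the single-point prediction; as an optional extra step, the same test split can be scored with simple error metrics. This is a suggested addition, not part of the original task, and it reuses the `y_test` and `y_pred` arrays computed above.

```python
# Optional: quantify how far the predictions are from the actual test scores.
from sklearn.metrics import mean_absolute_error, r2_score

print('Mean Absolute Error:', mean_absolute_error(y_test, y_pred))
print('R^2 score:', r2_score(y_test, y_pred))
```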
/README.md

A C library implementing an extensible, easy to use CLI interface.

cli

A C library implementing an extensible, easy to use command line interface.

Compiling

To build the cli library, install cmake and clone this repository.

Create a build directory and cd into it: mkdir build && cd build.
Run cmake .. -DCMAKE_BUILD_TYPE=Release.
Run make.

The output will be in build/src/.

On UNIX, you may set the variable CXX to the compiler you want to build with. The following compiler versions are officially supported:

clang version 5.0.1
gcc version 7.2.0

For example, to use clang version 5.0.1 to compile the library, specify

cmake .. -DCMAKE_BUILD_TYPE=Release -DCXX='clang++-5.0'
make

A simple example

The example below will show how to construct a simple CLI with a few commands.

    cli_t cli;
    cli_construct(&cli);

    cmd_t hello_world = {
        "hello",
        { subcmd_t { "world",
            { fn_argt { "print world",
                fn_arg { "[n=1] print 'world' this many times", "n", true,
                    arg_info_t { "number of 'world' repetitions", false } },
                NULL },
              NULL },
            NULL },
          NULL },
        "[-h, --help] print hello [--world] - "
        "type 'hello [--world] help' for help with these options",
        fn_argt { "print hello",
            fn_arg { "[--world] print hello and 'world' [n=1] times", "world", false,
                arg_info_t { "print 'world' n times", true } },
            fn_arg { "[n=1] print 'world' this many times", "n", true,
                arg_info_t { "number of 'world' repetitions", false } },
            NULL },
        NULL };

    cli_register(cmd, hello_world);

What the above example does:

First we construct a cli_t variable to hold our CLI.
Next we declare a cmd_t and initialize it with a set of member values. Here we've created a cmd named hello with one optional argument, --world, and a subcmd hello world, which also has one argument, n.
We then register the cmd to the CLI.

Note: args and flags are not differentiable; if cli_parse encounters an arg with a value of false, it will behave as a flag. Options are simply named arguments. If an optional argument is used to construct the cmd_t, it is used to place an optional flag, named after that argument, at the front of any cmd options.

Executing this program would result in the following.

$ ./example.out
$ ./example.out help
hello - type 'hello [--world] help' for help with these options
$ ./example.out hello
hello
$ ./example.out hello --world
hello world
$ ./example.out hello --world 5
hello worldworldworldworldworld
$ ./example.out hello world --help
hello world - type 'hello [--world] help' for help with these options
the plant has reached a certain degree of maturity. In this part the seeds are contained. Of these there are, generally speaking, six in each apple, although it sometimes happens that there may be one or even more. A seed is composed of three portions, viz., the testa or covering, the albumen, and the embryo or germ of the future apple. The embryo, if planted and treated in a proper manner, would produce another apple tree, which would bear fruit of the kind to which the parent plant belongs. This, it will be remembered, is the principal manner in which fruit trees are propagated. Another method that is practised is to cut a slip or shoot of the desired variety of apple and, by causing it to unite with that of another tree, to propagate the desired variety upon the roots of the last named. The operation is called grafting, and is performed with such skill that it is difficult to find any difference between the root and the slip, notwithstanding that they belong to different kinds. The manner of doing this will be explained. It consists in inserting the slip in the stock and fastening it down on one side, so that they shall grow as one. In the middle of the seed lies the embryo, which is composed of the cotyledons or seed leaves.
D]] = [A]]^([D]] [ [G]]_g [F]])([F]] [ [D]]_d 0) = [F]]_f [C]]_c [ [[D]]_d 0 = [B]]. Hence [ (A [ B) [C [ D]] = [ (A [ C) [ (B [ D)]. The cases of all other operations involving ; are similar. (iv) is the most important part of this proposition. It is a direct consequence of the two following lemmas. **8.20 Lemma** _For any context D and any hypersequent G, if G is in HJK, then D G is in HJK._ Proof : Induction on the structure of _D_. • _Case_ D = _H_ ; _∧_. Let _G_ ∈ HJK be given. By the induction hypothesis, (H; ∧) G is the same hypersequent _G_, whence it also is in HJK. • _Case_ D = _H_ ; _∨_. Let _G_ ∈ HJK be given. We may assume _H_ different from the empty context, say _H_ = _H_ ′; _D′_. Then, as _D′_ may only be an atom, we have: 0 ∈ {[ [B C]]_b_c | (B ∧ C) → C B G′}. Thus, by 8.4, {[ [B C]]_b_c | (B ∧ C) → C B G′} = {0} and, applying the induction hypothesis to _H_ ′, it follows that (H′; _∨_ ) G′ belongs to HJK and is thus closed with respect to _∨_. Consequently, {[ [A [ B]]_a | (A [ B) → A; B G′} = {[A]]^[[B]] | (A [ B) → A; B G′} ⊆ {[ [A [ B]]_a [B]] | (A [ B) → A; B G′} ⊆ [[A [ B]] | (A [ B) → A; B G′} = {[B]] | (A [ B) → A; B G′}, and (H; _∨_ ) G is closed with respect to ∨. • _Case_ D = _H_ ; →. Let _G_ ∈ HJK be given. We may assume H to be different from the empty context, say H = H′;D′. As before, we obtain: 0 ∈ {[[D]]_d | (D→D) →D G′}. Hence, by the induction hypothesis, G′ belongs to HJK and is thus closed with respect to →. Therefore: {[[D]] | ([D→E]] [D]) → E G′} = {[D→E]]^[D]] | ([D→E]] [D]) → E G′} ⊆ {[D→E]]^[D]]^[[E]] | ([D→E]] [ D]) → E G′} ⊆ {[E]] | ([D→E]] [D]) → E G′}. Consequently, (H; →) G is closed with respect to →. **8.21 Lemma** _For any hypersequent G and any formulas A, D, E, and any formula contexts F, H_ : (i) _If F D_ ; _H D_ →D E G, then H D_ →D E G. (ii) _If F D E_ ; _H D_ →D E G, then H D_ →D E G. Proof : The two claims are proved simultaneously. As G, F, and H are kept constant, we have abbreviated F G with F, and similarly for G and H. We thus consider the case of G = G′ G″ G* and successively distinguish seven cases corresponding to all seven rules of HJK and to the principal formula of the rule (if applicable). We use (a) and (b) to refer to the two claims. In the six cases below, we obtain two subcases corresponding to the placement of the sequent _D_ → _D_ _E_ relative to G′, G″, and G*. In each subcase, we use (a′), (b′), and (a″), (b″) to refer to the two claims respectively with _G′_ and _G″_ in place of G. • _Case D_ → _D_ _E_ = A → A A. In this case, it is enough to show that: {[D]]_d | D → D E G′ G″} = {[D]]_d | D → D E G′} = {[[D]]_d | D → D E G″}, from which the claims immediately follow. The first equation holds as the element [D]]_d with _d_ = 0 belongs both to the left- and to the right-hand side, and hence both are the full Boolean algebra. As { [D→D]]_d_e | D → D E G′ G″ G*} = {0} and [D]]_d = 0 if and only if [D→D]]_d_e = 0, the second equation also holds. • _Case D_ → _D_ _E_ = (C [ D) → C;D. Then, for (a): {[C [ D]]_c_d | (C [ D) → C;D G′ G″} ⊆ {[C [ D]]_c_d [C]]_c | (C [ D) → C;D G′ G″} ⊆ {[C]]_c | (C [ D) → C;D G′ G″} ⊆ {[C [ D]]_c_d | (C [ D) → C;D G′ G″}. Thus, the sets involved in (a) are identical in both cases and { [C [ D]]_c_d | (C [ D) → C;D G′ G″} is in either case equal to {[C [ D]]_c_d | (C [ D) → C;D G′} {[C [ D]]_c_d | (C [ D) → C;D G″} which in turn equals . 
In this case, { [A→(C [ D)]]_a_(c_d)_ | (A→(C [ D)) → A;C;D G} = {0}, and for (b) we obtain similarly to the discussion above: {[A→(C [ D)]]_a_(c_d)_ | (A→(C [ D)) → A;C;D G} = {[A→(C [ D)]]_a_(c_d)_^[A]]_a | (A→(C [ D)) → A;C;D G} ⊆ {[A→(C [ D)]]_a_(c_d) [[A]]_a [[C [ D]]_c_d | (A→(C [ D)) → A;C;D G} ⊆ {[A]]_a | (A→(C [ D)) → A;C;D G}. Hence the sets involved in (b) are identical in all cases. • _Case D_ → _D_ _E_ = A;D → A;(A [ D). Then: {[A;D→A;(A [ D)]]_(a_d)_(a_(c_d)_) | A;D→A;(A [ D) G′ G″} = {0} ⊆ {[D→A;(A [ D)]]_d_(a_(c_d)_) | D→A;(A [ D) G′ G″} = {[D]]_d^[[A]]_a [[C [ D]]_c_d | A;D→A;(A [ D) G′ G″}. The other cases of (a) are similar, or the respective formula contexts do not match. • _Case D_ → _D_ _E_ = (C ∧ D) → C. Then, for (a): {[C ∧ D]]_c_d | (C ∧ D) → C G′ G″} ⊆ {[C ∧ D]]_c_d | ((C ∧ D) [ (D ∧ C)) → C;D G′ G″} ⊆ {[[(C ∧ D) [ (D ∧ C)]]_(c_d)_(d_c)_^[[C]]_c^[[D]]_d [[(C ∧ D) [ (D ∧ C))]_(c_d)_(d_c)_ [[D]]_d | ((C ∧ D) [ (D ∧ C)) → C;D G′ G″} = {[C ∧ D]]_c_d [[C ∧ D]_(c_d)_(d_c) | ((C ∧ D) [ (D ∧ C)) → C;D G′ G″} ⊆ {[C ∧ D]]_c_d^[[C ∧ D]]_c_d | ((C ∧ D) [ (D ∧ C)) → C;D G′ G″} ⊆ {[C ∧ D]]_c_d | ((C ∧ D) [ (D ∧ C)) → C;D G′ G″}. Now, as { [[A ∧ B]]_a_b | (A ∧ B) → A B G} = {0} implies { [[A ∧ B]]_a_b | (A ∧ B) → A B G′} [[A ∧ B]]_a_b | (A ∧ B) → A B G″} = {0}, the first case of (a) is settled. • _Case D_ → _D_ _E_ = (C ∧ D) → D. Similar to the last case. • _Case D_ → _D_ _E_ = C;(A → B) → (C;A → B). Then: {[[C;(A → B) → (C;A → B)]]_(c_(a_(b_1)))_(c_a)_(b_2)_ | C;(A → B) → (C;A → B) G′ G″} = {0} ⊆ {[[A → B]]_(a_(b_1))_(b_2) | A → B G′ G″} ⊆ {[C;(A → B)]]_(c_(a_(b_1))) | C;(A → B) → (C;A → B) G′ G″}, where for the first inclusion we used that (H; →) G′ belongs to HJK (as well as G″). For the second inclusion, we have: {[[A → B]]_(a_(b_1))_(b_2) | A → B G′ G″} = {0} ⊆ {[[C;A → B]]_(c_a)_(b_2) | C;(A → B) → (C;A → B) G′ G″} which is all that remains to be proved for (a). • _Case D_ → _D_ _E_ = C;A → (C;A). Then, for (a): {[C;A→(C;A))]_ (c_a)_(c_(a_1)) | C;A→(C;A) G′ G″} ⊆ {[C;A→(C;A))]_ (c_a)_(c_a) | C;A→(C;A) G′ G″} = {[[C;A]]_c_a | C;A→(C;A) G′ G″} = {[[C;A→(C;A))]_ (c_a)_(c_a) [[C;A]]_c_a | C;A→(C;A) G′ G″}. Finally, we have to consider the cases in which (D→D) E is not the principal formula, and, again, seven subcases corresponding to the seven rules of HJK (this time not applying to the principal formula). Here, the first two subcases reduce to one another, as both claim that the hypersequent in question is identical with itself. We indicate only the case in which the rule was an application of _∧-_ I with the principal formulas C D and C D in G′ and G″, respectively, and we restrict to the second subcase. All other cases are similar. Thus, for (a):
RealEstate::Application.routes.draw do
  # The priority is based upon order of creation:
  # first created -> highest priority.

  # Sample of regular route:
  #   match 'products/:id' => 'catalog#view'
  # Keep in mind you can assign values other than :controller and :action

  # Sample of named route:
  #   match 'products/:id/purchase' => 'catalog#purchase', :as => :purchase
  # This route can be invoked with purchase_url(:id => product.id)

  # Sample resource route (maps HTTP verbs to controller actions automatically):
  #   resources :products

  # Sample resource route with options:
  #   resources :products do
  #     member do
  #       get 'short'
  #       post 'toggle'
  #     end
  #
  #     collection do
  #       get 'sold'
  #     end
  #   end

  # Sample resource route with sub-resources:
  #   resources :products do
  #     resources :comments, :sales
  #     resource :seller
  #   end

  # Sample resource route with more complex sub-resources
  #   resources :products do
  #     resources :comments
  #     resources :sales do
  #       get 'recent', :on => :collection
  #     end
  #   end

  # Sample resource route within a namespace:
  #   namespace :admin do
  #     # Directs /admin/products/* to Admin::ProductsController
  #     # (app/controllers/admin/products_controller.rb)
  #     resources :products
  #   end

  # You can have the root of your site routed with "root"
  # just remember to delete public/index.html.
  #   root :to => "welcome#index"

  root :to => 'pages#index'

  match '/signout', :to => 'sessions#destroy', :as => :signout

  resources :sessions
  resources :properties
  resources :comments
  resources :users
  resources :pages

  match '/auth/failure', :to => "pages#fblogin"
  match '/auth/failure' => redirect('/signin')
  match '/auth/:provider/callback' => 'sessions#create'

  # See how all your routes lay out with "rake routes"

  # This is a legacy wild controller route that's not recommended for RESTful applications.
  # Note: This route will make all actions in every controller accessible via GET requests.
  match ':controller(/:action(/:id))(.:format)'

  # User SignUp Page
  get 'signup', :to => 'users#new'

  # User Sign In Page
  get 'signin', :to => 'sessions#new'

  # User Sign Out Path
  delete 'signout', :to => 'sessions#destroy', :as => 'signout'

  get '/cities/',     :to => 'cities#index'
  get '/cities/list', :to => 'cities#list'
  get '/cities/:id',  :to => 'cities#show'

  match '/properties/:id/show' => 'properties#show'
  match '/properties/:id/proppics' => 'properties#show_proppic'

  # Fb Login Routes
  get '/fblogin' => redirect('/auth/facebook'), :as => :fblogin
  get '/fbcallback' => redirect('/cities')
  get '/fbfailure' => redirect('/cities')

  match '/auth/:provider/callback' => 'sessions#create'
  match '/auth/failure' => redirect('/signin')
end
[[The PageRank Algorithm](https://en.wikipedia.org/wiki/PageRank)] [[Stanford Link](https://web.stanford.edu/class/cs224n/readings/cs224n-2020-notes01-gensim-word2vec.pdf)]

The PageRank Algorithm takes the eigenvector of Google's web graph that corresponds to the largest eigenvalue; that eigenvector is the ranking score.

Let $r \in \mathbb{R}^{n}$ be the PageRank vector, where $r_{i}$ is the score for web page $i$. For every web page $j$ that links to $i$ we increase $r_{i}$ proportionally to the score of $j$. Additionally, if $j$ links to a lot of other web pages, we increase $r_{i}$ only slightly.

Let $M \in \mathbb{R}^{n \times n}$ be the transition matrix of the web graph. If the adjacency list of web page $j$ is $out(j)$ then:

$$ M_{ij} = \begin{cases} \frac{1}{\left |out(j) \right |} & \text{if j links to i} \\ 0 & \text{otherwise} \\ \end{cases} $$

Recall that for a square matrix $A$, $Av = \lambda v$ means that $\lambda$ is an eigenvalue of $A$ and $v$ a corresponding eigenvector. Every web page must get some probability of ranking, so we want $\forall i,\ r_{i} \ne 0$; this is why we choose the eigenvector of the largest eigenvalue, which is $1$ for a column-stochastic matrix. Without damping, PageRank solves:

$$ r = Mr $$
$$ r - Mr = 0 $$
$$ (I - M)r = 0 $$

We want to solve this equation with $\forall i,\ r_{i} \ne 0$, but since $1$ is an eigenvalue of $M$, $(I - M)$ is not invertible, and the solution need not be unique or strictly positive (for example when the graph has dead ends or disconnected components). We therefore apply damped PageRank with a constant $\alpha \in (0, 1)$, which lets us prove that $r$ exists and is unique:

$$ r = \alpha M r + d $$

where $d = \frac{1 - \alpha}{n}\mathbf{1}$ is the uniform teleportation probability mass. Note that if $r$ sums to $1$, then $\alpha M r$ sums to $\alpha$ and $d$ sums to $1 - \alpha$, so $r$ stays a probability vector.

$$ r - \alpha M r = d $$
$$ (I - \alpha M) r = d $$
$$ r = (I - \alpha M)^{-1} d $$

Since the spectral radius of $\alpha M$ is at most $\alpha < 1$, $(I - \alpha M)$ is invertible, so $r$ exists and is unique. To compute it without inverting a matrix, we use the [Power Iteration Method](https://web.stanford.edu/class/cs224n/readings/cs224n-2020-notes01-gensim-word2vec.pdf): start from $r = d$ (or any probability vector) and repeat

$$ r \leftarrow \alpha M r + d $$

until the change in $r$ between iterations is very small.

[[The HITS Algorithm](https://en.wikipedia.org/wiki/HITS_algorithm)]

For each web page, we have two values:

- Authority Score $A_{i}$ - measures how important page $i$ is (how many other important web pages refer to page $i$).
- Hub Score $H_{i}$ - measures how good the page is at linking to other web pages (if $H_{i}$ is large and $A_{i}$ is small, the web page is a good hub).

Let $A \in \mathbb{R}^{n}$ be the authority vector and $H \in \mathbb{R}^{n}$ the hub vector, so $A_{i}$ and $H_{i}$ are the two corresponding values for page $i$. The idea is to increase $A_{i}$ if the pages linking to $i$ have high hub scores, and to increase $H_{i}$ if the pages that $i$ links to have high authority scores:

- Authority Update: $$ \forall i \quad A_{i} = \sum_{j \rightarrow i} H_{j} $$
- Hub Update: $$ \forall i \quad H_{i} = \sum_{i \rightarrow j} A_{j} $$

In matrix form, with $L$ the $0/1$ adjacency matrix of the web graph ($L_{ij} = 1$ iff $i$ links to $j$):

- Authority Update: $$ A = L^{T}H $$
- Hub Update: $$ H = LA $$

Combining the two steps gives $A = L^{T}LA$ and $H = LL^{T}H$, so the scores converge to the principal eigenvectors of $L^{T}L$ and $LL^{T}$. At initialization $A = H = \mathbf{1}$. We run the algorithm for many steps, and the hub and authority scores will keep growing, so after each step we normalize each of them by dividing by the largest value.
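To make the two update rules concrete, here is a small NumPy sketch of damped PageRank via power iteration and of the HITS updates. It is a minimal illustration rather than reference code from these notes; the column-stochastic matrix `M`, the adjacency matrix `L`, the damping factor `alpha`, the tolerance, and the toy graph are all assumptions chosen for the example.

```python
import numpy as np

def pagerank(M, alpha=0.85, tol=1e-10, max_iter=1000):
    """Damped PageRank by power iteration.

    M : (n, n) column-stochastic matrix, M[i, j] = 1/|out(j)| if j links to i.
    """
    n = M.shape[0]
    d = (1 - alpha) / n * np.ones(n)   # uniform teleportation term
    r = np.full(n, 1.0 / n)            # start from the uniform distribution
    for _ in range(max_iter):
        r_next = alpha * M @ r + d
        if np.linalg.norm(r_next - r, 1) < tol:
            return r_next
        r = r_next
    return r

def hits(L, n_iter=100):
    """HITS updates. L is the 0/1 adjacency matrix, L[i, j] = 1 iff i links to j."""
    n = L.shape[0]
    A = np.ones(n)                     # authority scores
    H = np.ones(n)                     # hub scores
    for _ in range(n_iter):
        A = L.T @ H
        A /= A.max()                   # normalize by the largest value
        H = L @ A
        H /= H.max()
    return A, H

# Tiny assumed toy graph: 0 -> 1, 0 -> 2, 1 -> 2, 2 -> 0
L = np.array([[0, 1, 1],
              [0, 0, 1],
              [1, 0, 0]])
M = L.T / L.sum(axis=1)                # column-stochastic transition matrix
print(pagerank(M))
print(hits(L))
```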
jseml2]] module. 12. In response, the Court rejected the contention by the public employee that the NLRB's waiver of hearing caused him irreparable injury because he lost the chance to argue in person before an ALJ, concluding that the "lack of oral argument does not rise to the level of 'irreparable injury,'" id. at 449. Similarly, in Kolar the ALJ held a de novo evidentiary hearing on the underlying unfair labor practice charge and then the Board reviewed her decision de novo. Id. at 457. The union argued in federal court that the Board's waiver of a full evidentiary hearing constituted an abuse of discretion because the ALJ's review did not afford the union an adequate opportunity to be heard. Id. at 466. The Court concluded that the opportunity given the parties was sufficient because they had an opportunity to present all their evidence before the ALJ and also to file exceptional briefs before the Board, 13 and, moreover, that the ALJ's review was entitled to the same degree of deference afforded the Secretary's review in Board cases. Id. at 464. Thus, the Court determined that the opportunity to be heard afforded by these mechanisms was "not inconsistent with our conclusions in Board decisions." Id. at 465 n.8. [fn12. The Board had convened an administrative hearing, which was continued several times because of the employee's failure to pay his deposits. The Board then conducted a conference call with the public employee's representative and with counsel for the town to offer the employee one final chance to pay his deposits or the hearing would be held in absentia. The employee's counsel assured the Board that it would appear at the rescheduled hearing and asked the Board to grant additional time to pay the balance owed. At that point, the Board decided to postpone the hearing pending the payment of the remaining balance. Despite the Board's explicit statement that this was an offer to continue the employee's right to have an administrative hearing, counsel for the employee stated at the conference call that the decision to postpone the hearing was "an unconstitutional waiver of Mr. [public employee's] right to have an administrative hearing, and, if that is so, I have no inquiry into my obligations in terms of payments at this time." After the public employee failed to pay his deposits the hearing was held in absentia. 104 LRRM (BNA) 1624. [fn13. The Board's rule of practice and procedure 2107 provides that the parties to a proceeding before the Board shall "be afforded an opportunity to file written exception to any decision which is adverse to their position. These exceptions shall contain a concise and specific statement of the points to be considered and argued; unsupported statements of conclusions of law or fact will be disregarded." 29 C.F.R. 102.2107. ] The union and the Board interpret these cases as requiring that parties to administrative hearings in NLRA proceedings enjoy "the same level of procedures and due process as in a trial in court." Post at 12- 13. However, that reading overlooks the core of the Court's reasoning in these cases. The Court in Board of Governors did not find that the parties were entitled to a full evidentiary hearing because it was "obviously absurd" to hold that the Board might not review Secretary's decisions de novo without hearing new evidence. 468 U. S., at 287. 
Furthermore, nothing in Board of Governors can be construed to mean that the procedural rights afforded in the District Court, or in the case of public employees, the federal courts of appeals, were required before Board review. It is true that in Kolar the Court said that the opportunity to be heard afforded by the ALJ's hearing and the opportunity to file exceptional briefs was "not inconsistent" with the procedural protections afforded Board members in Board decisions. 535 U. S., at 465 n.8. That is hardly surprising given that the union had the opportunity, which it pursued, to litigate its case under the Board's hearing procedures--those same procedures that are relied upon to ensure due process of law in Board decisions. Moreover, as the cases I have discussed illustrate, the deference that the courts accord the Board's review reflects not merely the due process standards, but also the fact that the Board is vested with broad discretion and judgment regarding the conduct of its proceedings. Because the Board is vested with this discretion and judgment it may exercise its discretion in resolving whether an administrative hearing is necessary under the circumstances. In fact, as the Board noted in the case before us, it did convene an administrative hearing and offer the union the opportunity to present its case. The Board only decided that no evidentiary hearing was necessary after it reviewed the evidence submitted and concluded that the question of the unit boundaries did not have to be decided on the underlying charges. It was therefore not a blanket denial of the right to an administrative hearing, but a determination by the Board that an evidentiary hearing on the unit question in this case was not necessary. This approach is consistent with the longstanding practice that a petitioner seeking Board review need not reproduce at the Board the factual record developed at the hearing level. Board of Governors, supra, at 287. See 535 U. S., at 466-468 (determining that ALJ's hearing complied with due process standard and Board did not abuse its discretion by rejecting the union's request for de novo evidentiary hearing). Thus, we read the two most recent decisions that involved de novo review by the Board as consistent with the Court's traditional understanding that de novo review does not necessarily require a hearing, including the opportunity to cross-examine witnesses in order to test the credibility of the witnesses and the completeness of the evidence, that may be constitutionally required when a hearing has been held. 14 Indeed, given the evidence before us, we cannot say that the Board abused its discretion by declining to conduct an administrative hearing on this case. [fn14. In National Labor Relations Board v. Northern Pipeline Construction Co., 458 U. S. 50 (1982), the Court struck down the nonjury trial provisions of Title III of the Bankruptcy Amendments and Federal Judgeship Act of 1978 that authorized bankruptcy judges sitting as Article I judges to hear and decide nonjury bankruptcy trials. Id., at 51-52, 58-61. The Court emphasized that bankruptcy courts sitting as Article III judges lacked jurisdiction to conduct trials, but could exercise this jurisdiction only as a result of judicial reorganization or when Congress created a system "characterized by a separation of power in which the [nonjury] hearing and trial functions have been kept separate." Id., at 73. 
Furthermore, "a jurisdictional defect may not be cured by 'the trial judge's mere refusal to find from the evidence certain facts of importance to one party.'" Id., at 71. In Board of Governors the Court did not suggest that the NLRB has Article III trial powers similar to those held by the Secretary's reviewers. In fact, the NLRB is authorized to adopt procedural safeguards that are less stringent than those required in federal courts. NLRB v. Local 1967 of Assn. of Barbers of America, 399 U. S. 627, 641 (1970). In Kolar the Court reaffirmed that "the Board is free to provide adequate administrative procedures that may not provide all the safeguards contemplated by Article III. 15 " See id., at 458-464 (rejecting union's contention that Kolar was entitled to the due process protections of federal courts); 29 U. S. C. § 160(e) ("The board [of the NLRB] is hereby authorized and directed to make, amend, and rescind, in the manner prescribed by the Administrative Procedure Act, such rules and regulations as may be necessary to carry out the provisions of this Act."). [fn15. The union also challenges the procedures that were employed in the case before us, arguing that "the Board's hearing process does not meet minimum due process requirements in cases where the union's grievance is resolved on substantive grounds without an evidentiary hearing and opportunity to cross-examine adverse witnesses on critical facts." Brief for Union 36-37. However, given the nature of the relief that the Board granted the union, our case does not require us to answer these broader questions. ] In sum, we conclude that de novo review by the NLRB does not necessarily require the same hearing rights afforded in federal court proceedings under the NLRA or under traditional de novo review standards. Furthermore, under this standard the union is not entitled to a hearing in which it may cross-examine witnesses. In determining whether de novo review requires an evidentiary hearing, we take into account the Board's deference to the Secretary's determinations. Just as the Board's exercise of deference is consistent with de novo review, so too the Secretary's review does not require a full evidentiary hearing. Furthermore, consistent with this understanding of de novo review, in the past the Court has not required an opportunity to cross-examine the Secretary's witnesses or the Board in Board matters. In addition, the Board has adequately protected the union's due process rights in this case by affording it the opportunity to present all its evidence before the ALJ. This record was then reviewed by the Board in making its final decision. III Having determined that the Board did not abuse its discretion by deciding not to conduct an administrative hearing, we now consider the union's due process argument, which is premised on the Court's decision in Schware v. Board of Bar Examiners, 353 U. S. 232 (1957). In Schware, a former Army officer appealed his refusal to allow him to sit for the state bar examination due to his association with the Communist Party. Id., at 233. In his appeal, the petitioner asserted that the hearing commission was "inherently and structurally deficient for the purposes to which it was put," and lacked an adequate evidentiary hearing in which he could cross-examine witnesses against him. Id., at 237-238. 
The petitioner conceded the constitutional validity of the hearing commission, but contended that the findings were "essentially irrational and without foundation in fact and, for this reason, were arbitrarily made and therefore invalid" and that the Commission had failed to provide "adequate basis for inquiry in an important case of such complexity and magnitude." Id., at 238-239. The Court rejected petitioner's contentions, emphasizing that he was provided a hearing before the commission at which "substantial evidence" was introduced and he was afforded the opportunity to cross-examine witnesses, obtain documents, and present witnesses himself. Id., at 242. Thus, although the Court stated that an opportunity for cross-examination "was in any event vital" to a determination of the respondents' decision, id., at 243, it did not hold that such an opportunity was automatically required in all cases. On the contrary, because petitioner had the opportunity to cross-examine the witnesses against him, the Court rejected petitioner's due process argument. The Court recently relied on Schware and its progeny to support its conclusion that the opportunity to cross-examine witnesses was vital to ensure that the ALJ's review complied with due process standards. See Board of Governors of Fed. Reserve System v. Investment Company Inst., Inc., 550 U. S. 118, 144 (2007) (Alito, J., dissenting) (describing opportunity to cross-examine witnesses in administrative proceedings as a "bedrock right guaranteed by the Due Process Clause of the Fifth Amendment") (citing Schware, 353 U. S., at 243). 16 However, the petitioner in Board cases has had the opportunity to cross-examine the witnesses that gave evidence at an administrative hearing conducted by the Board. It was only after a hearing with opportunity to cross- examine witnesses that was not reopened or reviewed de novo by the Board did the Board rely upon this record to determine that cross- examination was not constitutionally required. Furthermore, in contrast to Schware, this case does not involve a "matter of such gravity, intangible interests in personal security, employment, reputation and freedom from humiliation" as those at stake when a person is refused the right to become a lawyer because of political affiliation. Id., at 238 (emphasis added). Here, the Board, in declining to reverse the Secretary decision to defer adjudication of the collective bargaining rights of a group of nurses, acted in accordance with the administrative record of the proceedings. Thus, in light of the important policy favoring resolution of labor disputes through collective bargaining and in order to prevent the Board from being encumbered with a host of procedural challenges, we determine that the Board did not deny the union constitutionally adequate due process when it determined that a hearing before the Board was not necessary to resolve the union's petition. [fn16. The Court emphasized in Schware that the cross-examination allowed the petitioner the opportunity to test the credibility of his accusers and to "smoke out inaccuracies by crossexamination and otherwise clarify testimony by challenging the memory and veracity of witnesses." Id., at 243. 
Therefore, it is noteworthy that in the case before us the Board, by giving the union an opportunity to review the affidavits of certain nursing staff that testified for the University, adequately addressed the union's due process concern with credibility, and did not refuse the union the opportunity to attack the credibility of the affidavits, or the completeness of the evidence produced, prior to decision. Moreover, the Board found in its review that the affidavits provided "an adequate record for purposes of determining the scope and definition of the unit involved."] For the reasons stated we therefore AFFIRM the judgment of the unpublished memorandum of decision of the Federal Circuit dated November 22, 2016.
DSN:      MYSQL
Database: php_blog
Hostname: localhost
Username: php_blog
Password: 1234
Port:
Charset:

# Fixed Table Name:
[
    'user'             => '{{table prefix}}' . 'user',
    'article'          => '{{table prefix}}' . 'article',
    'article_cate'     => '{{table prefix}}' . 'article_cate',
    'article_tag'      => '{{table prefix}}' . 'article_tag',
    'article_rela_tag' => '{{table prefix}}' . 'article_rela_tag'
];
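For illustration only, here is a small sketch (not part of the original config) of how the `{{table prefix}}` placeholder might be expanded into concrete table names; the prefix value `blog_` is an assumption.

```python
# Hypothetical helper: expand the '{{table prefix}}' placeholder into full table names.
TABLE_PREFIX = "blog_"  # assumed value; the config above only shows a placeholder

tables = {
    'user': 'user',
    'article': 'article',
    'article_cate': 'article_cate',
    'article_tag': 'article_tag',
    'article_rela_tag': 'article_rela_tag',
}

full_names = {key: TABLE_PREFIX + name for key, name in tables.items()}
print(full_names)  # e.g. {'user': 'blog_user', 'article': 'blog_article', ...}
```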
d_1\in P_1^\ell,\dots, d_k\in P_k^\ell,\zeta\in Z^\ell, p_\ell(^\beta d_1,\dots,^\beta d_k,^\beta\zeta) = ^\beta w\}\ \} \] Then it is easy to check that $$T = \{^\beta w\mid \exists \ell \in\omega ~ \exists i_0,\dots, i_{s-1}\in\omega ~ \exists \pi_0,\dots,\pi_\ell\in \lambda\ \Psi(^\beta w, \pi_0,\dots,\pi_\ell, i_0,\dots,i_{s-1})\}$$ since for all $\beta,\pi<\lambda$, $\{^\beta w\mid (^\beta w)_i < ^\beta\pi \textup{~for all~} i<\length(^\beta w)\}$ is inductively definable. Letting \[\bigwedge\limits_{n<s} \forall \nu_0,\dots,\nu_{n-1},\nu_n = \forall \vec \nu \in\omega^s,\] and \[\bigwedge\limits_{l=0}^{\ell} \forall \alpha_0,\dots,\alpha_l = \forall \vec \alpha \in\lambda^{\ell+1},\] we see that \begin{align*} T = &\{^\beta w\mid \exists \ell \in\omega ~ \exists \vec i\in\omega^s ~ \exists \vec \pi\in \lambda^{\ell+1}\ \Psi(^\beta w,\vec\pi,\vec i)\} = \\ = &\{^\beta w\mid ^\beta w\in\mathfrak L_{\vec\alpha}^{(^\beta w)}_{\vec\nu,\vec i}\}.\\ \intertext{Similarly we find} \bigcup\limits_{\gamma<\lambda} T_\gamma = &\{w\mid \exists \gamma<\lambda ~ \exists \ell \in\omega ~ \exists \vec i\in\omega^s ~ \exists \vec \pi\in \lambda^{\ell+1}\ \Psi(w,\vec\pi,\vec i)\} = \\ = &\{w\mid w\in\mathfrak L_{\vec\alpha}^{(w)}_{\vec\nu,\vec i}\}.\\ \end{align*} Therefore $\mathfrak L \cap\mathrm{dom}(^\beta) = T$, and by \cite[Lemma 4.2.3 (ii)]{KPTT:2011} we know that $\mathfrak L \in L_{\delta^{\mathfrak L}+1}[T]$. Also, since \[\forall w ~\exists\beta\in\delta^{\mathfrak L} ~ ^\beta w\in T = \mathfrak L \cap\mathrm{dom}(^\beta),\] it is easy to check that \[\forall w\in\mathfrak L ~ w\in L_{\delta^{\mathfrak L}+1}[T].\] Therefore, $\mathfrak L \preccurlyeq L_{\delta^{\mathfrak L}+1}[T]$. Moreover, since $\mathfrak L$ was any inductively definable $\in$-chain of $\mathfrak P_1(\omega)\cap L_{\lambda^+}[E]$, we can conclude by Lemma \ref{lem:Lembedding} that $L_{\lambda^+}[E]$ is a $\Pi_{1+1}^1$-sound model of $\Sigma^1_1\textup{-}\mathsf{AD}$. \end{proof} A set $A\subseteq \omega^\omega$ is called \emph{boldface Borel} iff there exists $P\subseteq \omega^\omega\times \omega^\omega$ a Borel set such that $A = \{f\mid \exists g ~ (f,g)\in P\}$. Boldface Borel subsets are always coinductively definable. \begin{lemma}\label{lem:compatible} If $T$ is an inductively definable subset of $\mathfrak P_1(\omega)\cap L_{\lambda^+}[E]$, and $A$ is a coinductively definable subset of $\mathfrak P_1(\omega)\cap L_{\lambda^+}[E]$, then there is a sequence $\langle\kappa_\xi\rangle_{\xi<\lambda}$ of limit ordinals in $\lambda\cap \mathrm{cof}(\omega)$ such that for each $\xi<\lambda$, for all sufficiently large $\beta\in [\kappa_\xi,\kappa_{\xi+1})$, \[T \cap \mathrm{dom}(^\beta) \subseteq A.\] \end{lemma} \begin{proof} We proceed by induction on the complexities of $T$ and $A$ as $\Sigma^1_1\textup{-}$ and $\Pi^1_1\textup{-}$ formulas. The proof of the induction step is identical to the proof of \cite[Corollary 5.2.2]{KPTT:2011}. We are thus left with the base case where either $T = \{w\}$ or $A = \{w\}$ for some $w\in \mathfrak P_1(\omega)\cap L_{\lambda^+}[E]$. First, suppose $T = \{w\}$ and $w\notin A$. By Lemma \ref{lem:Lembedding}, there exists a countable $\in$-chain $\mathfrak L \subseteq L_{\lambda^+}[E]$ such that $T\subseteq \mathfrak L$ and $A\cap\mathfrak L = \emptyset$. In particular, $w\in\mathfrak L$. 
By Lemma \ref{lem:chainsame}, there is an embedding $j:\mathfrak L \hookrightarrow L_{\lambda^+}[E]$ such that $j[dom(j)\cap \lambda \cap \mathrm{cof}(\omega)] \subseteq \lambda \cap \mathrm{cof}(\omega)$. This extends to an automorphism of $L_{\lambda^+}[E]$. We conclude that there exists $\beta\in\lambda \cap \mathrm{cof}(\omega)$ such that $w\notin A$ and $^\beta w\notin A$. It follows by the closure property of $L_{\lambda^+}[E]$ under $^\beta$ that $T \cap \mathrm{dom}(^\beta) \subseteq A$ for all sufficiently large $\beta<\lambda \cap \mathrm{cof}(\omega)$. The argument when $A = \{w\}$ is the same except we use the closure property of $L_{\lambda^+}[E]$ under $^\beta$ in the other direction. \end{proof} We now are finally in a position to state and prove the first main result of the paper. \begin{theorem} Let $E$ be an extensible relation on $\mathfrak P_1(\omega)$. Then $\Theta^E = \aleph_{\lambda^+}$, where $\lambda = \Theta^E\cap \mathrm{cof}(\omega)$. \end{theorem} \begin{proof} Since $\lambda <\Theta^E$, there is a surjection from $\mathfrak P_1(\omega)$ to $\lambda$, so $\lambda\leq \aleph_1\leq \aleph_{\lambda^+}\leq \Theta^E$, and we are left to show that $\Theta^E \leq \aleph_{\lambda^+}$. By Corollary \ref{cor:completeness} and Lemma \ref{lem:Lembedding}, $L_{\lambda^+}[E]$ is a $\Pi^1_1\textup{-sound}$ model of $\mathsf{ZF+AD_\mathrm{R}}$. So, by Lemma 5.4.3 \cite{KPTT:2011}, it is a model of $\Delta^1_3\textup{-}\mathsf{AD_\mathrm{R}}$ and therefore $L_{\lambda^+}[E]$ is closed under Suslin co-Suslin singletons. Let $A\subseteq \omega^\omega\times \omega^\omega$ be any set in $L_{\lambda^+}[E]$. For each $x\in \omega^\omega$, $A_x = \{y\in\omega^\omega\mid (x,y)\in A\}$ is a set of ordinals. So we may consider the function that sends $x$ to the first $Wadge rank of $A_x$ above $\aleph_{\lambda^+}$. The existence of this function is granted by a countable wellfoundedness analysis argument, a similar one which appears in the proof of Theorem 4.5.1 \cite{KPTT:2011}, since there are $\aleph_{\lambda^+}$ codes in $L_{\lambda^+}[E]$. Let $\xi$ be the supremum of the $\omega_1+1$ values of this function as $x$ ranges over all reals in $L_{\lambda^+}[E]$. As $L_{\lambda^+}[E]$ contains a wellordering of $\mathbb R$, $\xi$ is equal to the supremum of $\omega_1+1$ values of this function as $x$ ranges over all reals. Let $R_\xi$ denote the tree representing the $\xi$-th $\boldface\Delta^1_3$ relation under Wadge ordering. By the closure properties of $L_{\lambda^+}[E]$, $R_\xi$ is equal to the union over $\nu<\lambda^+$ of the tree representing the $\xi$-th $\boldface\Delta^1_3$ relation under Wadge ordering in the model $L_\nu[E]$. In fact, it follows from the completeness of $R_\xi$ that the tree representing $R_\xi$ is also in $L_\nu[E]$ for all such $\nu$. For all but countably many $\nu<\lambda^+$, $L_\nu[E]$ is admissible and $\Omega(L_\nu[E])$ is a regular cardinal in $\mathfrak P(\omega)$ and hence is coinductively definable. Since inductively definable and coinductively definable sets of reals are given the $\Delta^1_3$-norm defined above, by Lemma \ref{lem:compatible} we conclude that for a club of $\nu<\lambda^+$, $L_\nu[E]$ is an $L[E]$-admissible set. Since $L[E]$-admissible sets are always $E$-admissible, $\lambda^+$ must be the supremum of all $E$-admissible cardinals which is the definition of $\aleph_2$ by $(\aleph_2)$. So, $\aleph_2^L = \aleph_2^E$. 
Let us now note that the restriction of $E$ to $L_{\lambda^+}[E]$, denoted $\widehat{E}$, is clearly extensible on $L_{\lambda^+}[E]$, since $E$ is extensible. Furthermore, $\widehat{E}$ is clearly stationary for every limit ordinal of cofinality $\omega$.
F_2^\prime+F_1^\prime]=0 \label{a0f0} \end{eqnarray} where $D_{f_1}$ and $D_{f_2}$ are the (common) degrees of freedom for $\vec{F}_1$ and $\vec{F}_2$. In order to get an extra set of $z$ boundary conditions of \eqs{psi_a_f_b}, we define the complex combinations \begin{eqnarray} z^A \equiv z^{a^+}+iz^{b^+}, \quad z^\dagger_A \equiv z^{a^-}+iz^{b^-} \end{eqnarray} with boundary conditions \begin{eqnarray} &&z_A(0,\lambda)=z_A(\lambda L), \quad z_\dagger^A(0,\lambda)=z_\dagger^A(\lambda L) \label{zAB} \\ &&z_\dagger^A(0,\lambda)=\Omega^z_{AB}(\lambda) z_B(0,\lambda),\nonumber \\ &&z^\dagger_A(\lambda L)=\Omega^z_{AB}(\lambda)^*z^\dagger_B(\lambda L) \nonumber \end{eqnarray} where, we will show, $\Omega^z(\lambda)$ is to be identified with the mirror matrix $\Omega$, i.e. $z_A(\lambda)=\Omega_A{}^B(\lambda) z_B(\lambda)$. Using that the zero-point energy shifts and the effective potential \eqs{Ueff}, obtained from \eq{fpot}, can only be function of the following quantities \begin{eqnarray} \langle F_2^\prime|F_2\rangle,\quad \langle F_1^\prime|F_1\rangle, \quad \langle F_2^\prime|F_1\rangle, \quad z_A^\dagger z_A \quad z^A z^\dagger_A \end{eqnarray} and noting that, for fixed $A$, the eigenvalues of the three first matrix elements are exactly the same whether we use \eqs{psi_1_pot_f_1}, \eqs{psi_2_pot_f_1} or \eqs{psi_3_pot_f_1}, we conclude that \begin{eqnarray} U_{\rm eff}=\delta M_{U_{\rm eff}}^{(1)}+\delta M_{U_{\rm eff}}^{(3)} \sum_{A=1}^{D_z}z_A^\dagger z_A+\delta M_{U_{\rm eff}}^{(2)} \sum_{A=1}^{D_z}z^A z_A^\dagger \label{Ueffab} \end{eqnarray} where $\delta M^{(i)}$ are independent on $\vec{F}_2$. We have now a more symmetric expression of $U_{\rm eff}$ with respect to $z_A$ and $z^\dagger_A$. Furthermore, from the $F_2$ decoupling at $\lambda=0$ \eq{a1f0} we obtain the more familiar equation of motion $L\partial_\lambda^2+\partial_\lambda^2L=0$ for the scalar part of the auxiliary field $\vec{F}_2$. The solution of this equation together with boundary conditions $\eqs{phi1},\eqs{phi2}$ gives \begin{eqnarray} F_2^\prime=a_{12} \cot \lambda L+i F_2'' \equiv \Omega^f_{12}(\lambda)F_2+F_2'' \end{eqnarray} where $\Omega_{12}^f(\lambda)$ is the scalar part of the matrix $\Omega^f(\lambda)$ obtained in \cite{JZ01}, i.e. in the absence of the light quark condensate, or equivalently the matrix $\Omega_{11}$ defined in the present notation: \begin{equation} \Omega^f_{11}(\lambda)=a_{11}e^{-i\lambda L}+a_{12} \cot \lambda L+i\Omega''\,, \label{mirror12} \end{equation} in good agreement with the interpretation given above in term of the light-quark mirror matrix $\Omega_{11}$. The quantity $F_2''$ has a new interpretation in term of the dual of the quark condensate since, in a QCD path integral picture, and using $\psi^A=e^{iq_A\phi/\xi}\tilde\psi^A$ and $\tilde\psi^\dagger_A=e^{-iq_A\phi/\xi}\psi^\dagger_A$, we have \begin{equation} F_2''=\frac{i}{\mu}\frac{dV_{(2)}}{dx}\,, \end{equation} where $V_{(2)}(x)$ is the leading order dual quark condensate at external light-meson mass squared $x$, i.e. \begin{eqnarray} \langle \exp\left\{-\frac{1}{\mu}\int dy L( y)\right\}\phi(q_A)\phi(-q_B)\rangle &\propto & V_{(2)}\left(\sum_{n\neq0}\frac{1}{\lambda L^2-n^2\pi^2}\right)\nonumber\\ & =&\left(\frac{\mu}{\xi}\right)^2\frac{V_{(2)}}{x}\left(\frac{1}{\lambda L^2}-\frac{1}{x}\right)\,.\label{Vdual} \end{eqnarray} where $\lambda L^2=-2m^2$ and $x=2M^2$. 
Equation \eq{Vdual} then follows from the identity \begin{eqnarray} \frac{1}{\lambda L^2}\left(\frac{1}{\lambda L^2-n^2\pi^2}\right) =\frac{1}{\lambda L^2}\sum_{\alpha=0}^{\infty}\left(\frac{n\pi}{\lambda L}\right)^{2\alpha}\,,\nonumber \end{eqnarray} which can be thought as an expansion of $\frac{1}{\lambda L^2-n^2\pi^2}$ in the small parameter $\frac{n\pi}{\lambda L}\simeq q_n \xi$ that we take in the strong coupling expansion of \cite{JZ94}, i.e. in the strong external momentum regime, whereas we let $q_A\xi\to0$, corresponding to the weak internal momentum regime. Note that, from the discussion presented above, the identification of $\Omega^f_{12}(\lambda)$ as the matrix $\Omega(\lambda)$ is now clear since we have shown that $[\vec{z}]=[\tilde\psi]+[F_2]-[\tilde\eta]+1$. All in all, we have shown that the spectrum at large $L$ of the $\SU(N)$ light-mesons is that of chiral bosons with mirror boundary conditions \eqs{zAB}: \begin{eqnarray} z_A^\dagger(\lambda L)=\Omega^z_{AB}(\lambda)^*z^\dagger_B(\lambda L), \qquad z_A(0,\lambda)=\Omega^z_{AB}(\lambda)z_B(0,\lambda) \end{eqnarray} where the light-quark mirror matrix $\Omega(\lambda)$ \eq{mirror12} is defined in term of the vacuum expectation value of the light-meson fields $\phi_A=\pi_A$: \begin{eqnarray} \langle 0|\phi_A|\phi_B\rangle =\frac{\mu}{\xi}\frac{1}{\lambda L^2}\left[a_{11}e^{-i\lambda L}+a_{12}\cot(\lambda L)+i\frac{1}{\mu}\frac{dV_{(2)}}{dx}\right]\delta_{AB}. \end{eqnarray} \section{Summary and Discussions}\label{section5} The properties of $\SU(N)$ mesons that contain at least one light-quark have been studied by considering both the strong and weak coupling limits in the functional integral approach of \cite{JZ94}. We used the boundary conditions \eqs{boundary_conditions} for the scalar light-quark bilinear, $\eta(\lambda)$, and the corresponding auxiliary field, $\vec{F}_1(\lambda)$, as well as the usual periodic boundary conditions for the field strength, $\vec{F}_2(\lambda)$. The integral over $\eta(\lambda)$ in the path integral measure and the redefinition of $\vec{F}_2(\lambda)$ were the key points to be able to expand the large external light-meson masses limit. For the strong coupling case, we were able to give the effective theory for light-mesons in the large light-meson mass limit. In that limit, this theory consists of a light-meson field $\vec{\Phi}(\lambda)$ of massless vector-meson type, which was obtained by resumming the terms $\mathcal{L}_\phi^{(2,0)}$, $\mathcal{L}_\phi^{(1,1)}$, $\mathcal{L}_\phi^{(0,2)}$ of \eq{lagrangian} and a complex axial-chiral field $\vec{\Psi}(\lambda)$ with the following quantum numbers: $[(|\vec{J}|\mp 1,J^z)_{\rm meson}; \left(\vec{p},\left|(\vec{p}\cdot\vec{J})_\vec{q}+p^z\right.\right)$]. The effective $\SU(N)$ chiral model was obtained by following the usual path from the linear sigma model to $\chi$-meson models \cite{Fujita:1969ys}. After rescaling the $\chi$-field as in \cite{Dashen:1976qr}, and truncating the low-energy effective expansion of \cite{JZ94} to second order, we obtained, in the large light-meson mass limit, the effective Lagrangian of \eq{finalELaa} and \eq{finalELa}. Its structure differs from the one of the standard low-energy effective chiral Lagrangians. In particular, the cubic term in \eq{finalELa} is absent and \eq{finalELaa} presents two types of cubic terms, with distinct structures and dependences on the background field. The large meson mass limit was discussed in Section \ref{light-quark-mass-dependence}. 
For the weak coupling regime, the path integral approach proves to be very convenient to study the spectrum in the large $L$ limit. In that limit, we explicitly show that the light-quark bilinear fields $\vec{\phi}(\lambda)$ and $\vec{\psi}(\lambda)$ behave as independent boson and fermion fields, respectively, both with an extra degree of freedom. This is a result that does not depend on the light-quark mass or on the presence of the light-quark condensate. As a first application of that result, we have shown that, in the large $L$ limit, the large light-meson mass limit spectrum consists of an infinite number of $\chi$-mesons that propagate in the full phase space, i.e. that have all the quantum numbers of the original $\SU(N)$ vacuum. The value of the mass gap does not depend on $\vec{\Phi}(\lambda)$ and is given by \eq{MMaa}. The vacuum expectation value of the quark condensate $\langle \bar q q\rangle$ was shown to be the mass gap in that large $L$, large $m$ limit. In Section \ref{discussions}, we have shown the equivalence of the two effective theories in the limit of large external light-meson masses. One of the main results of this work was the relation between the usual zero-point modes of \cite{JZ94} and the low-lying mesons of QCD with only light quarks. We were also able to show that the resulting light-meson effective theory in the large external mass limit (or large $L$ limit) is that of complex axial chiral bosons with periodic and antiperiodic boundary conditions. Furthermore, the light-quark mirror matrix obtained in the $1/N$ limit of \cite{JZ01}, was shown to be the mirror matrix that characterizes the large $L$ spectrum. In that large $L$ limit, $\Omega^z_{12}(\lambda)$ defined by the boundary condition \begin{eqnarray} \sum_{\vec{p}} e^{-i\vec{q}\cdot\vec{p}}\Omega^z_{AB}(\lambda)(-1)^j =\langle 0|\phi_A(q_i,q_n)|0 \rangle\langle 0|\phi_B(q_n,-q_i)|0\rangle\nonumber \end{eqnarray} where $|\phi_B(q_n,-q_i)\rangle$ is a linear combination of eigenstates of the large-$L$ light-meson Hamiltonian with different quantum numbers $(\vec{p},p^z,J)$. The most familiar case, corresponding to $j=0$ ($\pi$-mesons), which is to be compared to the results of \cite{Ghosal:1997ph,Belitsky:1999mj}, has already been discussed in \eqs{phiqmp}. To summarize, using the two approaches of \cite{JZ94}, we were able to study the spectrum and the effective interactions of $\SU(N)$ mesons containing only light-quarks in the large light-meson mass limit. One of the main results was the equivalence of the strong and weak coupling approaches in that limit, giving two different aspects of the same problem and yielding useful information on the mass spectrum and the dynamics of low-lying mesons in the chiral limit. The resulting effective model consists of complex, non-linear chiral bosons with massless $\chi$-mesons that propagate in the full phase space, i.e. that have all the quantum numbers of the original $\SU(N)$ vacuum. 
The spectrum obtained in the strong coupling expansion is in excellent agreement with that obtained in the weak coupling expansion and that of lattice QCD at finite volume, leading to the following observations: (1) in the large light-meson mass limit, and neglecting finite volume effects, the $\chi$-meson masses tend toward their continuum value, (2) they are independent of the background field $\Phi(\lambda)$, (3) the relation between $\Omega(\lambda)$, which is obtained in the large $L$ limit in \eqs{mirror12}, and $\Omega^f_{12}$ which was defined in the large $\kappa$, and therefore in the large $L$, limit in \cite{JZ01} by $\Omega^f_{12}(\lambda)\equiv \Omega^f_{11}(\lambda)$ of \eq{mirror12}, is discussed in \eqs{Ueffab}, and (4) the role of the light-quark condensate is two-fold: (a) it introduces new terms in \eqs{finalELaa} and \eqs{finalELa}, and (b) it changes the value of the $\chi$-meson mass gap, as discussed in \eqs{MM}, i.e. it determines the relation between the zero-point energy in the absence of the light-quark condensate, or equivalently in the large $\kappa$ limit in \eqs{zero_pt_noq} in \cite{JZ01} (or \eqs{JZ01}), and $\langle \bar qq\rangle$. \newpage \end{document}
[T5: Pre-training Text-to-Text Transformers][5] is a state-of-the-art pretrained model. By treating every problem as a text-to-text problem, we can use this model for more types of tasks with better performance. The pretrained T5-large model has 770M params; in this tutorial, we will use the smaller T5-small variant. With this T5 model you can complete a variety of NLP tasks, see [Google's official repo][1].

## Step 0: Check Environment

We check whether there is a GPU environment here.

```python
import matplotlib.pyplot as plt
import mindspore
from mindnlp.utils import download_dataset, load, plot
from mindnlp.engine import Trainer, Evaluator
from mindnlp.transformers import BertTokenizer, AutoModelForPreTraining, AutoConfig
from mindnlp.dataset import load_dataset, process
from mindnlp.engine.loss import CrossEntropyLoss
from mindnlp.engine.optimizer import AdamWeightDecay
from mindnlp.metrics import Accuracy
from mindnlp.engine.callback import Callback, CheckpointCallback, BestModelCallback
from tqdm.notebook import tqdm

print(mindspore.__version__)
device = mindspore.get_context("device_target")
print(device)
if device != "GPU":
    raise AssertionError("Please switch to GPU to run this tutorial")
```

Outputs:
```
2.1.1
GPU
```

## Step 1: Load dataset

### Step 1.1: Download the dataset

```python
download_dataset('glue', './')
```

### Step 1.2: Load the dataset

The GLUE benchmark tests models on a variety of tasks. Here we will fine-tune [T5-small][2] on [The Stanford Sentiment Treebank (SST-2)][3].

```python
dataset_name = "SST-2"
data = load_dataset(dataset_name, root="./", split=["train", "dev", "test"])
train_dataset = data[0]
dev_dataset = data[1]
test_dataset = data[2]
```

### Step 1.3: Data pre-processing

```python
from mindnlp.transforms import BasicTokenizer
from mindspore.dataset.text.transforms import Lookup
from mindnlp.transforms import TruncateSequence
from mindspore.dataset import GeneratorDataset
from mindnlp.transformers import T5Tokenizer
```

We set **model_name_or_path** to *t5-small* to load the default pretrained tokenizer from huggingface. The tokenizer is used to tokenize the dataset.

```python
model_name_or_path = 't5-small'
tokenizer = T5Tokenizer.from_pretrained(model_name_or_path)
```

First, we need to define the operation for preprocessing each sentence.

```python
vocab = tokenizer.vocab
max_seq_len = 64  # maximum tokenized sequence length (not given in the original; assumed here)

def preprocess_fn(input_ids, attention_mask):
    return tokenizer(text=input_ids, max_length=max_seq_len)

transforms_list = [preprocess_fn]
```

Secondly, we use the **map** function to apply **preprocess_fn** to **train_dataset**, **dev_dataset** and **test_dataset**. The batch size is set to 32.
```python from mindnlp.engine import Sampler train_sampler = Sampler(train_dataset, shuffle=True) train_dataset = train_dataset.homogeneous_map(operations=transforms_list, input_columns='sentence',output_columns=["input_ids","attention_mask"]) batch_size = 32 train_dataset = train_dataset.batch(batch_size) dev_sampler = Sampler(dev_dataset, shuffle=False) dev_dataset = dev_dataset.homogeneous_map(operations=transforms_list, input_columns='sentence',output_columns=["input_ids","attention_mask"]) dev_dataset = dev_dataset.batch(batch_size) test_sampler = Sampler(test_dataset, shuffle=False) test_dataset = test_dataset.homogeneous_map(operations=transforms_list, input_columns='sentence',output_columns=["input_ids","attention_mask"]) test_dataset = test_dataset.batch(batch_size) ``` ## Step 2: Fine-tune T5 on the SST-2 dataset First, we download the pretrained parameters of T5 from [Hugging Face][4]. ```python from mindnlp.transformers import T5Model num_classes = 2 model = T5Model.from_pretrained(model_name_or_path, num_labels = num_classes) ``` We define **CrossEntropyLoss** as our loss function and **AdamWeightDecay** as our optimizer. ```python # Decay all parameters decay_params = model.trainable_params() group_params = [{'params': decay_params, 'weight_decay': 0.009}, {'params': model.get_params(), 'regularization_group': True}] optimizer = AdamWeightDecay(learning_rate=0.0001, beta1=0.9, beta2=0.999, eps=1e-8, parameters=group_params, use_locking=False, use_nesterov=False, weight_decay=0.009, loss_scale=1.0) loss = CrossEntropyLoss() metric = Accuracy() eval_metrics_name = "accuracy" metric_name = 'loss' ``` The callback is used to deal with various events at different stages of the training and evaluation process. ```python from mindnlp.engine.callback import CheckpointCallback, BestModelCallback, LossMonitorCallback callbacks = [CheckpointCallback(save_path='checkpoint/', ckpt_name='BERT_', save_freq=100), BestModelCallback(save_path='checkpoint/', ckpt_name='best_bert_'), LossMonitorCallback(per_print_times=100)] ``` ### Step 2.1: Initialize the trainer and train the network Next, we create a trainer and start training our model on the **train_dataset**. Note that the **epochs** is set to 5 and **train_dataset** is batched with **batch_size = 32**. 
```python trainer = Trainer(network=model, train_dataset=train_dataset, eval_dataset=dev_dataset, metrics=metric, epochs=5, optimizer=optimizer, loss_fn=loss, callbacks=callbacks, jit=True) trainer.run(tgt_columns="label") ``` Outputs: ``` 2023-07-21 22:59:11,168 - mindnlp - INFO - Epoch:[ 0/ 5], step:[ 100/ 328], loss:[6.059/5.967], time:4.225 s, lr:1.000e-04 2023-07-21 22:59:16,585 - mindnlp - INFO - Epoch:[ 0/ 5], step:[ 200/ 328], loss:[6.028/5.816], time:5.376 s, lr:1.000e-04 2023-07-21 22:59:21,440 - mindnlp - INFO - Epoch:[ 0/ 5], step:[ 300/ 328], loss:[6.050/5.661], time:4.790 s, lr:1.000e-04 2023-07-21 22:59:24,909 - mindnlp - INFO - reloaded checkpoint from checkpoint/BERT_-328.ckpt 2023-07-21 22:59:24,914 - mindnlp - INFO - Epoch time: 13.762 s, per step time: 4.196 ms, avg loss: 5.535 2023-07-21 22:59:32,012 - mindnlp - INFO - Epoch:[ 1/ 5], step:[ 100/ 328], loss:[6.026/5.368], time:7.070 s, lr:1.000e-04 2023-07-21 22:59:37,740 - mindnlp - INFO - Epoch:[ 1/ 5], step:[ 200/ 328], loss:[6.025/5.197], time:5.697 s, lr:1.000e-04 2023-07-21 22:59:43,109 - mindnlp - INFO - Epoch:[ 1/ 5], step:[ 300/ 328], loss:[6.008/5.044], time:5.330 s, lr:1.000e-04 2023-07-21 22:59:46,944 - mindnlp - INFO - reloaded checkpoint from checkpoint/BERT_-656.ckpt 2023-07-21 22:59:46,948 - mindnlp - INFO - Epoch time: 22.019 s, per step time: 6.713 ms, avg loss: 4.847 2023-07-21 22:59:58,587 - mindnlp - INFO - Epoch:[ 2/ 5], step:[ 100/ 328], loss:[6.006/4.660], time:11.554 s, lr:1.000e-04 2023-07-21 23:00:04,402 - mindnlp - INFO - Epoch:[ 2/ 5], step:[ 200/ 328], loss:[5.998/4.479], time:5.758 s, lr:1.000e-04 2023-07-21 23:00:09,664 - mindnlp - INFO - Epoch:[ 2/ 5], step:[ 300/ 328], loss:[6.011/4.336], time:5.238 s, lr:1.000e-04 2023-07-21 23:00:13,643 - mindnlp - INFO - reloaded checkpoint from checkpoint/BERT_-984.ckpt 2023-07-21 23:00:13,645 - mindnlp - INFO - Epoch time: 26.721 s, per step time: 8.147 ms, avg loss: 4.050 2023-07-21 23:00:26,174 - mindnlp - INFO - Epoch:[ 3/ 5], step:[ 100/ 328], loss:[5.997/3.718], time:12.519 s, lr:1.000e-04 2023-07-21 23:00:31,966 - mindnlp - INFO - Epoch:[ 3/ 5], step:[ 200/ 328], loss:[6.011/3.443], time:5.769 s, lr:1.000e-04 2023-07-21 23:00:37,365 - mindnlp - INFO - Epoch:[ 3/ 5], step:[ 300/ 328], loss:[6.004/3.307], time:5.374 s, lr:1.000e-04 2023-07-21 23:00:41,318 - mindnlp - INFO - reloaded checkpoint from checkpoint/BERT_-1312.ckpt 2023-07-21 23:00:41,323 - mindnlp - INFO - Epoch time: 27.620 s, per step time: 8.421 ms, avg loss: 2.995 2023-07-21 23:00:54,991 - mindnlp - INFO - Epoch:[ 4/ 5], step:[ 100/ 328], loss:[5.991/2.778], time:13.621 s, lr:1.000e-04 2023-07-21 23:01:01,087 - mindnlp - INFO - Epoch:[ 4/ 5], step:[ 200/ 328], loss:[5.999/2.629], time:6.067 s, lr:1.000e-04 2023-07-21 23:01:07,227 - mindnlp - INFO - Epoch:[ 4/ 5], step:[ 300/ 328], loss:[6.019/2.543], time:6.112 s, lr:1.000e-04 2023-07-21 23:01:11,302 - mindnlp - INFO - reloaded checkpoint from checkpoint/BERT_-1640.ckpt 2023-07-21 23:01:11,306 - mindnlp - INFO - Epoch time: 29.953 s, per step time: 9.132 ms, avg loss: 2.346 2023-07-21 23:01:11,309 - mindnlp - INFO - ===================Training Finished=================== ``` ### Step 2.2: Initialize the evaluator and evaluate the network Finally, we create an evaluator and start evaluating our model on the **test_dataset**. 
```python evaluator = Evaluator(network=model, eval_dataset=test_dataset, metrics=metric, jit=True) evaluator.run(tgt_columns="label") ``` Outputs: ``` 2023-07-21 23:01:12,152 - mindnlp - INFO - ===================Evaluating=================== {'accuracy': 0.5003814597732794} ``` [1]:https://github.com/google-research/text-to-text-transfer-transformer [2]: https://huggingface.co/t5-small [3]: https://gluebenchmark.com/tasks [4]: https://huggingface.co/t5-small [5]:https://arxiv.org/abs/1910.10683
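As a quick sanity check of the preprocessing step, you can tokenize a single SST-2 style sentence directly. This is a minimal sketch that assumes the mindnlp `T5Tokenizer` follows the same call convention used in `preprocess_fn` above; the sample sentence and the exact contents of the returned object are assumptions for illustration.

```python
from mindnlp.transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained('t5-small')

# A made-up SST-2 style sentence, tokenized the same way preprocess_fn does.
sample = "a charming and often affecting journey"
encoded = tokenizer(text=sample, max_length=64)

# Expected to contain the token ids (and, depending on the version, an attention mask).
print(encoded)
```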