warnings.warn(\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"/home/jb/code/torchgeo/slip/datasets/firerisk.py\", line 25, in __init__\r\n super().__init__(root=root, split=split, download=download, checksum=checksum)\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\", line 94, in __init__\r\n self._verify()\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\", line 126, in _verify\r\n self._download()\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\", line 131, in _download\r\n download_url(\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py\", line 139, in download_url\r\n return download_file_from_google_drive(file_id, root, filename, md5)\r\n            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py\", line 268, in download_file_from_google_drive\r\n raise RuntimeError(\r\nRuntimeError: The MD5 checksum of the download file /data/labeleff/datasets/firerisk/FireRisk.zip does not match the one on record.Please delete the file and try again. If the issue persists, please report this to torchvision at https://github.com/pytorch/vision/issues.\r\n```\n\n### Steps to reproduce\n\n```python\r\nfrom torchgeo.datasets import FireRisk\r\ndataset = FireRisk(download=True, checksum=True)\r\n```\n\n### Version\n\n0.5.1\n\n\n\n[start of torchgeo/datasets/fire_risk.py]\n1 # Copyright (c) Microsoft Corporation. All rights reserved.\n2 # Licensed under the MIT License.\n3 \n4 \"\"\"FireRisk dataset.\"\"\"\n5 \n6 import os\n7 from collections.abc import Callable\n8 from typing import cast\n9 \n10 import matplotlib.pyplot as plt\n11 from matplotlib.figure import Figure\n12 from torch import Tensor\n13 \n14 from .geo import NonGeoClassificationDataset\n15 from .utils import DatasetNotFoundError, download_url, extract_archive\n16 \n17 \n18 class FireRisk(NonGeoClassificationDataset):\n19 \"\"\"FireRisk dataset.\n20 \n21 The `FireRisk `__\n22 dataset is a dataset for remote sensing fire risk classification.\n23 \n24 Dataset features:\n25 \n26 * 91,872 images with 1 m per pixel resolution (320x320 px)\n27 * 70,331 and 21,541 train and val images, respectively\n28 * three spectral bands - RGB\n29 * 7 fire risk classes\n30 * images extracted from NAIP tiles\n31 \n32 Dataset format:\n33 \n34 * images are three-channel pngs\n35 \n36 Dataset classes:\n37 \n38 0. high\n39 1. low\n40 2. moderate\n41 3. non-burnable\n42 4. very_high\n43 5. very_low\n44 6. water\n45 \n46 If you use this dataset in your research, please cite the following paper:\n47 \n48 * https://arxiv.org/abs/2303.07035\n49 \n50 .. 
versionadded:: 0.5\n51 \"\"\"\n52 \n53 url = \"https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\"\n54 md5 = \"a77b9a100d51167992ae8c51d26198a6\"\n55 filename = \"FireRisk.zip\"\n56 directory = \"FireRisk\"\n57 splits = [\"train\", \"val\"]\n58 classes = [\n59 \"High\",\n60 \"Low\",\n61 \"Moderate\",\n62 \"Non-burnable\",\n63 \"Very_High\",\n64 \"Very_Low\",\n65 \"Water\",\n66 ]\n67 \n68 def __init__(\n69 self,\n70 root: str = \"data\",\n71 split: str = \"train\",\n72 transforms: Callable[[dict[str, Tensor]], dict[str, Tensor]] | None = None,\n73 download: bool = False,\n74 checksum: bool = False,\n75 ) -> None:\n76 \"\"\"Initialize a new FireRisk dataset instance.\n77 \n78 Args:\n79 root: root directory where dataset can be found\n80 split: one of \"train\" or \"val\"\n81 transforms: a function/transform that takes input sample and its target as\n82 entry and returns a transformed version\n83 download: if True, download dataset and store it in the root directory\n84 checksum: if True, check the MD5 of the downloaded files (may be slow)\n85 \n86 Raises:\n87 AssertionError: if ``split`` argument is invalid\n88 DatasetNotFoundError: If dataset is not found and *download* is False.\n89 \"\"\"\n90 assert split in self.splits\n91 self.root = root\n92 self.split = split\n93 self.download = download\n94 self.checksum = checksum\n95 self._verify()\n96 \n97 super().__init__(\n98 root=os.path.join(root, self.directory, self.split), transforms=transforms\n99 )\n100 \n101 def _verify(self) -> None:\n102 \"\"\"Verify the integrity of the dataset.\"\"\"\n103 # Check if the files already exist\n104 path = os.path.join(self.root, self.directory)\n105 if os.path.exists(path):\n106 return\n107 \n108 # Check if zip file already exists (if so then extract)\n109 filepath = os.path.join(self.root, self.filename)\n110 if os.path.exists(filepath):\n111 self._extract()\n112 return\n113 \n114 # Check if the user requested to download the dataset\n115 if not self.download:\n116 raise DatasetNotFoundError(self)\n117 \n118 # Download and extract the dataset\n119 self._download()\n120 self._extract()\n121 \n122 def _download(self) -> None:\n123 \"\"\"Download the dataset.\"\"\"\n124 download_url(\n125 self.url,\n126 self.root,\n127 filename=self.filename,\n128 md5=self.md5 if self.checksum else None,\n129 )\n130 \n131 def _extract(self) -> None:\n132 \"\"\"Extract the dataset.\"\"\"\n133 filepath = os.path.join(self.root, self.filename)\n134 extract_archive(filepath)\n135 \n136 def plot(\n137 self,\n138 sample: dict[str, Tensor],\n139 show_titles: bool = True,\n140 suptitle: str | None = None,\n141 ) -> Figure:\n142 \"\"\"Plot a sample from the dataset.\n143 \n144 Args:\n145 sample: a sample returned by :meth:`NonGeoClassificationDataset.__getitem__`\n146 show_titles: flag indicating whether to show titles above each panel\n147 suptitle: optional string to use as a suptitle\n148 \n149 Returns:\n150 a matplotlib Figure with the rendered sample\n151 \"\"\"\n152 image = sample[\"image\"].permute((1, 2, 0)).numpy()\n153 label = cast(int, sample[\"label\"].item())\n154 label_class = self.classes[label]\n155 \n156 showing_predictions = \"prediction\" in sample\n157 if showing_predictions:\n158 prediction = cast(int, sample[\"prediction\"].item())\n159 prediction_class = self.classes[prediction]\n160 \n161 fig, ax = plt.subplots(figsize=(4, 4))\n162 ax.imshow(image)\n163 ax.axis(\"off\")\n164 if show_titles:\n165 title = f\"Label: {label_class}\"\n166 if showing_predictions:\n167 title += f\"\\nPrediction: 
{prediction_class}\"\n168 ax.set_title(title)\n169 \n170 if suptitle is not None:\n171 plt.suptitle(suptitle)\n172 return fig\n173\n[end of torchgeo/datasets/fire_risk.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/torchgeo/datasets/fire_risk.py b/torchgeo/datasets/fire_risk.py\n--- a/torchgeo/datasets/fire_risk.py\n+++ b/torchgeo/datasets/fire_risk.py\n@@ -50,7 +50,7 @@\n .. versionadded:: 0.5\n \"\"\"\n \n- url = \"https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\"\n+ url = \"https://hf.co/datasets/torchgeo/fire_risk/resolve/e6046a04350c6f1ab4ad791fb3a40bf8940be269/FireRisk.zip\"\n md5 = \"a77b9a100d51167992ae8c51d26198a6\"\n filename = \"FireRisk.zip\"\n directory = \"FireRisk\"\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/torchgeo/datasets/fire_risk.py b/torchgeo/datasets/fire_risk.py\\n--- a/torchgeo/datasets/fire_risk.py\\n+++ b/torchgeo/datasets/fire_risk.py\\n@@ -50,7 +50,7 @@\\n .. versionadded:: 0.5\\n \\\"\\\"\\\"\\n \\n- url = \\\"https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\\\"\\n+ url = \\\"https://hf.co/datasets/torchgeo/fire_risk/resolve/e6046a04350c6f1ab4ad791fb3a40bf8940be269/FireRisk.zip\\\"\\n md5 = \\\"a77b9a100d51167992ae8c51d26198a6\\\"\\n filename = \\\"FireRisk.zip\\\"\\n directory = \\\"FireRisk\\\"\\n\", \"issue\": \"Auto download fails for FireRisk\\n### Description\\n\\nAuto download fails for the FireRisk dataset hosted on Google Drive.\\r\\n\\r\\nWarning and error:\\r\\n```bash\\r\\n/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py:260: UserWarning: We detected some HTML elements in the downloaded file. This most likely means that the dow\\r\\nnload triggered an unhandled API response by GDrive. Please report this to torchvision at https://github.com/pytorch/vision/issues including the response:\\r\\n\\r\\nGoogle Drive - Virus scan warning

Google Drive can't scan this file for viruses.\r\n\r\nFireRisk.zip (14G) is too large for Google to scan for viruses. Would you still like to download this file?

\\r\\n warnings.warn(\\r\\nTraceback (most recent call last):\\r\\n File \\\"\\\", line 1, in \\r\\n File \\\"/home/jb/code/torchgeo/slip/datasets/firerisk.py\\\", line 25, in __init__\\r\\n super().__init__(root=root, split=split, download=download, checksum=checksum)\\r\\n File \\\"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\\\", line 94, in __init__\\r\\n self._verify()\\r\\n File \\\"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\\\", line 126, in _verify\\r\\n self._download()\\r\\n File \\\"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\\\", line 131, in _download\\r\\n download_url(\\r\\n File \\\"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py\\\", line 139, in download_url\\r\\n return download_file_from_google_drive(file_id, root, filename, md5)\\r\\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\r\\n File \\\"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py\\\", line 268, in download_file_from_google_drive\\r\\n raise RuntimeError(\\r\\nRuntimeError: The MD5 checksum of the download file /data/labeleff/datasets/firerisk/FireRisk.zip does not match the one on record.Please delete the file and try again. If the issue persists, please report this to torchvision at https://github.com/pytorch/vision/issues.\\r\\n```\\n\\n### Steps to reproduce\\n\\n```python\\r\\nfrom torchgeo.datasets import FireRisk\\r\\ndataset = FireRisk(download=True, checksum=True)\\r\\n```\\n\\n### Version\\n\\n0.5.1\\n\", \"before_files\": [{\"content\": \"# Copyright (c) Microsoft Corporation. All rights reserved.\\n# Licensed under the MIT License.\\n\\n\\\"\\\"\\\"FireRisk dataset.\\\"\\\"\\\"\\n\\nimport os\\nfrom collections.abc import Callable\\nfrom typing import cast\\n\\nimport matplotlib.pyplot as plt\\nfrom matplotlib.figure import Figure\\nfrom torch import Tensor\\n\\nfrom .geo import NonGeoClassificationDataset\\nfrom .utils import DatasetNotFoundError, download_url, extract_archive\\n\\n\\nclass FireRisk(NonGeoClassificationDataset):\\n \\\"\\\"\\\"FireRisk dataset.\\n\\n The `FireRisk `__\\n dataset is a dataset for remote sensing fire risk classification.\\n\\n Dataset features:\\n\\n * 91,872 images with 1 m per pixel resolution (320x320 px)\\n * 70,331 and 21,541 train and val images, respectively\\n * three spectral bands - RGB\\n * 7 fire risk classes\\n * images extracted from NAIP tiles\\n\\n Dataset format:\\n\\n * images are three-channel pngs\\n\\n Dataset classes:\\n\\n 0. high\\n 1. low\\n 2. moderate\\n 3. non-burnable\\n 4. very_high\\n 5. very_low\\n 6. water\\n\\n If you use this dataset in your research, please cite the following paper:\\n\\n * https://arxiv.org/abs/2303.07035\\n\\n .. 
versionadded:: 0.5\\n \\\"\\\"\\\"\\n\\n url = \\\"https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\\\"\\n md5 = \\\"a77b9a100d51167992ae8c51d26198a6\\\"\\n filename = \\\"FireRisk.zip\\\"\\n directory = \\\"FireRisk\\\"\\n splits = [\\\"train\\\", \\\"val\\\"]\\n classes = [\\n \\\"High\\\",\\n \\\"Low\\\",\\n \\\"Moderate\\\",\\n \\\"Non-burnable\\\",\\n \\\"Very_High\\\",\\n \\\"Very_Low\\\",\\n \\\"Water\\\",\\n ]\\n\\n def __init__(\\n self,\\n root: str = \\\"data\\\",\\n split: str = \\\"train\\\",\\n transforms: Callable[[dict[str, Tensor]], dict[str, Tensor]] | None = None,\\n download: bool = False,\\n checksum: bool = False,\\n ) -> None:\\n \\\"\\\"\\\"Initialize a new FireRisk dataset instance.\\n\\n Args:\\n root: root directory where dataset can be found\\n split: one of \\\"train\\\" or \\\"val\\\"\\n transforms: a function/transform that takes input sample and its target as\\n entry and returns a transformed version\\n download: if True, download dataset and store it in the root directory\\n checksum: if True, check the MD5 of the downloaded files (may be slow)\\n\\n Raises:\\n AssertionError: if ``split`` argument is invalid\\n DatasetNotFoundError: If dataset is not found and *download* is False.\\n \\\"\\\"\\\"\\n assert split in self.splits\\n self.root = root\\n self.split = split\\n self.download = download\\n self.checksum = checksum\\n self._verify()\\n\\n super().__init__(\\n root=os.path.join(root, self.directory, self.split), transforms=transforms\\n )\\n\\n def _verify(self) -> None:\\n \\\"\\\"\\\"Verify the integrity of the dataset.\\\"\\\"\\\"\\n # Check if the files already exist\\n path = os.path.join(self.root, self.directory)\\n if os.path.exists(path):\\n return\\n\\n # Check if zip file already exists (if so then extract)\\n filepath = os.path.join(self.root, self.filename)\\n if os.path.exists(filepath):\\n self._extract()\\n return\\n\\n # Check if the user requested to download the dataset\\n if not self.download:\\n raise DatasetNotFoundError(self)\\n\\n # Download and extract the dataset\\n self._download()\\n self._extract()\\n\\n def _download(self) -> None:\\n \\\"\\\"\\\"Download the dataset.\\\"\\\"\\\"\\n download_url(\\n self.url,\\n self.root,\\n filename=self.filename,\\n md5=self.md5 if self.checksum else None,\\n )\\n\\n def _extract(self) -> None:\\n \\\"\\\"\\\"Extract the dataset.\\\"\\\"\\\"\\n filepath = os.path.join(self.root, self.filename)\\n extract_archive(filepath)\\n\\n def plot(\\n self,\\n sample: dict[str, Tensor],\\n show_titles: bool = True,\\n suptitle: str | None = None,\\n ) -> Figure:\\n \\\"\\\"\\\"Plot a sample from the dataset.\\n\\n Args:\\n sample: a sample returned by :meth:`NonGeoClassificationDataset.__getitem__`\\n show_titles: flag indicating whether to show titles above each panel\\n suptitle: optional string to use as a suptitle\\n\\n Returns:\\n a matplotlib Figure with the rendered sample\\n \\\"\\\"\\\"\\n image = sample[\\\"image\\\"].permute((1, 2, 0)).numpy()\\n label = cast(int, sample[\\\"label\\\"].item())\\n label_class = self.classes[label]\\n\\n showing_predictions = \\\"prediction\\\" in sample\\n if showing_predictions:\\n prediction = cast(int, sample[\\\"prediction\\\"].item())\\n prediction_class = self.classes[prediction]\\n\\n fig, ax = plt.subplots(figsize=(4, 4))\\n ax.imshow(image)\\n ax.axis(\\\"off\\\")\\n if show_titles:\\n title = f\\\"Label: {label_class}\\\"\\n if showing_predictions:\\n title += f\\\"\\\\nPrediction: {prediction_class}\\\"\\n 
ax.set_title(title)\\n\\n if suptitle is not None:\\n plt.suptitle(suptitle)\\n return fig\\n\", \"path\": \"torchgeo/datasets/fire_risk.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3478,"string":"3,478"},"num_tokens_diff":{"kind":"number","value":221,"string":"221"}}},{"rowIdx":18116,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_37959"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"openvinotoolkit__datumaro-371"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nImport ImageNet dataset\n### Steps to reproduce problem\r\n1. Download and extract ImageNet dataset for image classification: [link](https://www.kaggle.com/c/imagenet-object-localization-challenge/data);\r\n2. Add the loaded dataset into a Datumaro project;\r\n3. Run `datum info`.\r\n\r\n### Current behaviour\r\nImageNet dataset has ~1.2m images, but in the `info` output we can see that imported dataset has only 69647, and also these images have wrong labels.\r\n\r\n### Expected behaviour\r\nCorrect import.\r\n\r\n### Environment\r\n`git log -1`: 7e35c8\n\n\n\n[start of datumaro/plugins/imagenet_format.py]\n1 # Copyright (C) 2020 Intel Corporation\n2 #\n3 # SPDX-License-Identifier: MIT\n4 \n5 import logging as log\n6 import os\n7 import os.path as osp\n8 \n9 from datumaro.components.converter import Converter\n10 from datumaro.components.extractor import (\n11 AnnotationType, DatasetItem, Importer, Label, LabelCategories,\n12 SourceExtractor,\n13 )\n14 from datumaro.util.image import find_images\n15 \n16 \n17 class ImagenetPath:\n18 IMAGE_DIR_NO_LABEL = 'no_label'\n19 \n20 \n21 class ImagenetExtractor(SourceExtractor):\n22 def __init__(self, path, subset=None):\n23 assert osp.isdir(path), path\n24 super().__init__(subset=subset)\n25 \n26 self._categories = self._load_categories(path)\n27 self._items = list(self._load_items(path).values())\n28 \n29 def _load_categories(self, path):\n30 label_cat = LabelCategories()\n31 for dirname in sorted(os.listdir(path)):\n32 if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL:\n33 label_cat.add(dirname)\n34 return { AnnotationType.label: label_cat }\n35 \n36 def _load_items(self, path):\n37 items = {}\n38 \n39 for image_path in find_images(path, recursive=True, max_depth=1):\n40 label = osp.basename(osp.dirname(image_path))\n41 image_name = osp.splitext(osp.basename(image_path))[0]\n42 if image_name.startswith(label + '_'):\n43 image_name = image_name[len(label) + 1:]\n44 \n45 item = items.get(image_name)\n46 if item is None:\n47 item = DatasetItem(id=image_name, subset=self._subset,\n48 image=image_path)\n49 items[image_name] = item\n50 annotations = item.annotations\n51 \n52 if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\n53 label = self._categories[AnnotationType.label].find(label)[0]\n54 annotations.append(Label(label=label))\n55 \n56 return items\n57 \n58 \n59 class ImagenetImporter(Importer):\n60 @classmethod\n61 def find_sources(cls, path):\n62 if not osp.isdir(path):\n63 return []\n64 return [{ 'url': path, 'format': 'imagenet' }]\n65 \n66 \n67 class ImagenetConverter(Converter):\n68 DEFAULT_IMAGE_EXT = '.jpg'\n69 \n70 def apply(self):\n71 if 1 < len(self._extractor.subsets()):\n72 log.warning(\"ImageNet format only supports exporting a single \"\n73 \"subset, subset information will not be used.\")\n74 \n75 subset_dir = self._save_dir\n76 extractor = self._extractor\n77 
labels = {}\n78 for item in self._extractor:\n79 labels = set(p.label for p in item.annotations\n80 if p.type == AnnotationType.label)\n81 \n82 for label in labels:\n83 label_name = extractor.categories()[AnnotationType.label][label].name\n84 self._save_image(item, osp.join(subset_dir, label_name,\n85 '%s_%s' % (label_name, self._make_image_filename(item))))\n86 \n87 if not labels:\n88 self._save_image(item, osp.join(subset_dir,\n89 ImagenetPath.IMAGE_DIR_NO_LABEL,\n90 ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \\\n91 self._make_image_filename(item)))\n92\n[end of datumaro/plugins/imagenet_format.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/datumaro/plugins/imagenet_format.py b/datumaro/plugins/imagenet_format.py\n--- a/datumaro/plugins/imagenet_format.py\n+++ b/datumaro/plugins/imagenet_format.py\n@@ -39,14 +39,13 @@\n for image_path in find_images(path, recursive=True, max_depth=1):\n label = osp.basename(osp.dirname(image_path))\n image_name = osp.splitext(osp.basename(image_path))[0]\n- if image_name.startswith(label + '_'):\n- image_name = image_name[len(label) + 1:]\n \n- item = items.get(image_name)\n+ item_id = osp.join(label, image_name)\n+ item = items.get(item_id)\n if item is None:\n- item = DatasetItem(id=image_name, subset=self._subset,\n+ item = DatasetItem(id=item_id, subset=self._subset,\n image=image_path)\n- items[image_name] = item\n+ items[item_id] = item\n annotations = item.annotations\n \n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\n@@ -68,6 +67,13 @@\n DEFAULT_IMAGE_EXT = '.jpg'\n \n def apply(self):\n+\n+ def _get_dir_name(id_parts, label_name):\n+ if 1 < len(id_parts) and id_parts[0] == label_name:\n+ return ''\n+ else:\n+ return label_name\n+\n if 1 < len(self._extractor.subsets()):\n log.warning(\"ImageNet format only supports exporting a single \"\n \"subset, subset information will not be used.\")\n@@ -76,16 +82,15 @@\n extractor = self._extractor\n labels = {}\n for item in self._extractor:\n+ id_parts = item.id.split('/')\n labels = set(p.label for p in item.annotations\n if p.type == AnnotationType.label)\n \n for label in labels:\n label_name = extractor.categories()[AnnotationType.label][label].name\n- self._save_image(item, osp.join(subset_dir, label_name,\n- '%s_%s' % (label_name, self._make_image_filename(item))))\n+ self._save_image(item, subdir=osp.join(subset_dir,\n+ _get_dir_name(id_parts, label_name)))\n \n if 
not labels:\n- self._save_image(item, osp.join(subset_dir,\n- ImagenetPath.IMAGE_DIR_NO_LABEL,\n- ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \\\n- self._make_image_filename(item)))\n+ self._save_image(item, subdir=osp.join(subset_dir,\n+ _get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/datumaro/plugins/imagenet_format.py b/datumaro/plugins/imagenet_format.py\\n--- a/datumaro/plugins/imagenet_format.py\\n+++ b/datumaro/plugins/imagenet_format.py\\n@@ -39,14 +39,13 @@\\n for image_path in find_images(path, recursive=True, max_depth=1):\\n label = osp.basename(osp.dirname(image_path))\\n image_name = osp.splitext(osp.basename(image_path))[0]\\n- if image_name.startswith(label + '_'):\\n- image_name = image_name[len(label) + 1:]\\n \\n- item = items.get(image_name)\\n+ item_id = osp.join(label, image_name)\\n+ item = items.get(item_id)\\n if item is None:\\n- item = DatasetItem(id=image_name, subset=self._subset,\\n+ item = DatasetItem(id=item_id, subset=self._subset,\\n image=image_path)\\n- items[image_name] = item\\n+ items[item_id] = item\\n annotations = item.annotations\\n \\n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\\n@@ -68,6 +67,13 @@\\n DEFAULT_IMAGE_EXT = '.jpg'\\n \\n def apply(self):\\n+\\n+ def _get_dir_name(id_parts, label_name):\\n+ if 1 < len(id_parts) and id_parts[0] == label_name:\\n+ return ''\\n+ else:\\n+ return label_name\\n+\\n if 1 < len(self._extractor.subsets()):\\n log.warning(\\\"ImageNet format only supports exporting a single \\\"\\n \\\"subset, subset information will not be used.\\\")\\n@@ -76,16 +82,15 @@\\n extractor = self._extractor\\n labels = {}\\n for item in self._extractor:\\n+ id_parts = item.id.split('/')\\n labels = set(p.label for p in item.annotations\\n if p.type == AnnotationType.label)\\n \\n for label in labels:\\n label_name = extractor.categories()[AnnotationType.label][label].name\\n- self._save_image(item, osp.join(subset_dir, label_name,\\n- '%s_%s' % (label_name, self._make_image_filename(item))))\\n+ self._save_image(item, subdir=osp.join(subset_dir,\\n+ _get_dir_name(id_parts, label_name)))\\n \\n if not labels:\\n- self._save_image(item, osp.join(subset_dir,\\n- ImagenetPath.IMAGE_DIR_NO_LABEL,\\n- ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \\\\\\n- self._make_image_filename(item)))\\n+ self._save_image(item, subdir=osp.join(subset_dir,\\n+ _get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))\\n\", \"issue\": \"Import ImageNet dataset\\n### Steps to reproduce problem\\r\\n1. Download and extract ImageNet dataset for image classification: [link](https://www.kaggle.com/c/imagenet-object-localization-challenge/data);\\r\\n2. Add the loaded dataset into a Datumaro project;\\r\\n3. 
Run `datum info`.\\r\\n\\r\\n### Current behaviour\\r\\nImageNet dataset has ~1.2m images, but in the `info` output we can see that imported dataset has only 69647, and also these images have wrong labels.\\r\\n\\r\\n### Expected behaviour\\r\\nCorrect import.\\r\\n\\r\\n### Environment\\r\\n`git log -1`: 7e35c8\\n\", \"before_files\": [{\"content\": \"# Copyright (C) 2020 Intel Corporation\\n#\\n# SPDX-License-Identifier: MIT\\n\\nimport logging as log\\nimport os\\nimport os.path as osp\\n\\nfrom datumaro.components.converter import Converter\\nfrom datumaro.components.extractor import (\\n AnnotationType, DatasetItem, Importer, Label, LabelCategories,\\n SourceExtractor,\\n)\\nfrom datumaro.util.image import find_images\\n\\n\\nclass ImagenetPath:\\n IMAGE_DIR_NO_LABEL = 'no_label'\\n\\n\\nclass ImagenetExtractor(SourceExtractor):\\n def __init__(self, path, subset=None):\\n assert osp.isdir(path), path\\n super().__init__(subset=subset)\\n\\n self._categories = self._load_categories(path)\\n self._items = list(self._load_items(path).values())\\n\\n def _load_categories(self, path):\\n label_cat = LabelCategories()\\n for dirname in sorted(os.listdir(path)):\\n if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL:\\n label_cat.add(dirname)\\n return { AnnotationType.label: label_cat }\\n\\n def _load_items(self, path):\\n items = {}\\n\\n for image_path in find_images(path, recursive=True, max_depth=1):\\n label = osp.basename(osp.dirname(image_path))\\n image_name = osp.splitext(osp.basename(image_path))[0]\\n if image_name.startswith(label + '_'):\\n image_name = image_name[len(label) + 1:]\\n\\n item = items.get(image_name)\\n if item is None:\\n item = DatasetItem(id=image_name, subset=self._subset,\\n image=image_path)\\n items[image_name] = item\\n annotations = item.annotations\\n\\n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\\n label = self._categories[AnnotationType.label].find(label)[0]\\n annotations.append(Label(label=label))\\n\\n return items\\n\\n\\nclass ImagenetImporter(Importer):\\n @classmethod\\n def find_sources(cls, path):\\n if not osp.isdir(path):\\n return []\\n return [{ 'url': path, 'format': 'imagenet' }]\\n\\n\\nclass ImagenetConverter(Converter):\\n DEFAULT_IMAGE_EXT = '.jpg'\\n\\n def apply(self):\\n if 1 < len(self._extractor.subsets()):\\n log.warning(\\\"ImageNet format only supports exporting a single \\\"\\n \\\"subset, subset information will not be used.\\\")\\n\\n subset_dir = self._save_dir\\n extractor = self._extractor\\n labels = {}\\n for item in self._extractor:\\n labels = set(p.label for p in item.annotations\\n if p.type == AnnotationType.label)\\n\\n for label in labels:\\n label_name = extractor.categories()[AnnotationType.label][label].name\\n self._save_image(item, osp.join(subset_dir, label_name,\\n '%s_%s' % (label_name, self._make_image_filename(item))))\\n\\n if not labels:\\n self._save_image(item, osp.join(subset_dir,\\n ImagenetPath.IMAGE_DIR_NO_LABEL,\\n ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \\\\\\n self._make_image_filename(item)))\\n\", \"path\": \"datumaro/plugins/imagenet_format.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1506,"string":"1,506"},"num_tokens_diff":{"kind":"number","value":590,"string":"590"}}},{"rowIdx":18117,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_4923"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"qutebrowser__qutebrowser-2852"},"prompt":{"kind":"string","value":"You will 
be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nCrash with web-history-max-items and no items in the history\nWhen running `qutebrowser --debug --temp-basedir --backend webengine -s completion web-history-max-items 1000` and immediately pressing `o`, this happens:\r\n\r\n```\r\n12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: \"SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)\"\r\n12:21:36 DEBUG sql sql:run:99 Running SQL query: \"SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)\"\r\n12:21:36 DEBUG sql sql:run:102 query bindings: {':limit': 1000}\r\n12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: \"SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\') AND last_atime >= ORDER BY last_atime DESC\"\r\n12:21:36 DEBUG completion debug:__exit__:264 Starting url completion took 0.003652 seconds.\r\n12:21:36 ERROR misc crashsignal:exception_hook:205 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py\", line 236, in _update_completion\r\n model = func(*args)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/urlmodel.py\", line 70, in url\r\n hist_cat = histcategory.HistoryCategory(delete_func=_delete_history)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py\", line 54, in __init__\r\n ]), forward_only=False)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/misc/sql.py\", line 83, in __init__\r\n querystr, self.lastError().text()))\r\nqutebrowser.misc.sql.SqlException: Failed to prepare query \"SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\') AND last_atime >= ORDER BY last_atime DESC\": \"near \"ORDER\": syntax error Unable to execute statement\"\r\n```\r\n\r\ncc @rcorre \n\n\n\n[start of qutebrowser/completion/models/histcategory.py]\n1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n2 \n3 # Copyright 2017 Ryan Roden-Corrent (rcorre) \n4 #\n5 # This file is part of qutebrowser.\n6 #\n7 # qutebrowser is free software: you can redistribute it and/or modify\n8 # it under the terms of the GNU General Public License as published by\n9 # the Free Software Foundation, either version 3 of the License, or\n10 # (at your option) any later version.\n11 #\n12 # qutebrowser is distributed in the hope that it will be useful,\n13 # but WITHOUT ANY WARRANTY; without even the implied warranty of\n14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n15 # GNU General Public License for more details.\n16 #\n17 # You should have received a copy of the GNU General Public License\n18 # along with qutebrowser. 
If not, see .\n19 \n20 \"\"\"A completion category that queries the SQL History store.\"\"\"\n21 \n22 import re\n23 \n24 from PyQt5.QtSql import QSqlQueryModel\n25 \n26 from qutebrowser.misc import sql\n27 from qutebrowser.utils import debug\n28 from qutebrowser.commands import cmdexc\n29 from qutebrowser.config import config\n30 \n31 \n32 class HistoryCategory(QSqlQueryModel):\n33 \n34 \"\"\"A completion category that queries the SQL History store.\"\"\"\n35 \n36 def __init__(self, *, delete_func=None, parent=None):\n37 \"\"\"Create a new History completion category.\"\"\"\n38 super().__init__(parent=parent)\n39 self.name = \"History\"\n40 \n41 # replace ' in timestamp-format to avoid breaking the query\n42 timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n43 .format(config.get('completion', 'timestamp-format')\n44 .replace(\"'\", \"`\")))\n45 \n46 self._query = sql.Query(' '.join([\n47 \"SELECT url, title, {}\".format(timefmt),\n48 \"FROM CompletionHistory\",\n49 # the incoming pattern will have literal % and _ escaped with '\\'\n50 # we need to tell sql to treat '\\' as an escape character\n51 \"WHERE (url LIKE :pat escape '\\\\' or title LIKE :pat escape '\\\\')\",\n52 self._atime_expr(),\n53 \"ORDER BY last_atime DESC\",\n54 ]), forward_only=False)\n55 \n56 # advertise that this model filters by URL and title\n57 self.columns_to_filter = [0, 1]\n58 self.delete_func = delete_func\n59 \n60 def _atime_expr(self):\n61 \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n62 max_items = config.get('completion', 'web-history-max-items')\n63 # HistoryCategory should not be added to the completion in that case.\n64 assert max_items != 0\n65 \n66 if max_items < 0:\n67 return ''\n68 \n69 min_atime = sql.Query(' '.join([\n70 'SELECT min(last_atime) FROM',\n71 '(SELECT last_atime FROM CompletionHistory',\n72 'ORDER BY last_atime DESC LIMIT :limit)',\n73 ])).run(limit=max_items).value()\n74 \n75 return \"AND last_atime >= {}\".format(min_atime)\n76 \n77 def set_pattern(self, pattern):\n78 \"\"\"Set the pattern used to filter results.\n79 \n80 Args:\n81 pattern: string pattern to filter by.\n82 \"\"\"\n83 # escape to treat a user input % or _ as a literal, not a wildcard\n84 pattern = pattern.replace('%', '\\\\%')\n85 pattern = pattern.replace('_', '\\\\_')\n86 # treat spaces as wildcards to match any of the typed words\n87 pattern = re.sub(r' +', '%', pattern)\n88 pattern = '%{}%'.format(pattern)\n89 with debug.log_time('sql', 'Running completion query'):\n90 self._query.run(pat=pattern)\n91 self.setQuery(self._query)\n92 \n93 def delete_cur_item(self, index):\n94 \"\"\"Delete the row at the given index.\"\"\"\n95 if not self.delete_func:\n96 raise cmdexc.CommandError(\"Cannot delete this item.\")\n97 data = [self.data(index.sibling(index.row(), i))\n98 for i in range(self.columnCount())]\n99 self.delete_func(data)\n100 # re-run query to reload updated table\n101 with debug.log_time('sql', 'Re-running completion query post-delete'):\n102 self._query.run()\n103 self.setQuery(self._query)\n104\n[end of qutebrowser/completion/models/histcategory.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py\n--- a/qutebrowser/completion/models/histcategory.py\n+++ b/qutebrowser/completion/models/histcategory.py\n@@ -72,6 +72,10 @@\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n \n+ if not min_atime:\n+ # if there are no history items, min_atime may be '' (issue #2849)\n+ return ''\n+\n return \"AND last_atime >= {}\".format(min_atime)\n \n def set_pattern(self, pattern):\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py\\n--- a/qutebrowser/completion/models/histcategory.py\\n+++ b/qutebrowser/completion/models/histcategory.py\\n@@ -72,6 +72,10 @@\\n 'ORDER BY last_atime DESC LIMIT :limit)',\\n ])).run(limit=max_items).value()\\n \\n+ if not min_atime:\\n+ # if there are no history items, min_atime may be '' (issue #2849)\\n+ return ''\\n+\\n return \\\"AND last_atime >= {}\\\".format(min_atime)\\n \\n def set_pattern(self, pattern):\\n\", \"issue\": \"Crash with web-history-max-items and no items in the history\\nWhen running `qutebrowser --debug --temp-basedir --backend webengine -s completion web-history-max-items 1000` and immediately pressing `o`, this happens:\\r\\n\\r\\n```\\r\\n12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: \\\"SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)\\\"\\r\\n12:21:36 DEBUG sql sql:run:99 Running SQL query: \\\"SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)\\\"\\r\\n12:21:36 DEBUG sql sql:run:102 query bindings: {':limit': 1000}\\r\\n12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: \\\"SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\\\\' or title LIKE :pat escape '\\\\') AND last_atime >= ORDER BY last_atime DESC\\\"\\r\\n12:21:36 DEBUG completion debug:__exit__:264 Starting url completion took 0.003652 seconds.\\r\\n12:21:36 ERROR misc crashsignal:exception_hook:205 Uncaught exception\\r\\nTraceback (most recent call last):\\r\\n File \\\"/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py\\\", line 236, in _update_completion\\r\\n model = func(*args)\\r\\n File 
\\\"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/urlmodel.py\\\", line 70, in url\\r\\n hist_cat = histcategory.HistoryCategory(delete_func=_delete_history)\\r\\n File \\\"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py\\\", line 54, in __init__\\r\\n ]), forward_only=False)\\r\\n File \\\"/home/florian/proj/qutebrowser/git/qutebrowser/misc/sql.py\\\", line 83, in __init__\\r\\n querystr, self.lastError().text()))\\r\\nqutebrowser.misc.sql.SqlException: Failed to prepare query \\\"SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\\\\' or title LIKE :pat escape '\\\\') AND last_atime >= ORDER BY last_atime DESC\\\": \\\"near \\\"ORDER\\\": syntax error Unable to execute statement\\\"\\r\\n```\\r\\n\\r\\ncc @rcorre \\n\", \"before_files\": [{\"content\": \"# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\\n\\n# Copyright 2017 Ryan Roden-Corrent (rcorre) \\n#\\n# This file is part of qutebrowser.\\n#\\n# qutebrowser is free software: you can redistribute it and/or modify\\n# it under the terms of the GNU General Public License as published by\\n# the Free Software Foundation, either version 3 of the License, or\\n# (at your option) any later version.\\n#\\n# qutebrowser is distributed in the hope that it will be useful,\\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n# GNU General Public License for more details.\\n#\\n# You should have received a copy of the GNU General Public License\\n# along with qutebrowser. If not, see .\\n\\n\\\"\\\"\\\"A completion category that queries the SQL History store.\\\"\\\"\\\"\\n\\nimport re\\n\\nfrom PyQt5.QtSql import QSqlQueryModel\\n\\nfrom qutebrowser.misc import sql\\nfrom qutebrowser.utils import debug\\nfrom qutebrowser.commands import cmdexc\\nfrom qutebrowser.config import config\\n\\n\\nclass HistoryCategory(QSqlQueryModel):\\n\\n \\\"\\\"\\\"A completion category that queries the SQL History store.\\\"\\\"\\\"\\n\\n def __init__(self, *, delete_func=None, parent=None):\\n \\\"\\\"\\\"Create a new History completion category.\\\"\\\"\\\"\\n super().__init__(parent=parent)\\n self.name = \\\"History\\\"\\n\\n # replace ' in timestamp-format to avoid breaking the query\\n timefmt = (\\\"strftime('{}', last_atime, 'unixepoch', 'localtime')\\\"\\n .format(config.get('completion', 'timestamp-format')\\n .replace(\\\"'\\\", \\\"`\\\")))\\n\\n self._query = sql.Query(' '.join([\\n \\\"SELECT url, title, {}\\\".format(timefmt),\\n \\\"FROM CompletionHistory\\\",\\n # the incoming pattern will have literal % and _ escaped with '\\\\'\\n # we need to tell sql to treat '\\\\' as an escape character\\n \\\"WHERE (url LIKE :pat escape '\\\\\\\\' or title LIKE :pat escape '\\\\\\\\')\\\",\\n self._atime_expr(),\\n \\\"ORDER BY last_atime DESC\\\",\\n ]), forward_only=False)\\n\\n # advertise that this model filters by URL and title\\n self.columns_to_filter = [0, 1]\\n self.delete_func = delete_func\\n\\n def _atime_expr(self):\\n \\\"\\\"\\\"If max_items is set, return an expression to limit the query.\\\"\\\"\\\"\\n max_items = config.get('completion', 'web-history-max-items')\\n # HistoryCategory should not be added to the completion in that case.\\n assert max_items != 0\\n\\n if max_items < 0:\\n return ''\\n\\n min_atime = sql.Query(' '.join([\\n 'SELECT min(last_atime) FROM',\\n '(SELECT last_atime FROM CompletionHistory',\\n 'ORDER BY 
last_atime DESC LIMIT :limit)',\\n ])).run(limit=max_items).value()\\n\\n return \\\"AND last_atime >= {}\\\".format(min_atime)\\n\\n def set_pattern(self, pattern):\\n \\\"\\\"\\\"Set the pattern used to filter results.\\n\\n Args:\\n pattern: string pattern to filter by.\\n \\\"\\\"\\\"\\n # escape to treat a user input % or _ as a literal, not a wildcard\\n pattern = pattern.replace('%', '\\\\\\\\%')\\n pattern = pattern.replace('_', '\\\\\\\\_')\\n # treat spaces as wildcards to match any of the typed words\\n pattern = re.sub(r' +', '%', pattern)\\n pattern = '%{}%'.format(pattern)\\n with debug.log_time('sql', 'Running completion query'):\\n self._query.run(pat=pattern)\\n self.setQuery(self._query)\\n\\n def delete_cur_item(self, index):\\n \\\"\\\"\\\"Delete the row at the given index.\\\"\\\"\\\"\\n if not self.delete_func:\\n raise cmdexc.CommandError(\\\"Cannot delete this item.\\\")\\n data = [self.data(index.sibling(index.row(), i))\\n for i in range(self.columnCount())]\\n self.delete_func(data)\\n # re-run query to reload updated table\\n with debug.log_time('sql', 'Re-running completion query post-delete'):\\n self._query.run()\\n self.setQuery(self._query)\\n\", \"path\": \"qutebrowser/completion/models/histcategory.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2238,"string":"2,238"},"num_tokens_diff":{"kind":"number","value":148,"string":"148"}}},{"rowIdx":18118,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_32333"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"airctic__icevision-1091"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nTypo lightning -> lighting in Albumentations helper\nThere are several instances in the codebase with the typo `lightning` when the intended term is `lighting`\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74\r\n\r\n\nTypo lightning -> lighting in Albumentations helper\nThere are several instances in the codebase with the typo `lightning` when the intended term is `lighting`\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74\r\n\r\n\n\n\n\n[start of icevision/tfms/albumentations/albumentations_helpers.py]\n1 __all__ = [\"aug_tfms\", \"resize\", \"resize_and_pad\", \"get_size_without_padding\"]\n2 \n3 import albumentations as A\n4 \n5 from icevision.imports import *\n6 from icevision.core import *\n7 \n8 \n9 def resize(size, ratio_resize=A.LongestMaxSize):\n10 return ratio_resize(size) if isinstance(size, int) else A.Resize(*size[::-1])\n11 \n12 \n13 def resize_and_pad(\n14 size: Union[int, Tuple[int, int]],\n15 pad: A.DualTransform = 
partial(\n16 A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]\n17 ),\n18 ):\n19 width, height = (size, size) if isinstance(size, int) else size\n20 return [resize(size), pad(min_height=height, min_width=width)]\n21 \n22 \n23 def aug_tfms(\n24 size: Union[int, Tuple[int, int]],\n25 presize: Optional[Union[int, Tuple[int, int]]] = None,\n26 horizontal_flip: Optional[A.HorizontalFlip] = A.HorizontalFlip(),\n27 shift_scale_rotate: Optional[A.ShiftScaleRotate] = A.ShiftScaleRotate(\n28 rotate_limit=15,\n29 ),\n30 rgb_shift: Optional[A.RGBShift] = A.RGBShift(\n31 r_shift_limit=10,\n32 g_shift_limit=10,\n33 b_shift_limit=10,\n34 ),\n35 lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\n36 blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),\n37 crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),\n38 pad: Optional[A.DualTransform] = partial(\n39 A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]\n40 ),\n41 ) -> List[A.BasicTransform]:\n42 \"\"\"Collection of useful augmentation transforms.\n43 \n44 # Arguments\n45 size: The final size of the image. If an `int` is given, the maximum size of\n46 the image is rescaled, maintaing aspect ratio. If a `tuple` is given,\n47 the image is rescaled to have that exact size (width, height).\n48 presize: Rescale the image before applying other transfroms. If `None` this\n49 transform is not applied. First introduced by fastai,this technique is\n50 explained in their book in [this](https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb)\n51 chapter (tip: search for \"Presizing\").\n52 horizontal_flip: Flip around the y-axis. If `None` this transform is not applied.\n53 shift_scale_rotate: Randomly shift, scale, and rotate. If `None` this transform\n54 is not applied.\n55 rgb_shift: Randomly shift values for each channel of RGB image. If `None` this\n56 transform is not applied.\n57 lightning: Randomly changes Brightness and Contrast. If `None` this transform\n58 is not applied.\n59 blur: Randomly blur the image. If `None` this transform is not applied.\n60 crop_fn: Randomly crop the image. If `None` this transform is not applied.\n61 Use `partial` to saturate other parameters of the class.\n62 pad: Pad the image to `size`, squaring the image if `size` is an `int`.\n63 If `None` this transform is not applied. 
Use `partial` to sature other\n64 parameters of the class.\n65 \n66 # Returns\n67 A list of albumentations transforms.\n68 \"\"\"\n69 \n70 width, height = (size, size) if isinstance(size, int) else size\n71 \n72 tfms = []\n73 tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]\n74 tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]\n75 # Resize as the last transforms to reduce the number of artificial artifacts created\n76 if crop_fn is not None:\n77 crop = crop_fn(height=height, width=width)\n78 tfms += [A.OneOrOther(crop, resize(size), p=crop.p)]\n79 else:\n80 tfms += [resize(size)]\n81 tfms += [pad(min_height=height, min_width=width) if pad is not None else None]\n82 \n83 tfms = [tfm for tfm in tfms if tfm is not None]\n84 \n85 return tfms\n86 \n87 \n88 def get_size_without_padding(\n89 tfms_list: List[Any], before_tfm_img: PIL.Image.Image, height: int, width: int\n90 ) -> Tuple[int, int]:\n91 \"\"\"\n92 Infer the height and width of the pre-processed image after removing padding.\n93 \n94 Parameters\n95 ----------\n96 tfms_list: list of albumentations transforms applied to the `before_tfm_img` image\n97 before passing it to the model for inference.\n98 before_tfm_img: original image before being pre-processed for inference.\n99 height: height of output image from icevision `predict` function.\n100 width: width of output image from icevision `predict` function.\n101 \n102 Returns\n103 -------\n104 height and width of the image coming out of the inference pipeline, after removing padding\n105 \"\"\"\n106 if get_transform(tfms_list, \"Pad\") is not None:\n107 before_pad_h, before_pad_w, _ = np.array(before_tfm_img).shape\n108 \n109 t = get_transform(tfms_list, \"SmallestMaxSize\")\n110 if t is not None:\n111 presize = t.max_size\n112 height, width = func_max_size(before_pad_h, before_pad_w, presize, min)\n113 \n114 t = get_transform(tfms_list, \"LongestMaxSize\")\n115 if t is not None:\n116 size = t.max_size\n117 height, width = func_max_size(before_pad_h, before_pad_w, size, max)\n118 \n119 return height, width\n120 \n121 \n122 def py3round(number: float) -> int:\n123 \"\"\"\n124 Unified rounding in all python versions. Used by albumentations.\n125 \n126 Parameters\n127 ----------\n128 number: float to round.\n129 \n130 Returns\n131 -------\n132 Rounded number\n133 \"\"\"\n134 if abs(round(number) - number) == 0.5:\n135 return int(2.0 * round(number / 2.0))\n136 \n137 return int(round(number))\n138 \n139 \n140 def func_max_size(\n141 height: int, width: int, max_size: int, func: Callable[[int, int], int]\n142 ) -> Tuple[int, int]:\n143 \"\"\"\n144 Calculate rescaled height and width of the image in question wrt to a specific size.\n145 \n146 Parameters\n147 ----------\n148 height: height of the image in question.\n149 width: width of the image in question.\n150 max_size: size wrt the image needs to be rescaled (resized).\n151 func: min/max. 
Whether to compare max_size to the smallest/longest of the image dims.\n152 \n153 Returns\n154 -------\n155 Rescaled height and width\n156 \"\"\"\n157 scale = max_size / float(func(width, height))\n158 \n159 if scale != 1.0:\n160 height, width = tuple(py3round(dim * scale) for dim in (height, width))\n161 return height, width\n162 \n163 \n164 def get_transform(tfms_list: List[Any], t: str) -> Any:\n165 \"\"\"\n166 Extract transform `t` from `tfms_list`.\n167 \n168 Parameters\n169 ----------\n170 tfms_list: list of albumentations transforms.\n171 t: name (str) of the transform to look for and return from within `tfms_list`.\n172 \n173 Returns\n174 -------\n175 The `t` transform if found inside `tfms_list`, otherwise None.\n176 \"\"\"\n177 for el in tfms_list:\n178 if t in str(type(el)):\n179 return el\n180 return None\n181\n[end of icevision/tfms/albumentations/albumentations_helpers.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/icevision/tfms/albumentations/albumentations_helpers.py b/icevision/tfms/albumentations/albumentations_helpers.py\n--- a/icevision/tfms/albumentations/albumentations_helpers.py\n+++ b/icevision/tfms/albumentations/albumentations_helpers.py\n@@ -32,7 +32,7 @@\n g_shift_limit=10,\n b_shift_limit=10,\n ),\n- lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\n+ lighting: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\n blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),\n crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),\n pad: Optional[A.DualTransform] = partial(\n@@ -54,7 +54,7 @@\n is not applied.\n rgb_shift: Randomly shift values for each channel of RGB image. If `None` this\n transform is not applied.\n- lightning: Randomly changes Brightness and Contrast. If `None` this transform\n+ lighting: Randomly changes Brightness and Contrast. If `None` this transform\n is not applied.\n blur: Randomly blur the image. If `None` this transform is not applied.\n crop_fn: Randomly crop the image. 
If `None` this transform is not applied.\n@@ -71,7 +71,7 @@\n \n tfms = []\n tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]\n- tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]\n+ tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lighting, blur]\n # Resize as the last transforms to reduce the number of artificial artifacts created\n if crop_fn is not None:\n crop = crop_fn(height=height, width=width)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/icevision/tfms/albumentations/albumentations_helpers.py b/icevision/tfms/albumentations/albumentations_helpers.py\\n--- a/icevision/tfms/albumentations/albumentations_helpers.py\\n+++ b/icevision/tfms/albumentations/albumentations_helpers.py\\n@@ -32,7 +32,7 @@\\n g_shift_limit=10,\\n b_shift_limit=10,\\n ),\\n- lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\\n+ lighting: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\\n blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),\\n crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),\\n pad: Optional[A.DualTransform] = partial(\\n@@ -54,7 +54,7 @@\\n is not applied.\\n rgb_shift: Randomly shift values for each channel of RGB image. If `None` this\\n transform is not applied.\\n- lightning: Randomly changes Brightness and Contrast. If `None` this transform\\n+ lighting: Randomly changes Brightness and Contrast. If `None` this transform\\n is not applied.\\n blur: Randomly blur the image. If `None` this transform is not applied.\\n crop_fn: Randomly crop the image. If `None` this transform is not applied.\\n@@ -71,7 +71,7 @@\\n \\n tfms = []\\n tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]\\n- tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]\\n+ tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lighting, blur]\\n # Resize as the last transforms to reduce the number of artificial artifacts created\\n if crop_fn is not None:\\n crop = crop_fn(height=height, width=width)\\n\", \"issue\": \"Typo lightning -> lighting in Albumentations helper\\nThere are several instances in the codebase with the typo `lightning` when the intended term is `lighting`\\r\\n\\r\\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35\\r\\n\\r\\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57\\r\\n\\r\\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74\\r\\n\\r\\n\\nTypo lightning -> lighting in Albumentations helper\\nThere are several instances in the codebase with the typo `lightning` when the intended term is `lighting`\\r\\n\\r\\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35\\r\\n\\r\\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57\\r\\n\\r\\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74\\r\\n\\r\\n\\n\", \"before_files\": [{\"content\": \"__all__ = [\\\"aug_tfms\\\", \\\"resize\\\", \\\"resize_and_pad\\\", 
\\\"get_size_without_padding\\\"]\\n\\nimport albumentations as A\\n\\nfrom icevision.imports import *\\nfrom icevision.core import *\\n\\n\\ndef resize(size, ratio_resize=A.LongestMaxSize):\\n return ratio_resize(size) if isinstance(size, int) else A.Resize(*size[::-1])\\n\\n\\ndef resize_and_pad(\\n size: Union[int, Tuple[int, int]],\\n pad: A.DualTransform = partial(\\n A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]\\n ),\\n):\\n width, height = (size, size) if isinstance(size, int) else size\\n return [resize(size), pad(min_height=height, min_width=width)]\\n\\n\\ndef aug_tfms(\\n size: Union[int, Tuple[int, int]],\\n presize: Optional[Union[int, Tuple[int, int]]] = None,\\n horizontal_flip: Optional[A.HorizontalFlip] = A.HorizontalFlip(),\\n shift_scale_rotate: Optional[A.ShiftScaleRotate] = A.ShiftScaleRotate(\\n rotate_limit=15,\\n ),\\n rgb_shift: Optional[A.RGBShift] = A.RGBShift(\\n r_shift_limit=10,\\n g_shift_limit=10,\\n b_shift_limit=10,\\n ),\\n lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\\n blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),\\n crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),\\n pad: Optional[A.DualTransform] = partial(\\n A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]\\n ),\\n) -> List[A.BasicTransform]:\\n \\\"\\\"\\\"Collection of useful augmentation transforms.\\n\\n # Arguments\\n size: The final size of the image. If an `int` is given, the maximum size of\\n the image is rescaled, maintaing aspect ratio. If a `tuple` is given,\\n the image is rescaled to have that exact size (width, height).\\n presize: Rescale the image before applying other transfroms. If `None` this\\n transform is not applied. First introduced by fastai,this technique is\\n explained in their book in [this](https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb)\\n chapter (tip: search for \\\"Presizing\\\").\\n horizontal_flip: Flip around the y-axis. If `None` this transform is not applied.\\n shift_scale_rotate: Randomly shift, scale, and rotate. If `None` this transform\\n is not applied.\\n rgb_shift: Randomly shift values for each channel of RGB image. If `None` this\\n transform is not applied.\\n lightning: Randomly changes Brightness and Contrast. If `None` this transform\\n is not applied.\\n blur: Randomly blur the image. If `None` this transform is not applied.\\n crop_fn: Randomly crop the image. If `None` this transform is not applied.\\n Use `partial` to saturate other parameters of the class.\\n pad: Pad the image to `size`, squaring the image if `size` is an `int`.\\n If `None` this transform is not applied. 
Use `partial` to sature other\\n parameters of the class.\\n\\n # Returns\\n A list of albumentations transforms.\\n \\\"\\\"\\\"\\n\\n width, height = (size, size) if isinstance(size, int) else size\\n\\n tfms = []\\n tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]\\n tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]\\n # Resize as the last transforms to reduce the number of artificial artifacts created\\n if crop_fn is not None:\\n crop = crop_fn(height=height, width=width)\\n tfms += [A.OneOrOther(crop, resize(size), p=crop.p)]\\n else:\\n tfms += [resize(size)]\\n tfms += [pad(min_height=height, min_width=width) if pad is not None else None]\\n\\n tfms = [tfm for tfm in tfms if tfm is not None]\\n\\n return tfms\\n\\n\\ndef get_size_without_padding(\\n tfms_list: List[Any], before_tfm_img: PIL.Image.Image, height: int, width: int\\n) -> Tuple[int, int]:\\n \\\"\\\"\\\"\\n Infer the height and width of the pre-processed image after removing padding.\\n\\n Parameters\\n ----------\\n tfms_list: list of albumentations transforms applied to the `before_tfm_img` image\\n before passing it to the model for inference.\\n before_tfm_img: original image before being pre-processed for inference.\\n height: height of output image from icevision `predict` function.\\n width: width of output image from icevision `predict` function.\\n\\n Returns\\n -------\\n height and width of the image coming out of the inference pipeline, after removing padding\\n \\\"\\\"\\\"\\n if get_transform(tfms_list, \\\"Pad\\\") is not None:\\n before_pad_h, before_pad_w, _ = np.array(before_tfm_img).shape\\n\\n t = get_transform(tfms_list, \\\"SmallestMaxSize\\\")\\n if t is not None:\\n presize = t.max_size\\n height, width = func_max_size(before_pad_h, before_pad_w, presize, min)\\n\\n t = get_transform(tfms_list, \\\"LongestMaxSize\\\")\\n if t is not None:\\n size = t.max_size\\n height, width = func_max_size(before_pad_h, before_pad_w, size, max)\\n\\n return height, width\\n\\n\\ndef py3round(number: float) -> int:\\n \\\"\\\"\\\"\\n Unified rounding in all python versions. Used by albumentations.\\n\\n Parameters\\n ----------\\n number: float to round.\\n\\n Returns\\n -------\\n Rounded number\\n \\\"\\\"\\\"\\n if abs(round(number) - number) == 0.5:\\n return int(2.0 * round(number / 2.0))\\n\\n return int(round(number))\\n\\n\\ndef func_max_size(\\n height: int, width: int, max_size: int, func: Callable[[int, int], int]\\n) -> Tuple[int, int]:\\n \\\"\\\"\\\"\\n Calculate rescaled height and width of the image in question wrt to a specific size.\\n\\n Parameters\\n ----------\\n height: height of the image in question.\\n width: width of the image in question.\\n max_size: size wrt the image needs to be rescaled (resized).\\n func: min/max. 
Whether to compare max_size to the smallest/longest of the image dims.\\n\\n Returns\\n -------\\n Rescaled height and width\\n \\\"\\\"\\\"\\n scale = max_size / float(func(width, height))\\n\\n if scale != 1.0:\\n height, width = tuple(py3round(dim * scale) for dim in (height, width))\\n return height, width\\n\\n\\ndef get_transform(tfms_list: List[Any], t: str) -> Any:\\n \\\"\\\"\\\"\\n Extract transform `t` from `tfms_list`.\\n\\n Parameters\\n ----------\\n tfms_list: list of albumentations transforms.\\n t: name (str) of the transform to look for and return from within `tfms_list`.\\n\\n Returns\\n -------\\n The `t` transform if found inside `tfms_list`, otherwise None.\\n \\\"\\\"\\\"\\n for el in tfms_list:\\n if t in str(type(el)):\\n return el\\n return None\\n\", \"path\": \"icevision/tfms/albumentations/albumentations_helpers.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3162,"string":"3,162"},"num_tokens_diff":{"kind":"number","value":447,"string":"447"}}},{"rowIdx":18119,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_6977"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"systemd__mkosi-2315"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\n`sh` unavailable in sandbox on non `usr`-merged systems\nmkosi's sandbox [mounts `/usr:/usr`](https://github.com/systemd/mkosi/blob/14b07c74d5b3a8b0f44d9a5c733f7902c7859417/mkosi/sandbox.py#L88) if no ToolsTree is specified. On NixOS, which is not usr-merged, this causes problems when programs have runtime dependencies on absolute paths in `(/usr)/bin`. This is the case for `apt-get install`, that tries to find an `sh` at runtime in `/usr/bin` or `/bin`. That fails on NixOS, due to only `/usr` being mounted, while `sh` is in `/bin/sh`.\r\n\r\nTwo possible workarounds that I have in mind:\r\n1. (Preferred) Find a way to tell `apt-get` which shell to use. `dpkg` [respects the `SHELL` environment variable](https://man7.org/linux/man-pages/man1/dpkg.1.html), but I couldn't find a similar option for `apt-get`. If there would be such an option, we should simply tell `apt-get` to use the `sh` from the Nix store that's already mounted.\r\n2. Loosen the sandbox and mount `/bin(/sh)` into the environment. \r\n\r\nIf you have any input regarding the first proposed solution, let me know. 
Otherwise, I'd be happy to implement the second solution too.\r\n\r\nCheers, Moritz\n\n\n\n[start of mkosi/sandbox.py]\n1 # SPDX-License-Identifier: LGPL-2.1+\n2 import enum\n3 import logging\n4 import os\n5 import uuid\n6 from collections.abc import Sequence\n7 from pathlib import Path\n8 from typing import Optional\n9 \n10 from mkosi.types import PathString\n11 from mkosi.util import INVOKING_USER, flatten, one_zero\n12 \n13 \n14 # https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h\n15 class Capability(enum.Enum):\n16 CAP_NET_ADMIN = 12\n17 \n18 \n19 def have_effective_cap(capability: Capability) -> bool:\n20 for line in Path(\"/proc/self/status\").read_text().splitlines():\n21 if line.startswith(\"CapEff:\"):\n22 hexcap = line.removeprefix(\"CapEff:\").strip()\n23 break\n24 else:\n25 logging.warning(f\"\\\"CapEff:\\\" not found in /proc/self/status, assuming we don't have {capability}\")\n26 return False\n27 \n28 return (int(hexcap, 16) & (1 << capability.value)) != 0\n29 \n30 \n31 def finalize_passwd_mounts(root: Path) -> list[PathString]:\n32 \"\"\"\n33 If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we\n34 run the command, to make sure that the command we run uses user/group information from the apivfs\n35 directory instead of from the host.\n36 \"\"\"\n37 options: list[PathString] = []\n38 \n39 for f in (\"passwd\", \"group\", \"shadow\", \"gshadow\"):\n40 options += [\"--ro-bind-try\", root / \"etc\" / f, f\"/etc/{f}\"]\n41 \n42 return options\n43 \n44 \n45 def finalize_crypto_mounts(tools: Path = Path(\"/\")) -> list[PathString]:\n46 mounts = [\n47 (tools / subdir, Path(\"/\") / subdir)\n48 for subdir in (\n49 Path(\"etc/pki\"),\n50 Path(\"etc/ssl\"),\n51 Path(\"etc/crypto-policies\"),\n52 Path(\"etc/ca-certificates\"),\n53 Path(\"etc/pacman.d/gnupg\"),\n54 Path(\"var/lib/ca-certificates\"),\n55 )\n56 if (tools / subdir).exists()\n57 ]\n58 \n59 return flatten(\n60 [\"--ro-bind\", src, target]\n61 for src, target\n62 in sorted(set(mounts), key=lambda s: s[1])\n63 )\n64 \n65 \n66 def sandbox_cmd(\n67 *,\n68 network: bool = False,\n69 devices: bool = False,\n70 scripts: Optional[Path] = None,\n71 tools: Path = Path(\"/\"),\n72 relaxed: bool = False,\n73 options: Sequence[PathString] = (),\n74 ) -> list[PathString]:\n75 cmdline: list[PathString] = []\n76 \n77 if not relaxed:\n78 # We want to use an empty subdirectory in the host's /var/tmp as the sandbox's /var/tmp. 
To make sure it only\n79 # gets created when we run the sandboxed command and cleaned up when the sandboxed command exits, we create it\n80 # using shell.\n81 vartmp = f\"/var/tmp/mkosi-var-tmp-{uuid.uuid4().hex[:16]}\"\n82 cmdline += [\"sh\", \"-c\", f\"trap 'rm -rf {vartmp}' EXIT && mkdir --mode 1777 {vartmp} && $0 \\\"$@\\\"\"]\n83 else:\n84 vartmp = None\n85 \n86 cmdline += [\n87 \"bwrap\",\n88 \"--ro-bind\", tools / \"usr\", \"/usr\",\n89 *([\"--unshare-net\"] if not network and have_effective_cap(Capability.CAP_NET_ADMIN) else []),\n90 \"--die-with-parent\",\n91 \"--proc\", \"/proc\",\n92 \"--setenv\", \"SYSTEMD_OFFLINE\", one_zero(network),\n93 ]\n94 \n95 if relaxed:\n96 cmdline += [\"--bind\", \"/tmp\", \"/tmp\"]\n97 else:\n98 cmdline += [\n99 \"--tmpfs\", \"/tmp\",\n100 \"--unshare-ipc\",\n101 ]\n102 \n103 if (tools / \"nix/store\").exists():\n104 cmdline += [\"--bind\", tools / \"nix/store\", \"/nix/store\"]\n105 \n106 if devices or relaxed:\n107 cmdline += [\n108 \"--bind\", \"/sys\", \"/sys\",\n109 \"--bind\", \"/run\", \"/run\",\n110 \"--dev-bind\", \"/dev\", \"/dev\",\n111 ]\n112 else:\n113 cmdline += [\"--dev\", \"/dev\"]\n114 \n115 if relaxed:\n116 dirs = (\"/etc\", \"/opt\", \"/srv\", \"/media\", \"/mnt\", \"/var\", os.fspath(INVOKING_USER.home()))\n117 \n118 for d in dirs:\n119 if Path(d).exists():\n120 cmdline += [\"--bind\", d, d]\n121 \n122 if len(Path.cwd().parents) >= 2:\n123 # `Path.parents` only supports slices and negative indexing from Python 3.10 onwards.\n124 # TODO: Remove list() when we depend on Python 3.10 or newer.\n125 d = os.fspath(list(Path.cwd().parents)[-2])\n126 elif len(Path.cwd().parents) == 1:\n127 d = os.fspath(Path.cwd())\n128 else:\n129 d = \"\"\n130 \n131 if d and d not in (*dirs, \"/home\", \"/usr\", \"/nix\", \"/tmp\"):\n132 cmdline += [\"--bind\", d, d]\n133 \n134 if vartmp:\n135 cmdline += [\"--bind\", vartmp, \"/var/tmp\"]\n136 \n137 for d in (\"bin\", \"sbin\", \"lib\", \"lib32\", \"lib64\"):\n138 if (p := tools / d).is_symlink():\n139 cmdline += [\"--symlink\", p.readlink(), Path(\"/\") / p.relative_to(tools)]\n140 \n141 path = \"/usr/bin:/usr/sbin\" if tools != Path(\"/\") else os.environ[\"PATH\"]\n142 \n143 cmdline += [\n144 \"--setenv\", \"PATH\", f\"{scripts or ''}:{path}\",\n145 *options,\n146 ]\n147 \n148 if not relaxed:\n149 cmdline += [\"--symlink\", \"../proc/self/mounts\", \"/etc/mtab\"]\n150 \n151 # If we're using /usr from a tools tree, we have to use /etc/alternatives from the tools tree as well if it\n152 # exists since that points directly back to /usr. Apply this after the options so the caller can mount\n153 # something else to /etc without overriding this mount. 
In relaxed mode, we only do this if /etc/alternatives\n154 # already exists on the host as otherwise we'd modify the host's /etc by creating the mountpoint ourselves (or\n155 # fail when trying to create it).\n156 if (tools / \"etc/alternatives\").exists() and (not relaxed or Path(\"/etc/alternatives\").exists()):\n157 cmdline += [\"--ro-bind\", tools / \"etc/alternatives\", \"/etc/alternatives\"]\n158 \n159 if scripts:\n160 cmdline += [\"--ro-bind\", scripts, scripts]\n161 \n162 if network and not relaxed:\n163 cmdline += [\"--bind\", \"/etc/resolv.conf\", \"/etc/resolv.conf\"]\n164 \n165 # bubblewrap creates everything with a restricted mode so relax stuff as needed.\n166 ops = []\n167 if not devices:\n168 ops += [\"chmod 1777 /dev/shm\"]\n169 if not relaxed:\n170 ops += [\"chmod 755 /etc\"]\n171 ops += [\"exec $0 \\\"$@\\\"\"]\n172 \n173 cmdline += [\"sh\", \"-c\", \" && \".join(ops)]\n174 \n175 return cmdline\n176 \n177 \n178 def apivfs_cmd(root: Path) -> list[PathString]:\n179 return [\n180 \"bwrap\",\n181 \"--dev-bind\", \"/\", \"/\",\n182 \"--tmpfs\", root / \"run\",\n183 \"--tmpfs\", root / \"tmp\",\n184 \"--bind\", \"/var/tmp\", root / \"var/tmp\",\n185 \"--proc\", root / \"proc\",\n186 \"--dev\", root / \"dev\",\n187 # APIVFS generally means chrooting is going to happen so unset TMPDIR just to be safe.\n188 \"--unsetenv\", \"TMPDIR\",\n189 # Make sure /etc/machine-id is not overwritten by any package manager post install scripts.\n190 \"--ro-bind-try\", root / \"etc/machine-id\", root / \"etc/machine-id\",\n191 *finalize_passwd_mounts(root),\n192 \"sh\", \"-c\",\n193 f\"chmod 1777 {root / 'tmp'} {root / 'var/tmp'} {root / 'dev/shm'} && \"\n194 f\"chmod 755 {root / 'run'} && \"\n195 # Make sure anything running in the root directory thinks it's in a container. $container can't always be\n196 # accessed so we write /run/host/container-manager as well which is always accessible.\n197 f\"mkdir -m 755 {root}/run/host && echo mkosi >{root}/run/host/container-manager && \"\n198 \"exec $0 \\\"$@\\\"\",\n199 ]\n200 \n201 \n202 def chroot_cmd(root: Path, *, resolve: bool = False, options: Sequence[PathString] = ()) -> list[PathString]:\n203 cmdline: list[PathString] = [\n204 \"sh\", \"-c\",\n205 f\"trap 'rm -rf {root / 'work'}' EXIT && \"\n206 # /etc/resolv.conf can be a dangling symlink to /run/systemd/resolve/stub-resolv.conf. Bubblewrap tries to call\n207 # mkdir() on each component of the path which means it will try to call\n208 # mkdir(/run/systemd/resolve/stub-resolv.conf) which will fail unless /run/systemd/resolve exists already so\n209 # we make sure that it already exists.\n210 f\"mkdir -p -m 755 {root / 'work'} {root / 'run/systemd'} {root / 'run/systemd/resolve'} && \"\n211 # No exec here because we need to clean up the /work directory afterwards.\n212 f\"$0 \\\"$@\\\"\",\n213 \"bwrap\",\n214 \"--dev-bind\", root, \"/\",\n215 \"--setenv\", \"container\", \"mkosi\",\n216 \"--setenv\", \"HOME\", \"/\",\n217 \"--setenv\", \"PATH\", \"/work/scripts:/usr/bin:/usr/sbin\",\n218 ]\n219 \n220 if resolve:\n221 cmdline += [\"--ro-bind-try\", \"/etc/resolv.conf\", \"/etc/resolv.conf\"]\n222 \n223 cmdline += options\n224 \n225 return apivfs_cmd(root) + cmdline\n226\n[end of mkosi/sandbox.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
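For reference, the second workaround sketched in the issue maps directly onto the existing `for d in ("bin", "sbin", "lib", "lib32", "lib64")` loop in `sandbox_cmd` above: on a non usr-merged host those entries are real directories rather than symlinks into `/usr`, so they can be bind-mounted read-only into the sandbox instead of being recreated as symlinks. A minimal sketch of that idea, reusing the names from the listing above:

# Sketch: keep the existing symlink branch for usr-merged hosts, and add a
# read-only bind for hosts (e.g. NixOS) where bin/sbin/lib* are real directories.
for d in ("bin", "sbin", "lib", "lib32", "lib64"):
    p = tools / d
    if p.is_symlink():
        cmdline += ["--symlink", p.readlink(), Path("/") / p.relative_to(tools)]
    elif p.is_dir():
        cmdline += ["--ro-bind", p, Path("/") / p.relative_to(tools)]

This leaves usr-merged systems unchanged while making `/bin/sh` (and the rest of the top-level directories) visible to programs such as `apt-get` inside the sandbox.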
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/mkosi/sandbox.py b/mkosi/sandbox.py\n--- a/mkosi/sandbox.py\n+++ b/mkosi/sandbox.py\n@@ -137,6 +137,8 @@\n for d in (\"bin\", \"sbin\", \"lib\", \"lib32\", \"lib64\"):\n if (p := tools / d).is_symlink():\n cmdline += [\"--symlink\", p.readlink(), Path(\"/\") / p.relative_to(tools)]\n+ elif p.is_dir():\n+ cmdline += [\"--ro-bind\", p, Path(\"/\") / p.relative_to(tools)]\n \n path = \"/usr/bin:/usr/sbin\" if tools != Path(\"/\") else os.environ[\"PATH\"]\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/mkosi/sandbox.py b/mkosi/sandbox.py\\n--- a/mkosi/sandbox.py\\n+++ b/mkosi/sandbox.py\\n@@ -137,6 +137,8 @@\\n for d in (\\\"bin\\\", \\\"sbin\\\", \\\"lib\\\", \\\"lib32\\\", \\\"lib64\\\"):\\n if (p := tools / d).is_symlink():\\n cmdline += [\\\"--symlink\\\", p.readlink(), Path(\\\"/\\\") / p.relative_to(tools)]\\n+ elif p.is_dir():\\n+ cmdline += [\\\"--ro-bind\\\", p, Path(\\\"/\\\") / p.relative_to(tools)]\\n \\n path = \\\"/usr/bin:/usr/sbin\\\" if tools != Path(\\\"/\\\") else os.environ[\\\"PATH\\\"]\\n\", \"issue\": \"`sh` unavailable in sandbox on non `usr`-merged systems\\nmkosi's sandbox [mounts `/usr:/usr`](https://github.com/systemd/mkosi/blob/14b07c74d5b3a8b0f44d9a5c733f7902c7859417/mkosi/sandbox.py#L88) if no ToolsTree is specified. On NixOS, which is not usr-merged, this causes problems when programs have runtime dependencies on absolute paths in `(/usr)/bin`. This is the case for `apt-get install`, that tries to find an `sh` at runtime in `/usr/bin` or `/bin`. That fails on NixOS, due to only `/usr` being mounted, while `sh` is in `/bin/sh`.\\r\\n\\r\\nTwo possible workarounds that I have in mind:\\r\\n1. (Preferred) Find a way to tell `apt-get` which shell to use. `dpkg` [respects the `SHELL` environment variable](https://man7.org/linux/man-pages/man1/dpkg.1.html), but I couldn't find a similar option for `apt-get`. If there would be such an option, we should simply tell `apt-get` to use the `sh` from the Nix store that's already mounted.\\r\\n2. Loosen the sandbox and mount `/bin(/sh)` into the environment. \\r\\n\\r\\nIf you have any input regarding the first proposed solution, let me know. 
Otherwise, I'd be happy to implement the second solution too.\\r\\n\\r\\nCheers, Moritz\\n\", \"before_files\": [{\"content\": \"# SPDX-License-Identifier: LGPL-2.1+\\nimport enum\\nimport logging\\nimport os\\nimport uuid\\nfrom collections.abc import Sequence\\nfrom pathlib import Path\\nfrom typing import Optional\\n\\nfrom mkosi.types import PathString\\nfrom mkosi.util import INVOKING_USER, flatten, one_zero\\n\\n\\n# https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h\\nclass Capability(enum.Enum):\\n CAP_NET_ADMIN = 12\\n\\n\\ndef have_effective_cap(capability: Capability) -> bool:\\n for line in Path(\\\"/proc/self/status\\\").read_text().splitlines():\\n if line.startswith(\\\"CapEff:\\\"):\\n hexcap = line.removeprefix(\\\"CapEff:\\\").strip()\\n break\\n else:\\n logging.warning(f\\\"\\\\\\\"CapEff:\\\\\\\" not found in /proc/self/status, assuming we don't have {capability}\\\")\\n return False\\n\\n return (int(hexcap, 16) & (1 << capability.value)) != 0\\n\\n\\ndef finalize_passwd_mounts(root: Path) -> list[PathString]:\\n \\\"\\\"\\\"\\n If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we\\n run the command, to make sure that the command we run uses user/group information from the apivfs\\n directory instead of from the host.\\n \\\"\\\"\\\"\\n options: list[PathString] = []\\n\\n for f in (\\\"passwd\\\", \\\"group\\\", \\\"shadow\\\", \\\"gshadow\\\"):\\n options += [\\\"--ro-bind-try\\\", root / \\\"etc\\\" / f, f\\\"/etc/{f}\\\"]\\n\\n return options\\n\\n\\ndef finalize_crypto_mounts(tools: Path = Path(\\\"/\\\")) -> list[PathString]:\\n mounts = [\\n (tools / subdir, Path(\\\"/\\\") / subdir)\\n for subdir in (\\n Path(\\\"etc/pki\\\"),\\n Path(\\\"etc/ssl\\\"),\\n Path(\\\"etc/crypto-policies\\\"),\\n Path(\\\"etc/ca-certificates\\\"),\\n Path(\\\"etc/pacman.d/gnupg\\\"),\\n Path(\\\"var/lib/ca-certificates\\\"),\\n )\\n if (tools / subdir).exists()\\n ]\\n\\n return flatten(\\n [\\\"--ro-bind\\\", src, target]\\n for src, target\\n in sorted(set(mounts), key=lambda s: s[1])\\n )\\n\\n\\ndef sandbox_cmd(\\n *,\\n network: bool = False,\\n devices: bool = False,\\n scripts: Optional[Path] = None,\\n tools: Path = Path(\\\"/\\\"),\\n relaxed: bool = False,\\n options: Sequence[PathString] = (),\\n) -> list[PathString]:\\n cmdline: list[PathString] = []\\n\\n if not relaxed:\\n # We want to use an empty subdirectory in the host's /var/tmp as the sandbox's /var/tmp. 
To make sure it only\\n # gets created when we run the sandboxed command and cleaned up when the sandboxed command exits, we create it\\n # using shell.\\n vartmp = f\\\"/var/tmp/mkosi-var-tmp-{uuid.uuid4().hex[:16]}\\\"\\n cmdline += [\\\"sh\\\", \\\"-c\\\", f\\\"trap 'rm -rf {vartmp}' EXIT && mkdir --mode 1777 {vartmp} && $0 \\\\\\\"$@\\\\\\\"\\\"]\\n else:\\n vartmp = None\\n\\n cmdline += [\\n \\\"bwrap\\\",\\n \\\"--ro-bind\\\", tools / \\\"usr\\\", \\\"/usr\\\",\\n *([\\\"--unshare-net\\\"] if not network and have_effective_cap(Capability.CAP_NET_ADMIN) else []),\\n \\\"--die-with-parent\\\",\\n \\\"--proc\\\", \\\"/proc\\\",\\n \\\"--setenv\\\", \\\"SYSTEMD_OFFLINE\\\", one_zero(network),\\n ]\\n\\n if relaxed:\\n cmdline += [\\\"--bind\\\", \\\"/tmp\\\", \\\"/tmp\\\"]\\n else:\\n cmdline += [\\n \\\"--tmpfs\\\", \\\"/tmp\\\",\\n \\\"--unshare-ipc\\\",\\n ]\\n\\n if (tools / \\\"nix/store\\\").exists():\\n cmdline += [\\\"--bind\\\", tools / \\\"nix/store\\\", \\\"/nix/store\\\"]\\n\\n if devices or relaxed:\\n cmdline += [\\n \\\"--bind\\\", \\\"/sys\\\", \\\"/sys\\\",\\n \\\"--bind\\\", \\\"/run\\\", \\\"/run\\\",\\n \\\"--dev-bind\\\", \\\"/dev\\\", \\\"/dev\\\",\\n ]\\n else:\\n cmdline += [\\\"--dev\\\", \\\"/dev\\\"]\\n\\n if relaxed:\\n dirs = (\\\"/etc\\\", \\\"/opt\\\", \\\"/srv\\\", \\\"/media\\\", \\\"/mnt\\\", \\\"/var\\\", os.fspath(INVOKING_USER.home()))\\n\\n for d in dirs:\\n if Path(d).exists():\\n cmdline += [\\\"--bind\\\", d, d]\\n\\n if len(Path.cwd().parents) >= 2:\\n # `Path.parents` only supports slices and negative indexing from Python 3.10 onwards.\\n # TODO: Remove list() when we depend on Python 3.10 or newer.\\n d = os.fspath(list(Path.cwd().parents)[-2])\\n elif len(Path.cwd().parents) == 1:\\n d = os.fspath(Path.cwd())\\n else:\\n d = \\\"\\\"\\n\\n if d and d not in (*dirs, \\\"/home\\\", \\\"/usr\\\", \\\"/nix\\\", \\\"/tmp\\\"):\\n cmdline += [\\\"--bind\\\", d, d]\\n\\n if vartmp:\\n cmdline += [\\\"--bind\\\", vartmp, \\\"/var/tmp\\\"]\\n\\n for d in (\\\"bin\\\", \\\"sbin\\\", \\\"lib\\\", \\\"lib32\\\", \\\"lib64\\\"):\\n if (p := tools / d).is_symlink():\\n cmdline += [\\\"--symlink\\\", p.readlink(), Path(\\\"/\\\") / p.relative_to(tools)]\\n\\n path = \\\"/usr/bin:/usr/sbin\\\" if tools != Path(\\\"/\\\") else os.environ[\\\"PATH\\\"]\\n\\n cmdline += [\\n \\\"--setenv\\\", \\\"PATH\\\", f\\\"{scripts or ''}:{path}\\\",\\n *options,\\n ]\\n\\n if not relaxed:\\n cmdline += [\\\"--symlink\\\", \\\"../proc/self/mounts\\\", \\\"/etc/mtab\\\"]\\n\\n # If we're using /usr from a tools tree, we have to use /etc/alternatives from the tools tree as well if it\\n # exists since that points directly back to /usr. Apply this after the options so the caller can mount\\n # something else to /etc without overriding this mount. 
In relaxed mode, we only do this if /etc/alternatives\\n # already exists on the host as otherwise we'd modify the host's /etc by creating the mountpoint ourselves (or\\n # fail when trying to create it).\\n if (tools / \\\"etc/alternatives\\\").exists() and (not relaxed or Path(\\\"/etc/alternatives\\\").exists()):\\n cmdline += [\\\"--ro-bind\\\", tools / \\\"etc/alternatives\\\", \\\"/etc/alternatives\\\"]\\n\\n if scripts:\\n cmdline += [\\\"--ro-bind\\\", scripts, scripts]\\n\\n if network and not relaxed:\\n cmdline += [\\\"--bind\\\", \\\"/etc/resolv.conf\\\", \\\"/etc/resolv.conf\\\"]\\n\\n # bubblewrap creates everything with a restricted mode so relax stuff as needed.\\n ops = []\\n if not devices:\\n ops += [\\\"chmod 1777 /dev/shm\\\"]\\n if not relaxed:\\n ops += [\\\"chmod 755 /etc\\\"]\\n ops += [\\\"exec $0 \\\\\\\"$@\\\\\\\"\\\"]\\n\\n cmdline += [\\\"sh\\\", \\\"-c\\\", \\\" && \\\".join(ops)]\\n\\n return cmdline\\n\\n\\ndef apivfs_cmd(root: Path) -> list[PathString]:\\n return [\\n \\\"bwrap\\\",\\n \\\"--dev-bind\\\", \\\"/\\\", \\\"/\\\",\\n \\\"--tmpfs\\\", root / \\\"run\\\",\\n \\\"--tmpfs\\\", root / \\\"tmp\\\",\\n \\\"--bind\\\", \\\"/var/tmp\\\", root / \\\"var/tmp\\\",\\n \\\"--proc\\\", root / \\\"proc\\\",\\n \\\"--dev\\\", root / \\\"dev\\\",\\n # APIVFS generally means chrooting is going to happen so unset TMPDIR just to be safe.\\n \\\"--unsetenv\\\", \\\"TMPDIR\\\",\\n # Make sure /etc/machine-id is not overwritten by any package manager post install scripts.\\n \\\"--ro-bind-try\\\", root / \\\"etc/machine-id\\\", root / \\\"etc/machine-id\\\",\\n *finalize_passwd_mounts(root),\\n \\\"sh\\\", \\\"-c\\\",\\n f\\\"chmod 1777 {root / 'tmp'} {root / 'var/tmp'} {root / 'dev/shm'} && \\\"\\n f\\\"chmod 755 {root / 'run'} && \\\"\\n # Make sure anything running in the root directory thinks it's in a container. $container can't always be\\n # accessed so we write /run/host/container-manager as well which is always accessible.\\n f\\\"mkdir -m 755 {root}/run/host && echo mkosi >{root}/run/host/container-manager && \\\"\\n \\\"exec $0 \\\\\\\"$@\\\\\\\"\\\",\\n ]\\n\\n\\ndef chroot_cmd(root: Path, *, resolve: bool = False, options: Sequence[PathString] = ()) -> list[PathString]:\\n cmdline: list[PathString] = [\\n \\\"sh\\\", \\\"-c\\\",\\n f\\\"trap 'rm -rf {root / 'work'}' EXIT && \\\"\\n # /etc/resolv.conf can be a dangling symlink to /run/systemd/resolve/stub-resolv.conf. 
Bubblewrap tries to call\\n # mkdir() on each component of the path which means it will try to call\\n # mkdir(/run/systemd/resolve/stub-resolv.conf) which will fail unless /run/systemd/resolve exists already so\\n # we make sure that it already exists.\\n f\\\"mkdir -p -m 755 {root / 'work'} {root / 'run/systemd'} {root / 'run/systemd/resolve'} && \\\"\\n # No exec here because we need to clean up the /work directory afterwards.\\n f\\\"$0 \\\\\\\"$@\\\\\\\"\\\",\\n \\\"bwrap\\\",\\n \\\"--dev-bind\\\", root, \\\"/\\\",\\n \\\"--setenv\\\", \\\"container\\\", \\\"mkosi\\\",\\n \\\"--setenv\\\", \\\"HOME\\\", \\\"/\\\",\\n \\\"--setenv\\\", \\\"PATH\\\", \\\"/work/scripts:/usr/bin:/usr/sbin\\\",\\n ]\\n\\n if resolve:\\n cmdline += [\\\"--ro-bind-try\\\", \\\"/etc/resolv.conf\\\", \\\"/etc/resolv.conf\\\"]\\n\\n cmdline += options\\n\\n return apivfs_cmd(root) + cmdline\\n\", \"path\": \"mkosi/sandbox.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3639,"string":"3,639"},"num_tokens_diff":{"kind":"number","value":156,"string":"156"}}},{"rowIdx":18120,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_35400"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"opsdroid__opsdroid-167"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nModule specific site-packages\nIt could be good for modules to install their dependancies in a specific `site-packages` directory which is only added to the path when it is time to import the modules.\r\n\r\nA good place could be `~/.opsdroid/site-packages`.\n\n\n\n[start of opsdroid/loader.py]\n1 \"\"\"Class for loading in modules to OpsDroid.\"\"\"\n2 \n3 import logging\n4 import os\n5 import sys\n6 import shutil\n7 import subprocess\n8 import importlib\n9 import yaml\n10 from opsdroid.const import (\n11 DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\n12 DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE)\n13 \n14 \n15 _LOGGER = logging.getLogger(__name__)\n16 \n17 \n18 class Loader:\n19 \"\"\"Class to load in config and modules.\"\"\"\n20 \n21 def __init__(self, opsdroid):\n22 \"\"\"Create object with opsdroid instance.\"\"\"\n23 self.opsdroid = opsdroid\n24 self.modules_directory = None\n25 self.current_import_config = None\n26 _LOGGER.debug(\"Loaded loader\")\n27 \n28 @staticmethod\n29 def import_module(config):\n30 \"\"\"Import module namespace as variable and return it.\"\"\"\n31 try:\n32 module = importlib.import_module(\n33 config[\"module_path\"] + \".\" + config[\"name\"])\n34 _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n35 config[\"module_path\"])\n36 return module\n37 except ImportError as error:\n38 _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n39 \" \" + config[\"module_path\"] + \".\" + config[\"name\"])\n40 _LOGGER.debug(error)\n41 \n42 try:\n43 module = importlib.import_module(\n44 config[\"module_path\"])\n45 _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n46 config[\"module_path\"])\n47 return module\n48 except ImportError as error:\n49 _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n50 \" \" + config[\"module_path\"])\n51 _LOGGER.debug(error)\n52 \n53 _LOGGER.error(\"Failed to load \" + config[\"type\"] +\n54 \" \" + config[\"module_path\"])\n55 return None\n56 \n57 @staticmethod\n58 def check_cache(config):\n59 \"\"\"Remove module if 'no-cache' set in config.\"\"\"\n60 if 
\"no-cache\" in config \\\n61 and config[\"no-cache\"]:\n62 _LOGGER.debug(\"'no-cache' set, removing \" + config[\"install_path\"])\n63 if os.path.isdir(config[\"install_path\"]):\n64 shutil.rmtree(config[\"install_path\"])\n65 if os.path.isfile(config[\"install_path\"] + \".py\"):\n66 os.remove(config[\"install_path\"] + \".py\")\n67 \n68 def build_module_path(self, path_type, config):\n69 \"\"\"Generate the module path from name and type.\"\"\"\n70 if path_type == \"import\":\n71 return MODULES_DIRECTORY + \".\" + config[\"type\"] + \\\n72 \".\" + config[\"name\"]\n73 elif path_type == \"install\":\n74 return self.modules_directory + \"/\" + config[\"type\"] + \\\n75 \"/\" + config[\"name\"]\n76 \n77 @staticmethod\n78 def git_clone(git_url, install_path, branch):\n79 \"\"\"Clone a git repo to a location and wait for finish.\"\"\"\n80 process = subprocess.Popen([\"git\", \"clone\", \"-b\", branch,\n81 git_url, install_path], shell=False,\n82 stdout=subprocess.PIPE,\n83 stderr=subprocess.PIPE)\n84 for output in process.communicate():\n85 if output != \"\":\n86 for line in output.splitlines():\n87 _LOGGER.debug(str(line).strip())\n88 process.wait()\n89 \n90 @staticmethod\n91 def pip_install_deps(requirements_path):\n92 \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n93 process = subprocess.Popen([\"pip\", \"install\", \"-r\", requirements_path],\n94 shell=False,\n95 stdout=subprocess.PIPE,\n96 stderr=subprocess.PIPE)\n97 for output in process.communicate():\n98 if output != \"\":\n99 for line in output.splitlines():\n100 _LOGGER.debug(str(line).strip())\n101 process.wait()\n102 \n103 @staticmethod\n104 def create_default_config(config_path):\n105 \"\"\"Create a default config file based on the included example.\"\"\"\n106 _LOGGER.info(\"Creating %s.\", config_path)\n107 config_dir, _ = os.path.split(config_path)\n108 if not os.path.isdir(config_dir):\n109 os.makedirs(config_dir)\n110 shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)\n111 return config_path\n112 \n113 def load_config_file(self, config_paths):\n114 \"\"\"Load a yaml config file from path.\"\"\"\n115 config_path = \"\"\n116 for possible_path in config_paths:\n117 if not os.path.isfile(possible_path):\n118 _LOGGER.debug(\"Config file \" + possible_path +\n119 \" not found\")\n120 else:\n121 config_path = possible_path\n122 break\n123 \n124 if not config_path:\n125 _LOGGER.info(\"No configuration files found.\")\n126 config_path = self.create_default_config(DEFAULT_CONFIG_PATH)\n127 \n128 try:\n129 with open(config_path, 'r') as stream:\n130 _LOGGER.info(\"Loaded config from %s\", config_path)\n131 return yaml.load(stream)\n132 except yaml.YAMLError as error:\n133 self.opsdroid.critical(error, 1)\n134 except FileNotFoundError as error:\n135 self.opsdroid.critical(str(error), 1)\n136 \n137 def setup_modules_directory(self, config):\n138 \"\"\"Create and configure the modules directory.\"\"\"\n139 module_path = os.path.expanduser(\n140 config.get(\"module-path\", DEFAULT_MODULES_PATH))\n141 sys.path.append(module_path)\n142 \n143 if not os.path.isdir(module_path):\n144 os.makedirs(module_path, exist_ok=True)\n145 \n146 self.modules_directory = os.path.join(module_path, MODULES_DIRECTORY)\n147 \n148 # Create modules directory if doesn't exist\n149 if not os.path.isdir(self.modules_directory):\n150 os.makedirs(self.modules_directory)\n151 \n152 def load_modules_from_config(self, config):\n153 \"\"\"Load all module types based on config.\"\"\"\n154 _LOGGER.debug(\"Loading modules from config\")\n155 \n156 
self.setup_modules_directory(config)\n157 \n158 connectors, databases, skills = None, None, None\n159 \n160 if 'databases' in config.keys():\n161 databases = self._load_modules('database', config['databases'])\n162 else:\n163 _LOGGER.warning(\"No databases in configuration\")\n164 \n165 if 'skills' in config.keys():\n166 skills = self._load_modules('skill', config['skills'])\n167 else:\n168 self.opsdroid.critical(\n169 \"No skills in configuration, at least 1 required\", 1)\n170 \n171 if 'connectors' in config.keys():\n172 connectors = self._load_modules('connector', config['connectors'])\n173 else:\n174 self.opsdroid.critical(\n175 \"No connectors in configuration, at least 1 required\", 1)\n176 \n177 return connectors, databases, skills\n178 \n179 def _load_modules(self, modules_type, modules):\n180 \"\"\"Install and load modules.\"\"\"\n181 _LOGGER.debug(\"Loading \" + modules_type + \" modules\")\n182 loaded_modules = []\n183 \n184 for module in modules:\n185 \n186 # Set up module config\n187 config = module\n188 config = {} if config is None else config\n189 config[\"name\"] = module[\"name\"]\n190 config[\"type\"] = modules_type\n191 config[\"module_path\"] = self.build_module_path(\"import\", config)\n192 config[\"install_path\"] = self.build_module_path(\"install\", config)\n193 if \"branch\" not in config:\n194 config[\"branch\"] = DEFAULT_MODULE_BRANCH\n195 \n196 # Remove module for reinstall if no-cache set\n197 self.check_cache(config)\n198 \n199 # Install module\n200 self._install_module(config)\n201 \n202 # Import module\n203 self.current_import_config = config\n204 module = self.import_module(config)\n205 if module is not None:\n206 loaded_modules.append({\n207 \"module\": module,\n208 \"config\": config})\n209 else:\n210 _LOGGER.error(\n211 \"Module \" + config[\"name\"] + \" failed to import\")\n212 \n213 return loaded_modules\n214 \n215 def _install_module(self, config):\n216 # pylint: disable=R0201\n217 \"\"\"Install a module.\"\"\"\n218 _LOGGER.debug(\"Installing \" + config[\"name\"])\n219 \n220 if os.path.isdir(config[\"install_path\"]) or \\\n221 os.path.isfile(config[\"install_path\"] + \".py\"):\n222 # TODO Allow for updating or reinstalling of modules\n223 _LOGGER.debug(\"Module \" + config[\"name\"] +\n224 \" already installed, skipping\")\n225 return\n226 \n227 if \"path\" in config:\n228 self._install_local_module(config)\n229 else:\n230 self._install_git_module(config)\n231 \n232 if os.path.isdir(config[\"install_path\"]):\n233 _LOGGER.debug(\"Installed \" + config[\"name\"] +\n234 \" to \" + config[\"install_path\"])\n235 else:\n236 _LOGGER.debug(\"Install of \" + config[\"name\"] + \" failed\")\n237 \n238 # Install module dependancies\n239 if os.path.isfile(config[\"install_path\"] + \"/requirements.txt\"):\n240 self.pip_install_deps(config[\"install_path\"] +\n241 \"/requirements.txt\")\n242 \n243 def _install_git_module(self, config):\n244 \"\"\"Install a module from a git repository.\"\"\"\n245 if config is not None and \"repo\" in config:\n246 git_url = config[\"repo\"]\n247 else:\n248 git_url = DEFAULT_GIT_URL + config[\"type\"] + \\\n249 \"-\" + config[\"name\"] + \".git\"\n250 \n251 if any(prefix in git_url for prefix in [\"http\", \"https\", \"ssh\"]):\n252 # TODO Test if url or ssh path exists\n253 # TODO Handle github authentication\n254 _LOGGER.debug(\"Cloning from remote repository\")\n255 self.git_clone(git_url, config[\"install_path\"],\n256 config[\"branch\"])\n257 else:\n258 if os.path.isdir(git_url):\n259 _LOGGER.debug(\"Cloning from local 
repository\")\n260 self.git_clone(git_url, config[\"install_path\"],\n261 config[\"branch\"])\n262 else:\n263 _LOGGER.debug(\"Could not find local git repo \" + git_url)\n264 \n265 @staticmethod\n266 def _install_local_module(config):\n267 \"\"\"Install a module from a local path.\"\"\"\n268 installed = False\n269 \n270 installdir, _ = os.path.split(config[\"install_path\"])\n271 if not os.path.isdir(installdir):\n272 os.makedirs(installdir, exist_ok=True)\n273 \n274 if os.path.isdir(config[\"path\"]):\n275 shutil.copytree(config[\"path\"], config[\"install_path\"])\n276 installed = True\n277 \n278 if os.path.isfile(config[\"path\"]):\n279 os.makedirs(config[\"install_path\"], exist_ok=True)\n280 shutil.copyfile(config[\"path\"], config[\"install_path\"] +\n281 \"/__init__.py\")\n282 installed = True\n283 \n284 if not installed:\n285 _LOGGER.error(\"Failed to install from \" + config[\"path\"])\n286\n[end of opsdroid/loader.py]\n[start of opsdroid/const.py]\n1 \"\"\"Constants used by OpsDroid.\"\"\"\n2 import os\n3 \n4 __version__ = \"0.8.0\"\n5 \n6 LOG_FILENAME = 'output.log'\n7 DEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\n8 MODULES_DIRECTORY = \"opsdroid-modules\"\n9 DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\n10 DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\n11 DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\n12 DEFAULT_MODULE_BRANCH = \"master\"\n13 EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n14 \"configuration/example_configuration.yaml\")\n15\n[end of opsdroid/const.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/opsdroid/const.py b/opsdroid/const.py\n--- a/opsdroid/const.py\n+++ b/opsdroid/const.py\n@@ -8,6 +8,7 @@\n MODULES_DIRECTORY = \"opsdroid-modules\"\n DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\n DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\n+DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\n DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\n DEFAULT_MODULE_BRANCH = \"master\"\n EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\ndiff --git a/opsdroid/loader.py b/opsdroid/loader.py\n--- a/opsdroid/loader.py\n+++ b/opsdroid/loader.py\n@@ -9,7 +9,8 @@\n import yaml\n from 
opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\n- DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE)\n+ DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE,\n+ DEFAULT_MODULE_DEPS_PATH)\n \n \n _LOGGER = logging.getLogger(__name__)\n@@ -90,7 +91,11 @@\n @staticmethod\n def pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n- process = subprocess.Popen([\"pip\", \"install\", \"-r\", requirements_path],\n+ process = subprocess.Popen([\"pip\", \"install\",\n+ \"--target={}\".format(\n+ DEFAULT_MODULE_DEPS_PATH),\n+ \"--ignore-installed\",\n+ \"-r\", requirements_path],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n@@ -181,6 +186,10 @@\n _LOGGER.debug(\"Loading \" + modules_type + \" modules\")\n loaded_modules = []\n \n+ if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):\n+ os.makedirs(DEFAULT_MODULE_DEPS_PATH)\n+ sys.path.append(DEFAULT_MODULE_DEPS_PATH)\n+\n for module in modules:\n \n # Set up module config\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/opsdroid/const.py b/opsdroid/const.py\\n--- a/opsdroid/const.py\\n+++ b/opsdroid/const.py\\n@@ -8,6 +8,7 @@\\n MODULES_DIRECTORY = \\\"opsdroid-modules\\\"\\n DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\\\"~\\\"), \\\".opsdroid\\\")\\n DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \\\"modules\\\")\\n+DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \\\"site-packages\\\")\\n DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \\\"configuration.yaml\\\")\\n DEFAULT_MODULE_BRANCH = \\\"master\\\"\\n EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\\ndiff --git a/opsdroid/loader.py b/opsdroid/loader.py\\n--- a/opsdroid/loader.py\\n+++ b/opsdroid/loader.py\\n@@ -9,7 +9,8 @@\\n import yaml\\n from opsdroid.const import (\\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\\n- DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE)\\n+ DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE,\\n+ DEFAULT_MODULE_DEPS_PATH)\\n \\n \\n _LOGGER = logging.getLogger(__name__)\\n@@ -90,7 +91,11 @@\\n @staticmethod\\n def pip_install_deps(requirements_path):\\n \\\"\\\"\\\"Pip install a requirements.txt file and wait for finish.\\\"\\\"\\\"\\n- process = subprocess.Popen([\\\"pip\\\", \\\"install\\\", \\\"-r\\\", requirements_path],\\n+ process = subprocess.Popen([\\\"pip\\\", \\\"install\\\",\\n+ \\\"--target={}\\\".format(\\n+ DEFAULT_MODULE_DEPS_PATH),\\n+ \\\"--ignore-installed\\\",\\n+ \\\"-r\\\", requirements_path],\\n shell=False,\\n stdout=subprocess.PIPE,\\n stderr=subprocess.PIPE)\\n@@ -181,6 +186,10 @@\\n _LOGGER.debug(\\\"Loading \\\" + modules_type + \\\" modules\\\")\\n loaded_modules = []\\n \\n+ if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):\\n+ os.makedirs(DEFAULT_MODULE_DEPS_PATH)\\n+ sys.path.append(DEFAULT_MODULE_DEPS_PATH)\\n+\\n for module in modules:\\n \\n # Set up module config\\n\", \"issue\": \"Module specific site-packages\\nIt could be good for modules to install their dependancies in a specific `site-packages` directory which is only added to the path when it is time to import the modules.\\r\\n\\r\\nA good place could be `~/.opsdroid/site-packages`.\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"Class for loading in modules to OpsDroid.\\\"\\\"\\\"\\n\\nimport logging\\nimport os\\nimport sys\\nimport shutil\\nimport subprocess\\nimport importlib\\nimport 
yaml\\nfrom opsdroid.const import (\\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\\n DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE)\\n\\n\\n_LOGGER = logging.getLogger(__name__)\\n\\n\\nclass Loader:\\n \\\"\\\"\\\"Class to load in config and modules.\\\"\\\"\\\"\\n\\n def __init__(self, opsdroid):\\n \\\"\\\"\\\"Create object with opsdroid instance.\\\"\\\"\\\"\\n self.opsdroid = opsdroid\\n self.modules_directory = None\\n self.current_import_config = None\\n _LOGGER.debug(\\\"Loaded loader\\\")\\n\\n @staticmethod\\n def import_module(config):\\n \\\"\\\"\\\"Import module namespace as variable and return it.\\\"\\\"\\\"\\n try:\\n module = importlib.import_module(\\n config[\\\"module_path\\\"] + \\\".\\\" + config[\\\"name\\\"])\\n _LOGGER.debug(\\\"Loaded \\\" + config[\\\"type\\\"] + \\\": \\\" +\\n config[\\\"module_path\\\"])\\n return module\\n except ImportError as error:\\n _LOGGER.debug(\\\"Failed to load \\\" + config[\\\"type\\\"] +\\n \\\" \\\" + config[\\\"module_path\\\"] + \\\".\\\" + config[\\\"name\\\"])\\n _LOGGER.debug(error)\\n\\n try:\\n module = importlib.import_module(\\n config[\\\"module_path\\\"])\\n _LOGGER.debug(\\\"Loaded \\\" + config[\\\"type\\\"] + \\\": \\\" +\\n config[\\\"module_path\\\"])\\n return module\\n except ImportError as error:\\n _LOGGER.debug(\\\"Failed to load \\\" + config[\\\"type\\\"] +\\n \\\" \\\" + config[\\\"module_path\\\"])\\n _LOGGER.debug(error)\\n\\n _LOGGER.error(\\\"Failed to load \\\" + config[\\\"type\\\"] +\\n \\\" \\\" + config[\\\"module_path\\\"])\\n return None\\n\\n @staticmethod\\n def check_cache(config):\\n \\\"\\\"\\\"Remove module if 'no-cache' set in config.\\\"\\\"\\\"\\n if \\\"no-cache\\\" in config \\\\\\n and config[\\\"no-cache\\\"]:\\n _LOGGER.debug(\\\"'no-cache' set, removing \\\" + config[\\\"install_path\\\"])\\n if os.path.isdir(config[\\\"install_path\\\"]):\\n shutil.rmtree(config[\\\"install_path\\\"])\\n if os.path.isfile(config[\\\"install_path\\\"] + \\\".py\\\"):\\n os.remove(config[\\\"install_path\\\"] + \\\".py\\\")\\n\\n def build_module_path(self, path_type, config):\\n \\\"\\\"\\\"Generate the module path from name and type.\\\"\\\"\\\"\\n if path_type == \\\"import\\\":\\n return MODULES_DIRECTORY + \\\".\\\" + config[\\\"type\\\"] + \\\\\\n \\\".\\\" + config[\\\"name\\\"]\\n elif path_type == \\\"install\\\":\\n return self.modules_directory + \\\"/\\\" + config[\\\"type\\\"] + \\\\\\n \\\"/\\\" + config[\\\"name\\\"]\\n\\n @staticmethod\\n def git_clone(git_url, install_path, branch):\\n \\\"\\\"\\\"Clone a git repo to a location and wait for finish.\\\"\\\"\\\"\\n process = subprocess.Popen([\\\"git\\\", \\\"clone\\\", \\\"-b\\\", branch,\\n git_url, install_path], shell=False,\\n stdout=subprocess.PIPE,\\n stderr=subprocess.PIPE)\\n for output in process.communicate():\\n if output != \\\"\\\":\\n for line in output.splitlines():\\n _LOGGER.debug(str(line).strip())\\n process.wait()\\n\\n @staticmethod\\n def pip_install_deps(requirements_path):\\n \\\"\\\"\\\"Pip install a requirements.txt file and wait for finish.\\\"\\\"\\\"\\n process = subprocess.Popen([\\\"pip\\\", \\\"install\\\", \\\"-r\\\", requirements_path],\\n shell=False,\\n stdout=subprocess.PIPE,\\n stderr=subprocess.PIPE)\\n for output in process.communicate():\\n if output != \\\"\\\":\\n for line in output.splitlines():\\n _LOGGER.debug(str(line).strip())\\n process.wait()\\n\\n @staticmethod\\n def create_default_config(config_path):\\n \\\"\\\"\\\"Create a default config file 
based on the included example.\\\"\\\"\\\"\\n _LOGGER.info(\\\"Creating %s.\\\", config_path)\\n config_dir, _ = os.path.split(config_path)\\n if not os.path.isdir(config_dir):\\n os.makedirs(config_dir)\\n shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)\\n return config_path\\n\\n def load_config_file(self, config_paths):\\n \\\"\\\"\\\"Load a yaml config file from path.\\\"\\\"\\\"\\n config_path = \\\"\\\"\\n for possible_path in config_paths:\\n if not os.path.isfile(possible_path):\\n _LOGGER.debug(\\\"Config file \\\" + possible_path +\\n \\\" not found\\\")\\n else:\\n config_path = possible_path\\n break\\n\\n if not config_path:\\n _LOGGER.info(\\\"No configuration files found.\\\")\\n config_path = self.create_default_config(DEFAULT_CONFIG_PATH)\\n\\n try:\\n with open(config_path, 'r') as stream:\\n _LOGGER.info(\\\"Loaded config from %s\\\", config_path)\\n return yaml.load(stream)\\n except yaml.YAMLError as error:\\n self.opsdroid.critical(error, 1)\\n except FileNotFoundError as error:\\n self.opsdroid.critical(str(error), 1)\\n\\n def setup_modules_directory(self, config):\\n \\\"\\\"\\\"Create and configure the modules directory.\\\"\\\"\\\"\\n module_path = os.path.expanduser(\\n config.get(\\\"module-path\\\", DEFAULT_MODULES_PATH))\\n sys.path.append(module_path)\\n\\n if not os.path.isdir(module_path):\\n os.makedirs(module_path, exist_ok=True)\\n\\n self.modules_directory = os.path.join(module_path, MODULES_DIRECTORY)\\n\\n # Create modules directory if doesn't exist\\n if not os.path.isdir(self.modules_directory):\\n os.makedirs(self.modules_directory)\\n\\n def load_modules_from_config(self, config):\\n \\\"\\\"\\\"Load all module types based on config.\\\"\\\"\\\"\\n _LOGGER.debug(\\\"Loading modules from config\\\")\\n\\n self.setup_modules_directory(config)\\n\\n connectors, databases, skills = None, None, None\\n\\n if 'databases' in config.keys():\\n databases = self._load_modules('database', config['databases'])\\n else:\\n _LOGGER.warning(\\\"No databases in configuration\\\")\\n\\n if 'skills' in config.keys():\\n skills = self._load_modules('skill', config['skills'])\\n else:\\n self.opsdroid.critical(\\n \\\"No skills in configuration, at least 1 required\\\", 1)\\n\\n if 'connectors' in config.keys():\\n connectors = self._load_modules('connector', config['connectors'])\\n else:\\n self.opsdroid.critical(\\n \\\"No connectors in configuration, at least 1 required\\\", 1)\\n\\n return connectors, databases, skills\\n\\n def _load_modules(self, modules_type, modules):\\n \\\"\\\"\\\"Install and load modules.\\\"\\\"\\\"\\n _LOGGER.debug(\\\"Loading \\\" + modules_type + \\\" modules\\\")\\n loaded_modules = []\\n\\n for module in modules:\\n\\n # Set up module config\\n config = module\\n config = {} if config is None else config\\n config[\\\"name\\\"] = module[\\\"name\\\"]\\n config[\\\"type\\\"] = modules_type\\n config[\\\"module_path\\\"] = self.build_module_path(\\\"import\\\", config)\\n config[\\\"install_path\\\"] = self.build_module_path(\\\"install\\\", config)\\n if \\\"branch\\\" not in config:\\n config[\\\"branch\\\"] = DEFAULT_MODULE_BRANCH\\n\\n # Remove module for reinstall if no-cache set\\n self.check_cache(config)\\n\\n # Install module\\n self._install_module(config)\\n\\n # Import module\\n self.current_import_config = config\\n module = self.import_module(config)\\n if module is not None:\\n loaded_modules.append({\\n \\\"module\\\": module,\\n \\\"config\\\": config})\\n else:\\n _LOGGER.error(\\n \\\"Module \\\" + 
config[\\\"name\\\"] + \\\" failed to import\\\")\\n\\n return loaded_modules\\n\\n def _install_module(self, config):\\n # pylint: disable=R0201\\n \\\"\\\"\\\"Install a module.\\\"\\\"\\\"\\n _LOGGER.debug(\\\"Installing \\\" + config[\\\"name\\\"])\\n\\n if os.path.isdir(config[\\\"install_path\\\"]) or \\\\\\n os.path.isfile(config[\\\"install_path\\\"] + \\\".py\\\"):\\n # TODO Allow for updating or reinstalling of modules\\n _LOGGER.debug(\\\"Module \\\" + config[\\\"name\\\"] +\\n \\\" already installed, skipping\\\")\\n return\\n\\n if \\\"path\\\" in config:\\n self._install_local_module(config)\\n else:\\n self._install_git_module(config)\\n\\n if os.path.isdir(config[\\\"install_path\\\"]):\\n _LOGGER.debug(\\\"Installed \\\" + config[\\\"name\\\"] +\\n \\\" to \\\" + config[\\\"install_path\\\"])\\n else:\\n _LOGGER.debug(\\\"Install of \\\" + config[\\\"name\\\"] + \\\" failed\\\")\\n\\n # Install module dependancies\\n if os.path.isfile(config[\\\"install_path\\\"] + \\\"/requirements.txt\\\"):\\n self.pip_install_deps(config[\\\"install_path\\\"] +\\n \\\"/requirements.txt\\\")\\n\\n def _install_git_module(self, config):\\n \\\"\\\"\\\"Install a module from a git repository.\\\"\\\"\\\"\\n if config is not None and \\\"repo\\\" in config:\\n git_url = config[\\\"repo\\\"]\\n else:\\n git_url = DEFAULT_GIT_URL + config[\\\"type\\\"] + \\\\\\n \\\"-\\\" + config[\\\"name\\\"] + \\\".git\\\"\\n\\n if any(prefix in git_url for prefix in [\\\"http\\\", \\\"https\\\", \\\"ssh\\\"]):\\n # TODO Test if url or ssh path exists\\n # TODO Handle github authentication\\n _LOGGER.debug(\\\"Cloning from remote repository\\\")\\n self.git_clone(git_url, config[\\\"install_path\\\"],\\n config[\\\"branch\\\"])\\n else:\\n if os.path.isdir(git_url):\\n _LOGGER.debug(\\\"Cloning from local repository\\\")\\n self.git_clone(git_url, config[\\\"install_path\\\"],\\n config[\\\"branch\\\"])\\n else:\\n _LOGGER.debug(\\\"Could not find local git repo \\\" + git_url)\\n\\n @staticmethod\\n def _install_local_module(config):\\n \\\"\\\"\\\"Install a module from a local path.\\\"\\\"\\\"\\n installed = False\\n\\n installdir, _ = os.path.split(config[\\\"install_path\\\"])\\n if not os.path.isdir(installdir):\\n os.makedirs(installdir, exist_ok=True)\\n\\n if os.path.isdir(config[\\\"path\\\"]):\\n shutil.copytree(config[\\\"path\\\"], config[\\\"install_path\\\"])\\n installed = True\\n\\n if os.path.isfile(config[\\\"path\\\"]):\\n os.makedirs(config[\\\"install_path\\\"], exist_ok=True)\\n shutil.copyfile(config[\\\"path\\\"], config[\\\"install_path\\\"] +\\n \\\"/__init__.py\\\")\\n installed = True\\n\\n if not installed:\\n _LOGGER.error(\\\"Failed to install from \\\" + config[\\\"path\\\"])\\n\", \"path\": \"opsdroid/loader.py\"}, {\"content\": \"\\\"\\\"\\\"Constants used by OpsDroid.\\\"\\\"\\\"\\nimport os\\n\\n__version__ = \\\"0.8.0\\\"\\n\\nLOG_FILENAME = 'output.log'\\nDEFAULT_GIT_URL = \\\"https://github.com/opsdroid/\\\"\\nMODULES_DIRECTORY = \\\"opsdroid-modules\\\"\\nDEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\\\"~\\\"), \\\".opsdroid\\\")\\nDEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \\\"modules\\\")\\nDEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \\\"configuration.yaml\\\")\\nDEFAULT_MODULE_BRANCH = \\\"master\\\"\\nEXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\\n \\\"configuration/example_configuration.yaml\\\")\\n\", \"path\": 
\"opsdroid/const.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3675,"string":"3,675"},"num_tokens_diff":{"kind":"number","value":463,"string":"463"}}},{"rowIdx":18121,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_38680"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"zigpy__zha-device-handlers-528"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\n[BUG]Ikea FYRTUR blind and remote reporting wrong battery or no battery\nFirst \r\n\r\nBlind:\r\n\r\nNever updates the battery automatically.\r\n\r\nHowever if you do a zigbee get cluster attribute for battery percentage remaining it does return a correct value.\r\n\r\nRemote:\r\n\r\ndoes update battery on its own.\r\n\r\nBoth:\r\n\r\nboth values are displayed correctly in the get attribute box but the sensor for the batteries figures are always half what it shows.\r\n![zha cluster 2](https://user-images.githubusercontent.com/44187409/77225998-78782080-6b6c-11ea-908b-7518207189ad.JPG)\r\n![zha cluster 1](https://user-images.githubusercontent.com/44187409/77226005-8332b580-6b6c-11ea-8a51-03e128548433.JPG)\r\n\r\n\n\n\n\n[start of zhaquirks/ikea/blinds.py]\n1 \"\"\"Device handler for IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n2 from zigpy.profiles import zha\n3 from zigpy.quirks import CustomDevice\n4 from zigpy.zcl.clusters.closures import WindowCovering\n5 from zigpy.zcl.clusters.general import (\n6 Basic,\n7 Groups,\n8 Identify,\n9 Ota,\n10 PollControl,\n11 PowerConfiguration,\n12 Scenes,\n13 )\n14 from zigpy.zcl.clusters.lightlink import LightLink\n15 \n16 from . import IKEA\n17 from .. 
import DoublingPowerConfigurationCluster\n18 from ..const import (\n19 DEVICE_TYPE,\n20 ENDPOINTS,\n21 INPUT_CLUSTERS,\n22 MODELS_INFO,\n23 OUTPUT_CLUSTERS,\n24 PROFILE_ID,\n25 )\n26 \n27 IKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n28 \n29 \n30 class IkeaTradfriRollerBlinds(CustomDevice):\n31 \"\"\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n32 \n33 signature = {\n34 # \n38 MODELS_INFO: [\n39 (IKEA, \"FYRTUR block-out roller blind\"),\n40 (IKEA, \"KADRILJ roller blind\"),\n41 ],\n42 ENDPOINTS: {\n43 1: {\n44 PROFILE_ID: zha.PROFILE_ID,\n45 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n46 INPUT_CLUSTERS: [\n47 Basic.cluster_id,\n48 PowerConfiguration.cluster_id,\n49 Identify.cluster_id,\n50 Groups.cluster_id,\n51 Scenes.cluster_id,\n52 PollControl.cluster_id,\n53 WindowCovering.cluster_id,\n54 LightLink.cluster_id,\n55 ],\n56 OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n57 }\n58 },\n59 }\n60 \n61 replacement = {\n62 \"endpoints\": {\n63 1: {\n64 \"profile_id\": zha.PROFILE_ID,\n65 \"device_type\": zha.DeviceType.WINDOW_COVERING_DEVICE,\n66 \"input_clusters\": [\n67 Basic.cluster_id,\n68 DoublingPowerConfigurationCluster,\n69 Identify.cluster_id,\n70 Groups.cluster_id,\n71 Scenes.cluster_id,\n72 PollControl.cluster_id,\n73 WindowCovering.cluster_id,\n74 LightLink.cluster_id,\n75 ],\n76 \"output_clusters\": [Ota.cluster_id, LightLink.cluster_id],\n77 }\n78 }\n79 }\n80\n[end of zhaquirks/ikea/blinds.py]\n[start of zhaquirks/ikea/opencloseremote.py]\n1 \"\"\"Device handler for IKEA of Sweden TRADFRI remote control.\"\"\"\n2 from zigpy.profiles import zha\n3 from zigpy.quirks import CustomDevice\n4 from zigpy.zcl.clusters.closures import WindowCovering\n5 from zigpy.zcl.clusters.general import (\n6 Alarms,\n7 Basic,\n8 Groups,\n9 Identify,\n10 LevelControl,\n11 OnOff,\n12 Ota,\n13 PollControl,\n14 PowerConfiguration,\n15 )\n16 from zigpy.zcl.clusters.lightlink import LightLink\n17 \n18 from . import IKEA\n19 from .. 
import DoublingPowerConfigurationCluster\n20 from ..const import (\n21 DEVICE_TYPE,\n22 ENDPOINTS,\n23 INPUT_CLUSTERS,\n24 MODELS_INFO,\n25 OUTPUT_CLUSTERS,\n26 PROFILE_ID,\n27 )\n28 \n29 IKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n30 \n31 \n32 class IkeaTradfriOpenCloseRemote(CustomDevice):\n33 \"\"\"Custom device representing IKEA of Sweden TRADFRI remote control.\"\"\"\n34 \n35 signature = {\n36 MODELS_INFO: [(\"\\x02KE\", \"TRADFRI open/close remote\")],\n37 ENDPOINTS: {\n38 1: {\n39 PROFILE_ID: zha.PROFILE_ID,\n40 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n41 INPUT_CLUSTERS: [\n42 Basic.cluster_id,\n43 PowerConfiguration.cluster_id,\n44 Identify.cluster_id,\n45 Alarms.cluster_id,\n46 PollControl.cluster_id,\n47 LightLink.cluster_id,\n48 IKEA_CLUSTER_ID,\n49 ],\n50 OUTPUT_CLUSTERS: [\n51 Identify.cluster_id,\n52 Groups.cluster_id,\n53 OnOff.cluster_id,\n54 LevelControl.cluster_id,\n55 Ota.cluster_id,\n56 WindowCovering.cluster_id,\n57 LightLink.cluster_id,\n58 ],\n59 }\n60 },\n61 }\n62 \n63 replacement = {\n64 MODELS_INFO: [(IKEA, \"TRADFRI open/close remote\")],\n65 ENDPOINTS: {\n66 1: {\n67 PROFILE_ID: zha.PROFILE_ID,\n68 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n69 INPUT_CLUSTERS: [\n70 Basic.cluster_id,\n71 DoublingPowerConfigurationCluster,\n72 Identify.cluster_id,\n73 Alarms.cluster_id,\n74 PollControl.cluster_id,\n75 LightLink.cluster_id,\n76 IKEA_CLUSTER_ID,\n77 ],\n78 OUTPUT_CLUSTERS: [\n79 Identify.cluster_id,\n80 Groups.cluster_id,\n81 OnOff.cluster_id,\n82 LevelControl.cluster_id,\n83 Ota.cluster_id,\n84 WindowCovering.cluster_id,\n85 LightLink.cluster_id,\n86 ],\n87 }\n88 },\n89 }\n90\n[end of zhaquirks/ikea/opencloseremote.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/zhaquirks/ikea/blinds.py b/zhaquirks/ikea/blinds.py\n--- a/zhaquirks/ikea/blinds.py\n+++ b/zhaquirks/ikea/blinds.py\n@@ -31,9 +31,9 @@\n \"\"\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n \n signature = {\n- # \n MODELS_INFO: [\n (IKEA, \"FYRTUR block-out roller blind\"),\n@@ -52,6 +52,7 @@\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n+ IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n@@ -59,11 +60,11 @@\n }\n \n replacement = {\n- \"endpoints\": {\n+ ENDPOINTS: {\n 1: {\n- \"profile_id\": zha.PROFILE_ID,\n- \"device_type\": zha.DeviceType.WINDOW_COVERING_DEVICE,\n- \"input_clusters\": [\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n+ INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n@@ -72,8 +73,9 @@\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n+ IKEA_CLUSTER_ID,\n ],\n- \"output_clusters\": [Ota.cluster_id, LightLink.cluster_id],\n+ OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n }\n }\ndiff --git a/zhaquirks/ikea/opencloseremote.py b/zhaquirks/ikea/opencloseremote.py\n--- a/zhaquirks/ikea/opencloseremote.py\n+++ b/zhaquirks/ikea/opencloseremote.py\n@@ -33,7 +33,14 @@\n \"\"\"Custom device representing IKEA of Sweden TRADFRI remote control.\"\"\"\n \n signature = {\n- MODELS_INFO: [(\"\\x02KE\", \"TRADFRI open/close remote\")],\n+ # \n+ MODELS_INFO: [\n+ (\"\\x02KE\", \"TRADFRI open/close remote\"),\n+ (IKEA, \"TRADFRI open/close remote\"),\n+ ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/zhaquirks/ikea/blinds.py b/zhaquirks/ikea/blinds.py\\n--- a/zhaquirks/ikea/blinds.py\\n+++ b/zhaquirks/ikea/blinds.py\\n@@ -31,9 +31,9 @@\\n \\\"\\\"\\\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\\\"\\\"\\\"\\n \\n signature = {\\n- # \\n MODELS_INFO: [\\n (IKEA, \\\"FYRTUR block-out roller blind\\\"),\\n@@ -52,6 +52,7 @@\\n PollControl.cluster_id,\\n WindowCovering.cluster_id,\\n LightLink.cluster_id,\\n+ IKEA_CLUSTER_ID,\\n ],\\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\\n }\\n@@ -59,11 +60,11 @@\\n }\\n \\n replacement = {\\n- \\\"endpoints\\\": {\\n+ ENDPOINTS: {\\n 1: {\\n- \\\"profile_id\\\": zha.PROFILE_ID,\\n- \\\"device_type\\\": zha.DeviceType.WINDOW_COVERING_DEVICE,\\n- 
\\\"input_clusters\\\": [\\n+ PROFILE_ID: zha.PROFILE_ID,\\n+ DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\\n+ INPUT_CLUSTERS: [\\n Basic.cluster_id,\\n DoublingPowerConfigurationCluster,\\n Identify.cluster_id,\\n@@ -72,8 +73,9 @@\\n PollControl.cluster_id,\\n WindowCovering.cluster_id,\\n LightLink.cluster_id,\\n+ IKEA_CLUSTER_ID,\\n ],\\n- \\\"output_clusters\\\": [Ota.cluster_id, LightLink.cluster_id],\\n+ OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\\n }\\n }\\n }\\ndiff --git a/zhaquirks/ikea/opencloseremote.py b/zhaquirks/ikea/opencloseremote.py\\n--- a/zhaquirks/ikea/opencloseremote.py\\n+++ b/zhaquirks/ikea/opencloseremote.py\\n@@ -33,7 +33,14 @@\\n \\\"\\\"\\\"Custom device representing IKEA of Sweden TRADFRI remote control.\\\"\\\"\\\"\\n \\n signature = {\\n- MODELS_INFO: [(\\\"\\\\x02KE\\\", \\\"TRADFRI open/close remote\\\")],\\n+ # \\n+ MODELS_INFO: [\\n+ (\\\"\\\\x02KE\\\", \\\"TRADFRI open/close remote\\\"),\\n+ (IKEA, \\\"TRADFRI open/close remote\\\"),\\n+ ],\\n ENDPOINTS: {\\n 1: {\\n PROFILE_ID: zha.PROFILE_ID,\\n\", \"issue\": \"[BUG]Ikea FYRTUR blind and remote reporting wrong battery or no battery\\nFirst \\r\\n\\r\\nBlind:\\r\\n\\r\\nNever updates the battery automatically.\\r\\n\\r\\nHowever if you do a zigbee get cluster attribute for battery percentage remaining it does return a correct value.\\r\\n\\r\\nRemote:\\r\\n\\r\\ndoes update battery on its own.\\r\\n\\r\\nBoth:\\r\\n\\r\\nboth values are displayed correctly in the get attribute box but the sensor for the batteries figures are always half what it shows.\\r\\n![zha cluster 2](https://user-images.githubusercontent.com/44187409/77225998-78782080-6b6c-11ea-908b-7518207189ad.JPG)\\r\\n![zha cluster 1](https://user-images.githubusercontent.com/44187409/77226005-8332b580-6b6c-11ea-8a51-03e128548433.JPG)\\r\\n\\r\\n\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"Device handler for IKEA of Sweden TRADFRI Fyrtur blinds.\\\"\\\"\\\"\\nfrom zigpy.profiles import zha\\nfrom zigpy.quirks import CustomDevice\\nfrom zigpy.zcl.clusters.closures import WindowCovering\\nfrom zigpy.zcl.clusters.general import (\\n Basic,\\n Groups,\\n Identify,\\n Ota,\\n PollControl,\\n PowerConfiguration,\\n Scenes,\\n)\\nfrom zigpy.zcl.clusters.lightlink import LightLink\\n\\nfrom . import IKEA\\nfrom .. 
import DoublingPowerConfigurationCluster\\nfrom ..const import (\\n DEVICE_TYPE,\\n ENDPOINTS,\\n INPUT_CLUSTERS,\\n MODELS_INFO,\\n OUTPUT_CLUSTERS,\\n PROFILE_ID,\\n)\\n\\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\\n\\n\\nclass IkeaTradfriRollerBlinds(CustomDevice):\\n \\\"\\\"\\\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\\\"\\\"\\\"\\n\\n signature = {\\n # \\n MODELS_INFO: [\\n (IKEA, \\\"FYRTUR block-out roller blind\\\"),\\n (IKEA, \\\"KADRILJ roller blind\\\"),\\n ],\\n ENDPOINTS: {\\n 1: {\\n PROFILE_ID: zha.PROFILE_ID,\\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\\n INPUT_CLUSTERS: [\\n Basic.cluster_id,\\n PowerConfiguration.cluster_id,\\n Identify.cluster_id,\\n Groups.cluster_id,\\n Scenes.cluster_id,\\n PollControl.cluster_id,\\n WindowCovering.cluster_id,\\n LightLink.cluster_id,\\n ],\\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\\n }\\n },\\n }\\n\\n replacement = {\\n \\\"endpoints\\\": {\\n 1: {\\n \\\"profile_id\\\": zha.PROFILE_ID,\\n \\\"device_type\\\": zha.DeviceType.WINDOW_COVERING_DEVICE,\\n \\\"input_clusters\\\": [\\n Basic.cluster_id,\\n DoublingPowerConfigurationCluster,\\n Identify.cluster_id,\\n Groups.cluster_id,\\n Scenes.cluster_id,\\n PollControl.cluster_id,\\n WindowCovering.cluster_id,\\n LightLink.cluster_id,\\n ],\\n \\\"output_clusters\\\": [Ota.cluster_id, LightLink.cluster_id],\\n }\\n }\\n }\\n\", \"path\": \"zhaquirks/ikea/blinds.py\"}, {\"content\": \"\\\"\\\"\\\"Device handler for IKEA of Sweden TRADFRI remote control.\\\"\\\"\\\"\\nfrom zigpy.profiles import zha\\nfrom zigpy.quirks import CustomDevice\\nfrom zigpy.zcl.clusters.closures import WindowCovering\\nfrom zigpy.zcl.clusters.general import (\\n Alarms,\\n Basic,\\n Groups,\\n Identify,\\n LevelControl,\\n OnOff,\\n Ota,\\n PollControl,\\n PowerConfiguration,\\n)\\nfrom zigpy.zcl.clusters.lightlink import LightLink\\n\\nfrom . import IKEA\\nfrom .. 
import DoublingPowerConfigurationCluster\\nfrom ..const import (\\n DEVICE_TYPE,\\n ENDPOINTS,\\n INPUT_CLUSTERS,\\n MODELS_INFO,\\n OUTPUT_CLUSTERS,\\n PROFILE_ID,\\n)\\n\\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\\n\\n\\nclass IkeaTradfriOpenCloseRemote(CustomDevice):\\n \\\"\\\"\\\"Custom device representing IKEA of Sweden TRADFRI remote control.\\\"\\\"\\\"\\n\\n signature = {\\n MODELS_INFO: [(\\\"\\\\x02KE\\\", \\\"TRADFRI open/close remote\\\")],\\n ENDPOINTS: {\\n 1: {\\n PROFILE_ID: zha.PROFILE_ID,\\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\\n INPUT_CLUSTERS: [\\n Basic.cluster_id,\\n PowerConfiguration.cluster_id,\\n Identify.cluster_id,\\n Alarms.cluster_id,\\n PollControl.cluster_id,\\n LightLink.cluster_id,\\n IKEA_CLUSTER_ID,\\n ],\\n OUTPUT_CLUSTERS: [\\n Identify.cluster_id,\\n Groups.cluster_id,\\n OnOff.cluster_id,\\n LevelControl.cluster_id,\\n Ota.cluster_id,\\n WindowCovering.cluster_id,\\n LightLink.cluster_id,\\n ],\\n }\\n },\\n }\\n\\n replacement = {\\n MODELS_INFO: [(IKEA, \\\"TRADFRI open/close remote\\\")],\\n ENDPOINTS: {\\n 1: {\\n PROFILE_ID: zha.PROFILE_ID,\\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\\n INPUT_CLUSTERS: [\\n Basic.cluster_id,\\n DoublingPowerConfigurationCluster,\\n Identify.cluster_id,\\n Alarms.cluster_id,\\n PollControl.cluster_id,\\n LightLink.cluster_id,\\n IKEA_CLUSTER_ID,\\n ],\\n OUTPUT_CLUSTERS: [\\n Identify.cluster_id,\\n Groups.cluster_id,\\n OnOff.cluster_id,\\n LevelControl.cluster_id,\\n Ota.cluster_id,\\n WindowCovering.cluster_id,\\n LightLink.cluster_id,\\n ],\\n }\\n },\\n }\\n\", \"path\": \"zhaquirks/ikea/opencloseremote.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2211,"string":"2,211"},"num_tokens_diff":{"kind":"number","value":789,"string":"789"}}},{"rowIdx":18122,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_12128"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"nf-core__tools-1357"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nLinting should fail if pipeline module file is edited\nIn https://github.com/ewels/nf-core-methylseq/pull/2 I have edited a module file that was pulled from nf-core/modules. I shouldn't be allowed to do this.\r\n\r\nThe linting warns me that something is amiss:\r\n\r\n```\r\n│ bismark/align │ modules/nf-cor… │ Local copy of │\r\n│ │ │ module outdated │\r\n```\r\n\r\nBut it should be checking the git sha in `modules.json` and recognising that it doesn't match what it expects. 
Then throwing a hard error.\r\n\r\nPossible that the code for this is lurking in a PR that's waiting to be merged..\n\n\n\n[start of nf_core/modules/lint/module_changes.py]\n1 \"\"\"\n2 Check whether the content of a module has changed compared to the original repository\n3 \"\"\"\n4 import os\n5 import requests\n6 import rich\n7 from nf_core.modules.lint import LintResult\n8 \n9 \n10 def module_changes(module_lint_object, module):\n11 \"\"\"\n12 Checks whether installed nf-core modules have changed compared to the\n13 original repository\n14 Downloads the 'main.nf' and 'meta.yml' files for every module\n15 and compares them to the local copies\n16 \n17 If the module has a 'git_sha', the file content is checked against this sha\n18 \"\"\"\n19 files_to_check = [\"main.nf\", \"meta.yml\"]\n20 \n21 # Loop over nf-core modules\n22 module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module_lint_object.modules_repo.branch}/modules/{module.module_name}/\"\n23 \n24 # If module.git_sha specified, check specific commit version for changes\n25 if module.git_sha:\n26 module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module.git_sha}/modules/{module.module_name}/\"\n27 \n28 for f in files_to_check:\n29 # open local copy, continue if file not found (a failed message has already been issued in this case)\n30 try:\n31 local_copy = open(os.path.join(module.module_dir, f), \"r\").read()\n32 except FileNotFoundError as e:\n33 continue\n34 \n35 # Download remote copy and compare\n36 url = module_base_url + f\n37 r = requests.get(url=url)\n38 \n39 if r.status_code != 200:\n40 module.warned.append(\n41 (\n42 \"check_local_copy\",\n43 f\"Could not fetch remote copy, skipping comparison.\",\n44 f\"{os.path.join(module.module_dir, f)}\",\n45 )\n46 )\n47 else:\n48 try:\n49 remote_copy = r.content.decode(\"utf-8\")\n50 \n51 if local_copy != remote_copy:\n52 module.warned.append(\n53 (\n54 \"check_local_copy\",\n55 \"Local copy of module outdated\",\n56 f\"{os.path.join(module.module_dir, f)}\",\n57 )\n58 )\n59 else:\n60 module.passed.append(\n61 (\n62 \"check_local_copy\",\n63 \"Local copy of module up to date\",\n64 f\"{os.path.join(module.module_dir, f)}\",\n65 )\n66 )\n67 except UnicodeDecodeError as e:\n68 module.warned.append(\n69 (\n70 \"check_local_copy\",\n71 f\"Could not decode file from {url}. Skipping comparison ({e})\",\n72 f\"{os.path.join(module.module_dir, f)}\",\n73 )\n74 )\n75\n[end of nf_core/modules/lint/module_changes.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/nf_core/modules/lint/module_changes.py b/nf_core/modules/lint/module_changes.py\n--- a/nf_core/modules/lint/module_changes.py\n+++ b/nf_core/modules/lint/module_changes.py\n@@ -49,10 +49,10 @@\n remote_copy = r.content.decode(\"utf-8\")\n \n if local_copy != remote_copy:\n- module.warned.append(\n+ module.failed.append(\n (\n \"check_local_copy\",\n- \"Local copy of module outdated\",\n+ \"Local copy of module does not match remote\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/nf_core/modules/lint/module_changes.py b/nf_core/modules/lint/module_changes.py\\n--- a/nf_core/modules/lint/module_changes.py\\n+++ b/nf_core/modules/lint/module_changes.py\\n@@ -49,10 +49,10 @@\\n remote_copy = r.content.decode(\\\"utf-8\\\")\\n \\n if local_copy != remote_copy:\\n- module.warned.append(\\n+ module.failed.append(\\n (\\n \\\"check_local_copy\\\",\\n- \\\"Local copy of module outdated\\\",\\n+ \\\"Local copy of module does not match remote\\\",\\n f\\\"{os.path.join(module.module_dir, f)}\\\",\\n )\\n )\\n\", \"issue\": \"Linting should fail if pipeline module file is edited\\nIn https://github.com/ewels/nf-core-methylseq/pull/2 I have edited a module file that was pulled from nf-core/modules. I shouldn't be allowed to do this.\\r\\n\\r\\nThe linting warns me that something is amiss:\\r\\n\\r\\n```\\r\\n\\u2502 bismark/align \\u2502 modules/nf-cor\\u2026 \\u2502 Local copy of \\u2502\\r\\n\\u2502 \\u2502 \\u2502 module outdated \\u2502\\r\\n```\\r\\n\\r\\nBut it should be checking the git sha in `modules.json` and recognising that it doesn't match what it expects. 
Then throwing a hard error.\\r\\n\\r\\nPossible that the code for this is lurking in a PR that's waiting to be merged..\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"\\nCheck whether the content of a module has changed compared to the original repository\\n\\\"\\\"\\\"\\nimport os\\nimport requests\\nimport rich\\nfrom nf_core.modules.lint import LintResult\\n\\n\\ndef module_changes(module_lint_object, module):\\n \\\"\\\"\\\"\\n Checks whether installed nf-core modules have changed compared to the\\n original repository\\n Downloads the 'main.nf' and 'meta.yml' files for every module\\n and compares them to the local copies\\n\\n If the module has a 'git_sha', the file content is checked against this sha\\n \\\"\\\"\\\"\\n files_to_check = [\\\"main.nf\\\", \\\"meta.yml\\\"]\\n\\n # Loop over nf-core modules\\n module_base_url = f\\\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module_lint_object.modules_repo.branch}/modules/{module.module_name}/\\\"\\n\\n # If module.git_sha specified, check specific commit version for changes\\n if module.git_sha:\\n module_base_url = f\\\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module.git_sha}/modules/{module.module_name}/\\\"\\n\\n for f in files_to_check:\\n # open local copy, continue if file not found (a failed message has already been issued in this case)\\n try:\\n local_copy = open(os.path.join(module.module_dir, f), \\\"r\\\").read()\\n except FileNotFoundError as e:\\n continue\\n\\n # Download remote copy and compare\\n url = module_base_url + f\\n r = requests.get(url=url)\\n\\n if r.status_code != 200:\\n module.warned.append(\\n (\\n \\\"check_local_copy\\\",\\n f\\\"Could not fetch remote copy, skipping comparison.\\\",\\n f\\\"{os.path.join(module.module_dir, f)}\\\",\\n )\\n )\\n else:\\n try:\\n remote_copy = r.content.decode(\\\"utf-8\\\")\\n\\n if local_copy != remote_copy:\\n module.warned.append(\\n (\\n \\\"check_local_copy\\\",\\n \\\"Local copy of module outdated\\\",\\n f\\\"{os.path.join(module.module_dir, f)}\\\",\\n )\\n )\\n else:\\n module.passed.append(\\n (\\n \\\"check_local_copy\\\",\\n \\\"Local copy of module up to date\\\",\\n f\\\"{os.path.join(module.module_dir, f)}\\\",\\n )\\n )\\n except UnicodeDecodeError as e:\\n module.warned.append(\\n (\\n \\\"check_local_copy\\\",\\n f\\\"Could not decode file from {url}. 
Skipping comparison ({e})\\\",\\n f\\\"{os.path.join(module.module_dir, f)}\\\",\\n )\\n )\\n\", \"path\": \"nf_core/modules/lint/module_changes.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1365,"string":"1,365"},"num_tokens_diff":{"kind":"number","value":142,"string":"142"}}},{"rowIdx":18123,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_6286"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"digitalfabrik__integreat-cms-284"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nInclude tailwind.css via npm instead of static file\nAt the moment, we include tailwind.css as a static file in our repo.\r\nInstead, we should use [the recommended installation via npm](https://tailwindcss.com/docs/installation/) where we can configure exactly which parts we need and compile a minified css file in our deployment chain.\n\n\n\n[start of backend/cms/views/media/media_list_view.py]\n1 from django.contrib.auth.decorators import login_required\n2 from django.shortcuts import render\n3 from django.utils.decorators import method_decorator\n4 from django.views.generic import TemplateView\n5 \n6 from ...decorators import region_permission_required\n7 from ...models import Document\n8 \n9 \n10 @method_decorator(login_required, name='dispatch')\n11 @method_decorator(region_permission_required, name='dispatch')\n12 class MediaListView(TemplateView):\n13 template_name = 'media/list.html'\n14 base_context = {'current_menu_item': 'media'}\n15 \n16 def get(self, request, *args, **kwargs):\n17 documents = Document.objects.all()\n18 \n19 return render(\n20 request,\n21 self.template_name,\n22 {\n23 **self.base_context,\n24 'documents': documents\n25 }\n26 )\n27\n[end of backend/cms/views/media/media_list_view.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/backend/cms/views/media/media_list_view.py b/backend/cms/views/media/media_list_view.py\n--- a/backend/cms/views/media/media_list_view.py\n+++ b/backend/cms/views/media/media_list_view.py\n@@ -10,7 +10,7 @@\n @method_decorator(login_required, name='dispatch')\n @method_decorator(region_permission_required, name='dispatch')\n class MediaListView(TemplateView):\n- template_name = 'media/list.html'\n+ template_name = 'media/media_list.html'\n base_context = {'current_menu_item': 'media'}\n \n def get(self, request, *args, **kwargs):\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/backend/cms/views/media/media_list_view.py b/backend/cms/views/media/media_list_view.py\\n--- a/backend/cms/views/media/media_list_view.py\\n+++ b/backend/cms/views/media/media_list_view.py\\n@@ -10,7 +10,7 @@\\n @method_decorator(login_required, name='dispatch')\\n @method_decorator(region_permission_required, name='dispatch')\\n class MediaListView(TemplateView):\\n- template_name = 'media/list.html'\\n+ template_name = 'media/media_list.html'\\n base_context = {'current_menu_item': 'media'}\\n \\n def get(self, request, *args, **kwargs):\\n\", \"issue\": \"Include tailwind.css via npm instead of static file\\nAt the moment, we include tailwind.css as a static file in our repo.\\r\\nInstead, we should use [the recommended installation via npm](https://tailwindcss.com/docs/installation/) where we can configure exactly which parts we need and compile a minified css file in our deployment chain.\\n\", \"before_files\": [{\"content\": \"from django.contrib.auth.decorators import login_required\\nfrom django.shortcuts import render\\nfrom django.utils.decorators import method_decorator\\nfrom django.views.generic import TemplateView\\n\\nfrom ...decorators import region_permission_required\\nfrom ...models import Document\\n\\n\\n@method_decorator(login_required, name='dispatch')\\n@method_decorator(region_permission_required, name='dispatch')\\nclass MediaListView(TemplateView):\\n template_name = 'media/list.html'\\n base_context = {'current_menu_item': 'media'}\\n\\n def get(self, request, *args, **kwargs):\\n documents = Document.objects.all()\\n\\n return render(\\n request,\\n self.template_name,\\n {\\n **self.base_context,\\n 'documents': documents\\n }\\n )\\n\", \"path\": 
\"backend/cms/views/media/media_list_view.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":806,"string":"806"},"num_tokens_diff":{"kind":"number","value":131,"string":"131"}}},{"rowIdx":18124,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_378"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"encode__uvicorn-1099"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nPackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping\n### Checklist\r\n\r\n\r\n\r\n- [X] The bug is reproducible against the latest release and/or `master`.\r\n- [X] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nWhen adding uvicorn package with poetry the following warning is raised:\r\n\r\n PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping\r\n\r\nbecause the constraint `>=7.*` violates PEP440 syntax. It should either be `>=7.0` or `=7.*`.\r\n\r\nBecause of this, the `click` dependency is not installed and uvicorn may not work.\r\n\r\n### To reproduce\r\n\r\nJust execute `poetry add uvicorn` in any environment.\r\n\r\n### Expected behavior\r\n\r\nTo install `uvicorn` correctly together with all its dependencies.\r\n\r\n### Actual behavior\r\n\r\nThe `click` dependency is not installed and uvicorn may not work.\n\n\n\n[start of setup.py]\n1 #!/usr/bin/env python\n2 # -*- coding: utf-8 -*-\n3 \n4 import os\n5 import re\n6 \n7 from setuptools import setup\n8 \n9 \n10 def get_version(package):\n11 \"\"\"\n12 Return package version as listed in `__version__` in `init.py`.\n13 \"\"\"\n14 path = os.path.join(package, \"__init__.py\")\n15 init_py = open(path, \"r\", encoding=\"utf8\").read()\n16 return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n17 \n18 \n19 def get_long_description():\n20 \"\"\"\n21 Return the README.\n22 \"\"\"\n23 return open(\"README.md\", \"r\", encoding=\"utf8\").read()\n24 \n25 \n26 def get_packages(package):\n27 \"\"\"\n28 Return root package and all sub-packages.\n29 \"\"\"\n30 return [\n31 dirpath\n32 for dirpath, dirnames, filenames in os.walk(package)\n33 if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n34 ]\n35 \n36 \n37 env_marker_cpython = (\n38 \"sys_platform != 'win32'\"\n39 \" and (sys_platform != 'cygwin'\"\n40 \" and platform_python_implementation != 'PyPy')\"\n41 )\n42 \n43 env_marker_win = \"sys_platform == 'win32'\"\n44 env_marker_below_38 = \"python_version < '3.8'\"\n45 \n46 minimal_requirements = [\n47 \"asgiref>=3.4.0\",\n48 \"click>=7.*\",\n49 \"h11>=0.8\",\n50 \"typing-extensions;\" + env_marker_below_38,\n51 ]\n52 \n53 \n54 extra_requirements = [\n55 \"websockets>=9.1\",\n56 \"httptools==0.2.*\",\n57 \"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \" + env_marker_cpython,\n58 \"colorama>=0.4;\" + env_marker_win,\n59 \"watchgod>=0.6\",\n60 \"python-dotenv>=0.13\",\n61 \"PyYAML>=5.1\",\n62 ]\n63 \n64 \n65 setup(\n66 name=\"uvicorn\",\n67 version=get_version(\"uvicorn\"),\n68 url=\"https://github.com/encode/uvicorn\",\n69 license=\"BSD\",\n70 description=\"The lightning-fast ASGI server.\",\n71 long_description=get_long_description(),\n72 long_description_content_type=\"text/markdown\",\n73 author=\"Tom Christie\",\n74 author_email=\"tom@tomchristie.com\",\n75 packages=get_packages(\"uvicorn\"),\n76 
install_requires=minimal_requirements,\n77 extras_require={\"standard\": extra_requirements},\n78 include_package_data=True,\n79 classifiers=[\n80 \"Development Status :: 4 - Beta\",\n81 \"Environment :: Web Environment\",\n82 \"Intended Audience :: Developers\",\n83 \"License :: OSI Approved :: BSD License\",\n84 \"Operating System :: OS Independent\",\n85 \"Topic :: Internet :: WWW/HTTP\",\n86 \"Programming Language :: Python :: 3\",\n87 \"Programming Language :: Python :: 3.6\",\n88 \"Programming Language :: Python :: 3.7\",\n89 \"Programming Language :: Python :: 3.8\",\n90 \"Programming Language :: Python :: 3.9\",\n91 \"Programming Language :: Python :: Implementation :: CPython\",\n92 \"Programming Language :: Python :: Implementation :: PyPy\",\n93 ],\n94 entry_points=\"\"\"\n95 [console_scripts]\n96 uvicorn=uvicorn.main:main\n97 \"\"\",\n98 )\n99\n[end of setup.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,7 +45,7 @@\n \n minimal_requirements = [\n \"asgiref>=3.4.0\",\n- \"click>=7.*\",\n+ \"click>=7.0\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n ]\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/setup.py b/setup.py\\n--- a/setup.py\\n+++ b/setup.py\\n@@ -45,7 +45,7 @@\\n \\n minimal_requirements = [\\n \\\"asgiref>=3.4.0\\\",\\n- \\\"click>=7.*\\\",\\n+ \\\"click>=7.0\\\",\\n \\\"h11>=0.8\\\",\\n \\\"typing-extensions;\\\" + env_marker_below_38,\\n ]\\n\", \"issue\": \"PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping\\n### Checklist\\r\\n\\r\\n\\r\\n\\r\\n- [X] The bug is reproducible against the latest release and/or `master`.\\r\\n- [X] There are no similar issues or pull requests to fix it yet.\\r\\n\\r\\n### Describe the bug\\r\\n\\r\\nWhen adding uvicorn package with poetry the following warning is raised:\\r\\n\\r\\n PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping\\r\\n\\r\\nbecause the constraint `>=7.*` violates PEP440 syntax. 
It should either be `>=7.0` or `=7.*`.\\r\\n\\r\\nBecause of this, the `click` dependency is not installed and uvicorn may not work.\\r\\n\\r\\n### To reproduce\\r\\n\\r\\nJust execute `poetry add uvicorn` in any environment.\\r\\n\\r\\n### Expected behavior\\r\\n\\r\\nTo install `uvicorn` correctly together with all its dependencies.\\r\\n\\r\\n### Actual behavior\\r\\n\\r\\nThe `click` dependency is not installed and uvicorn may not work.\\n\", \"before_files\": [{\"content\": \"#!/usr/bin/env python\\n# -*- coding: utf-8 -*-\\n\\nimport os\\nimport re\\n\\nfrom setuptools import setup\\n\\n\\ndef get_version(package):\\n \\\"\\\"\\\"\\n Return package version as listed in `__version__` in `init.py`.\\n \\\"\\\"\\\"\\n path = os.path.join(package, \\\"__init__.py\\\")\\n init_py = open(path, \\\"r\\\", encoding=\\\"utf8\\\").read()\\n return re.search(\\\"__version__ = ['\\\\\\\"]([^'\\\\\\\"]+)['\\\\\\\"]\\\", init_py).group(1)\\n\\n\\ndef get_long_description():\\n \\\"\\\"\\\"\\n Return the README.\\n \\\"\\\"\\\"\\n return open(\\\"README.md\\\", \\\"r\\\", encoding=\\\"utf8\\\").read()\\n\\n\\ndef get_packages(package):\\n \\\"\\\"\\\"\\n Return root package and all sub-packages.\\n \\\"\\\"\\\"\\n return [\\n dirpath\\n for dirpath, dirnames, filenames in os.walk(package)\\n if os.path.exists(os.path.join(dirpath, \\\"__init__.py\\\"))\\n ]\\n\\n\\nenv_marker_cpython = (\\n \\\"sys_platform != 'win32'\\\"\\n \\\" and (sys_platform != 'cygwin'\\\"\\n \\\" and platform_python_implementation != 'PyPy')\\\"\\n)\\n\\nenv_marker_win = \\\"sys_platform == 'win32'\\\"\\nenv_marker_below_38 = \\\"python_version < '3.8'\\\"\\n\\nminimal_requirements = [\\n \\\"asgiref>=3.4.0\\\",\\n \\\"click>=7.*\\\",\\n \\\"h11>=0.8\\\",\\n \\\"typing-extensions;\\\" + env_marker_below_38,\\n]\\n\\n\\nextra_requirements = [\\n \\\"websockets>=9.1\\\",\\n \\\"httptools==0.2.*\\\",\\n \\\"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \\\" + env_marker_cpython,\\n \\\"colorama>=0.4;\\\" + env_marker_win,\\n \\\"watchgod>=0.6\\\",\\n \\\"python-dotenv>=0.13\\\",\\n \\\"PyYAML>=5.1\\\",\\n]\\n\\n\\nsetup(\\n name=\\\"uvicorn\\\",\\n version=get_version(\\\"uvicorn\\\"),\\n url=\\\"https://github.com/encode/uvicorn\\\",\\n license=\\\"BSD\\\",\\n description=\\\"The lightning-fast ASGI server.\\\",\\n long_description=get_long_description(),\\n long_description_content_type=\\\"text/markdown\\\",\\n author=\\\"Tom Christie\\\",\\n author_email=\\\"tom@tomchristie.com\\\",\\n packages=get_packages(\\\"uvicorn\\\"),\\n install_requires=minimal_requirements,\\n extras_require={\\\"standard\\\": extra_requirements},\\n include_package_data=True,\\n classifiers=[\\n \\\"Development Status :: 4 - Beta\\\",\\n \\\"Environment :: Web Environment\\\",\\n \\\"Intended Audience :: Developers\\\",\\n \\\"License :: OSI Approved :: BSD License\\\",\\n \\\"Operating System :: OS Independent\\\",\\n \\\"Topic :: Internet :: WWW/HTTP\\\",\\n \\\"Programming Language :: Python :: 3\\\",\\n \\\"Programming Language :: Python :: 3.6\\\",\\n \\\"Programming Language :: Python :: 3.7\\\",\\n \\\"Programming Language :: Python :: 3.8\\\",\\n \\\"Programming Language :: Python :: 3.9\\\",\\n \\\"Programming Language :: Python :: Implementation :: CPython\\\",\\n \\\"Programming Language :: Python :: Implementation :: PyPy\\\",\\n ],\\n entry_points=\\\"\\\"\\\"\\n [console_scripts]\\n uvicorn=uvicorn.main:main\\n \\\"\\\"\\\",\\n)\\n\", \"path\": 
\"setup.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1656,"string":"1,656"},"num_tokens_diff":{"kind":"number","value":92,"string":"92"}}},{"rowIdx":18125,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_31527"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"iterative__dvc-4739"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\ndvc dag --outs\nIn the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. \r\nI thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.\r\nCan you maybe (re)implement this feature?\r\nThanks!\ndvc dag --outs\nIn the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. \r\nI thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.\r\nCan you maybe (re)implement this feature?\r\nThanks!\n\n\n\n[start of dvc/command/dag.py]\n1 import argparse\n2 import logging\n3 \n4 from dvc.command.base import CmdBase, append_doc_link\n5 from dvc.exceptions import DvcException\n6 \n7 logger = logging.getLogger(__name__)\n8 \n9 \n10 def _show_ascii(G):\n11 from dvc.dagascii import draw\n12 from dvc.repo.graph import get_pipelines\n13 \n14 pipelines = get_pipelines(G)\n15 \n16 ret = []\n17 for pipeline in pipelines:\n18 ret.append(draw(pipeline.nodes, pipeline.edges))\n19 \n20 return \"\\n\".join(ret)\n21 \n22 \n23 def _show_dot(G):\n24 import io\n25 \n26 from networkx.drawing.nx_pydot import write_dot\n27 \n28 dot_file = io.StringIO()\n29 write_dot(G, dot_file)\n30 return dot_file.getvalue()\n31 \n32 \n33 def _build(G, target=None, full=False):\n34 import networkx as nx\n35 \n36 from dvc.repo.graph import get_pipeline, get_pipelines\n37 \n38 if target:\n39 H = get_pipeline(get_pipelines(G), target)\n40 if not full:\n41 descendants = nx.descendants(G, target)\n42 descendants.add(target)\n43 H.remove_nodes_from(set(G.nodes()) - descendants)\n44 else:\n45 H = G\n46 \n47 def _relabel(stage):\n48 return stage.addressing\n49 \n50 return nx.relabel_nodes(H, _relabel, copy=False)\n51 \n52 \n53 class CmdDAG(CmdBase):\n54 def run(self):\n55 try:\n56 target = None\n57 if self.args.target:\n58 stages = self.repo.collect(self.args.target)\n59 if len(stages) > 1:\n60 logger.error(\n61 f\"'{self.args.target}' contains more than one stage \"\n62 \"{stages}, please specify one stage\"\n63 )\n64 return 1\n65 target = stages[0]\n66 \n67 G = _build(self.repo.graph, target=target, full=self.args.full,)\n68 \n69 if self.args.dot:\n70 logger.info(_show_dot(G))\n71 else:\n72 from dvc.utils.pager import pager\n73 \n74 pager(_show_ascii(G))\n75 \n76 return 0\n77 except DvcException:\n78 msg = \"failed to show \"\n79 if self.args.target:\n80 msg += f\"a pipeline for '{target}'\"\n81 else:\n82 msg += \"pipelines\"\n83 logger.exception(msg)\n84 return 1\n85 \n86 \n87 def add_parser(subparsers, parent_parser):\n88 DAG_HELP = \"Visualize DVC project DAG.\"\n89 dag_parser = subparsers.add_parser(\n90 \"dag\",\n91 parents=[parent_parser],\n92 description=append_doc_link(DAG_HELP, 
\"dag\"),\n93 help=DAG_HELP,\n94 formatter_class=argparse.RawDescriptionHelpFormatter,\n95 )\n96 dag_parser.add_argument(\n97 \"--dot\",\n98 action=\"store_true\",\n99 default=False,\n100 help=\"Print DAG with .dot format.\",\n101 )\n102 dag_parser.add_argument(\n103 \"--full\",\n104 action=\"store_true\",\n105 default=False,\n106 help=(\n107 \"Show full DAG that the target belongs too, instead of \"\n108 \"showing DAG consisting only of ancestors.\"\n109 ),\n110 )\n111 dag_parser.add_argument(\n112 \"target\",\n113 nargs=\"?\",\n114 help=\"Stage or output to show pipeline for (optional). \"\n115 \"Finds all stages in the workspace by default.\",\n116 )\n117 dag_parser.set_defaults(func=CmdDAG)\n118\n[end of dvc/command/dag.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/dvc/command/dag.py b/dvc/command/dag.py\n--- a/dvc/command/dag.py\n+++ b/dvc/command/dag.py\n@@ -30,7 +30,7 @@\n return dot_file.getvalue()\n \n \n-def _build(G, target=None, full=False):\n+def _build(G, target=None, full=False, outs=False):\n import networkx as nx\n \n from dvc.repo.graph import get_pipeline, get_pipelines\n@@ -44,8 +44,25 @@\n else:\n H = G\n \n- def _relabel(stage):\n- return stage.addressing\n+ if outs:\n+ G = nx.DiGraph()\n+ for stage in H.nodes:\n+ G.add_nodes_from(stage.outs)\n+\n+ for from_stage, to_stage in nx.edge_dfs(H):\n+ G.add_edges_from(\n+ [\n+ (from_out, to_out)\n+ for from_out in from_stage.outs\n+ for to_out in to_stage.outs\n+ ]\n+ )\n+ H = G\n+\n+ def _relabel(node):\n+ from dvc.stage import Stage\n+\n+ return node.addressing if isinstance(node, Stage) else str(node)\n \n return nx.relabel_nodes(H, _relabel, copy=False)\n \n@@ -64,7 +81,12 @@\n return 1\n target = stages[0]\n \n- G = _build(self.repo.graph, target=target, full=self.args.full,)\n+ G = _build(\n+ self.repo.graph,\n+ target=target,\n+ full=self.args.full,\n+ outs=self.args.outs,\n+ )\n \n if self.args.dot:\n logger.info(_show_dot(G))\n@@ -108,6 +130,13 @@\n \"showing DAG consisting only of ancestors.\"\n ),\n )\n+ dag_parser.add_argument(\n+ \"-o\",\n+ \"--outs\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Print output files instead of stages.\",\n+ )\n dag_parser.add_argument(\n \"target\",\n nargs=\"?\",\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/dvc/command/dag.py b/dvc/command/dag.py\\n--- a/dvc/command/dag.py\\n+++ 
b/dvc/command/dag.py\\n@@ -30,7 +30,7 @@\\n return dot_file.getvalue()\\n \\n \\n-def _build(G, target=None, full=False):\\n+def _build(G, target=None, full=False, outs=False):\\n import networkx as nx\\n \\n from dvc.repo.graph import get_pipeline, get_pipelines\\n@@ -44,8 +44,25 @@\\n else:\\n H = G\\n \\n- def _relabel(stage):\\n- return stage.addressing\\n+ if outs:\\n+ G = nx.DiGraph()\\n+ for stage in H.nodes:\\n+ G.add_nodes_from(stage.outs)\\n+\\n+ for from_stage, to_stage in nx.edge_dfs(H):\\n+ G.add_edges_from(\\n+ [\\n+ (from_out, to_out)\\n+ for from_out in from_stage.outs\\n+ for to_out in to_stage.outs\\n+ ]\\n+ )\\n+ H = G\\n+\\n+ def _relabel(node):\\n+ from dvc.stage import Stage\\n+\\n+ return node.addressing if isinstance(node, Stage) else str(node)\\n \\n return nx.relabel_nodes(H, _relabel, copy=False)\\n \\n@@ -64,7 +81,12 @@\\n return 1\\n target = stages[0]\\n \\n- G = _build(self.repo.graph, target=target, full=self.args.full,)\\n+ G = _build(\\n+ self.repo.graph,\\n+ target=target,\\n+ full=self.args.full,\\n+ outs=self.args.outs,\\n+ )\\n \\n if self.args.dot:\\n logger.info(_show_dot(G))\\n@@ -108,6 +130,13 @@\\n \\\"showing DAG consisting only of ancestors.\\\"\\n ),\\n )\\n+ dag_parser.add_argument(\\n+ \\\"-o\\\",\\n+ \\\"--outs\\\",\\n+ action=\\\"store_true\\\",\\n+ default=False,\\n+ help=\\\"Print output files instead of stages.\\\",\\n+ )\\n dag_parser.add_argument(\\n \\\"target\\\",\\n nargs=\\\"?\\\",\\n\", \"issue\": \"dvc dag --outs\\nIn the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. \\r\\nI thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.\\r\\nCan you maybe (re)implement this feature?\\r\\nThanks!\\ndvc dag --outs\\nIn the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. \\r\\nI thought this was a really nice feature. 
For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.\\r\\nCan you maybe (re)implement this feature?\\r\\nThanks!\\n\", \"before_files\": [{\"content\": \"import argparse\\nimport logging\\n\\nfrom dvc.command.base import CmdBase, append_doc_link\\nfrom dvc.exceptions import DvcException\\n\\nlogger = logging.getLogger(__name__)\\n\\n\\ndef _show_ascii(G):\\n from dvc.dagascii import draw\\n from dvc.repo.graph import get_pipelines\\n\\n pipelines = get_pipelines(G)\\n\\n ret = []\\n for pipeline in pipelines:\\n ret.append(draw(pipeline.nodes, pipeline.edges))\\n\\n return \\\"\\\\n\\\".join(ret)\\n\\n\\ndef _show_dot(G):\\n import io\\n\\n from networkx.drawing.nx_pydot import write_dot\\n\\n dot_file = io.StringIO()\\n write_dot(G, dot_file)\\n return dot_file.getvalue()\\n\\n\\ndef _build(G, target=None, full=False):\\n import networkx as nx\\n\\n from dvc.repo.graph import get_pipeline, get_pipelines\\n\\n if target:\\n H = get_pipeline(get_pipelines(G), target)\\n if not full:\\n descendants = nx.descendants(G, target)\\n descendants.add(target)\\n H.remove_nodes_from(set(G.nodes()) - descendants)\\n else:\\n H = G\\n\\n def _relabel(stage):\\n return stage.addressing\\n\\n return nx.relabel_nodes(H, _relabel, copy=False)\\n\\n\\nclass CmdDAG(CmdBase):\\n def run(self):\\n try:\\n target = None\\n if self.args.target:\\n stages = self.repo.collect(self.args.target)\\n if len(stages) > 1:\\n logger.error(\\n f\\\"'{self.args.target}' contains more than one stage \\\"\\n \\\"{stages}, please specify one stage\\\"\\n )\\n return 1\\n target = stages[0]\\n\\n G = _build(self.repo.graph, target=target, full=self.args.full,)\\n\\n if self.args.dot:\\n logger.info(_show_dot(G))\\n else:\\n from dvc.utils.pager import pager\\n\\n pager(_show_ascii(G))\\n\\n return 0\\n except DvcException:\\n msg = \\\"failed to show \\\"\\n if self.args.target:\\n msg += f\\\"a pipeline for '{target}'\\\"\\n else:\\n msg += \\\"pipelines\\\"\\n logger.exception(msg)\\n return 1\\n\\n\\ndef add_parser(subparsers, parent_parser):\\n DAG_HELP = \\\"Visualize DVC project DAG.\\\"\\n dag_parser = subparsers.add_parser(\\n \\\"dag\\\",\\n parents=[parent_parser],\\n description=append_doc_link(DAG_HELP, \\\"dag\\\"),\\n help=DAG_HELP,\\n formatter_class=argparse.RawDescriptionHelpFormatter,\\n )\\n dag_parser.add_argument(\\n \\\"--dot\\\",\\n action=\\\"store_true\\\",\\n default=False,\\n help=\\\"Print DAG with .dot format.\\\",\\n )\\n dag_parser.add_argument(\\n \\\"--full\\\",\\n action=\\\"store_true\\\",\\n default=False,\\n help=(\\n \\\"Show full DAG that the target belongs too, instead of \\\"\\n \\\"showing DAG consisting only of ancestors.\\\"\\n ),\\n )\\n dag_parser.add_argument(\\n \\\"target\\\",\\n nargs=\\\"?\\\",\\n help=\\\"Stage or output to show pipeline for (optional). 
\\\"\\n \\\"Finds all stages in the workspace by default.\\\",\\n )\\n dag_parser.set_defaults(func=CmdDAG)\\n\", \"path\": \"dvc/command/dag.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1671,"string":"1,671"},"num_tokens_diff":{"kind":"number","value":471,"string":"471"}}},{"rowIdx":18126,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_32659"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"pypi__warehouse-12343"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nPython 3.1 classifier filtering is broken\nWhen [filtering by the `Programming Language :: Python :: 3.1` classifier on pypi.org][1], the results include packages which don't have that classifier - any package with a classifier matching `Programming Language :: Python :: 3.1*` is included. That is, packages for 3.10, 3.11, 3.12, and so on are included in the results.\r\n\r\n[1]: https://pypi.org/search/?q=&o=&c=Programming+Language+%3A%3A+Python+%3A%3A+3.1\r\n\n\n\n\n[start of warehouse/search/queries.py]\n1 # Licensed under the Apache License, Version 2.0 (the \"License\");\n2 # you may not use this file except in compliance with the License.\n3 # You may obtain a copy of the License at\n4 #\n5 # http://www.apache.org/licenses/LICENSE-2.0\n6 #\n7 # Unless required by applicable law or agreed to in writing, software\n8 # distributed under the License is distributed on an \"AS IS\" BASIS,\n9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n10 # See the License for the specific language governing permissions and\n11 # limitations under the License.\n12 \n13 import re\n14 \n15 from elasticsearch_dsl import Q\n16 \n17 SEARCH_FIELDS = [\n18 \"author\",\n19 \"author_email\",\n20 \"description\",\n21 \"download_url\",\n22 \"home_page\",\n23 \"keywords\",\n24 \"license\",\n25 \"maintainer\",\n26 \"maintainer_email\",\n27 \"normalized_name\",\n28 \"platform\",\n29 \"summary\",\n30 ]\n31 SEARCH_BOOSTS = {\n32 \"name\": 10,\n33 \"normalized_name\": 10,\n34 \"description\": 5,\n35 \"keywords\": 5,\n36 \"summary\": 5,\n37 }\n38 SEARCH_FILTER_ORDER = (\n39 \"Framework\",\n40 \"Topic\",\n41 \"Development Status\",\n42 \"License\",\n43 \"Programming Language\",\n44 \"Operating System\",\n45 \"Environment\",\n46 \"Intended Audience\",\n47 \"Natural Language\",\n48 )\n49 \n50 \n51 def get_es_query(es, terms, order, classifiers):\n52 \"\"\"\n53 Returns an Elasticsearch query from data from the request.\n54 \"\"\"\n55 if not terms:\n56 query = es.query()\n57 else:\n58 bool_query = gather_es_queries(terms)\n59 query = es.query(bool_query)\n60 query = query.suggest(\"name_suggestion\", terms, term={\"field\": \"name\"})\n61 \n62 # Require match to all specified classifiers\n63 for classifier in classifiers:\n64 query = query.query(\"prefix\", classifiers=classifier)\n65 \n66 query = query_for_order(query, order)\n67 return query\n68 \n69 \n70 def gather_es_queries(q):\n71 quoted_string, unquoted_string = filter_query(q)\n72 must = [form_query(\"phrase\", i) for i in quoted_string] + [\n73 form_query(\"best_fields\", i) for i in unquoted_string\n74 ]\n75 \n76 bool_query = Q(\"bool\", must=must)\n77 \n78 # Allow to optionally match on prefix\n79 # if ``q`` is longer than one character.\n80 if len(q) > 1:\n81 bool_query = bool_query | Q(\"prefix\", normalized_name=q)\n82 return bool_query\n83 \n84 \n85 def 
filter_query(s):\n86 \"\"\"\n87 Filters given query with the below regex\n88 and returns lists of quoted and unquoted strings\n89 \"\"\"\n90 matches = re.findall(r'(?:\"([^\"]*)\")|([^\"]*)', s)\n91 result_quoted = [t[0].strip() for t in matches if t[0]]\n92 result_unquoted = [t[1].strip() for t in matches if t[1]]\n93 return result_quoted, result_unquoted\n94 \n95 \n96 def form_query(query_type, query):\n97 \"\"\"\n98 Returns a multi match query\n99 \"\"\"\n100 fields = [\n101 field + \"^\" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field\n102 for field in SEARCH_FIELDS\n103 ]\n104 return Q(\"multi_match\", fields=fields, query=query, type=query_type)\n105 \n106 \n107 def query_for_order(query, order):\n108 \"\"\"\n109 Applies transformations on the ES query based on the search order.\n110 \n111 Order is assumed to be a string with the name of a field with an optional\n112 hyphen to indicate descending sort order.\n113 \"\"\"\n114 if order == \"\": # relevance should not sort\n115 return query\n116 \n117 field = order[order.find(\"-\") + 1 :]\n118 sort_info = {\n119 field: {\n120 \"order\": \"desc\" if order.startswith(\"-\") else \"asc\",\n121 \"unmapped_type\": \"long\",\n122 }\n123 }\n124 query = query.sort(sort_info)\n125 return query\n126\n[end of warehouse/search/queries.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/warehouse/search/queries.py b/warehouse/search/queries.py\n--- a/warehouse/search/queries.py\n+++ b/warehouse/search/queries.py\n@@ -52,36 +52,45 @@\n \"\"\"\n Returns an Elasticsearch query from data from the request.\n \"\"\"\n+ classifier_q = Q(\n+ \"bool\",\n+ # Theh results must have all selected classifiers\n+ must=[\n+ Q(\n+ \"bool\",\n+ should=[\n+ # Term search for the exact classifier\n+ Q(\"term\", classifiers=classifier),\n+ # Prefix search for potential children classifiers\n+ Q(\"prefix\", classifiers=classifier + \" :: \"),\n+ ],\n+ )\n+ for classifier in classifiers\n+ ],\n+ )\n if not terms:\n- query = es.query()\n+ query = es.query(classifier_q) if classifiers else es.query()\n else:\n- bool_query = gather_es_queries(terms)\n+ quoted_string, unquoted_string = filter_query(terms)\n+ bool_query = Q(\n+ \"bool\",\n+ must=[form_query(\"phrase\", i) for i in quoted_string]\n+ + [form_query(\"best_fields\", i) for i in unquoted_string]\n+ + ([classifier_q] if classifiers else []),\n+ )\n+\n+ # Allow to 
optionally match on prefix\n+ # if ``q`` is longer than one character.\n+ if len(terms) > 1:\n+ bool_query = bool_query | Q(\"prefix\", normalized_name=terms)\n+\n query = es.query(bool_query)\n query = query.suggest(\"name_suggestion\", terms, term={\"field\": \"name\"})\n \n- # Require match to all specified classifiers\n- for classifier in classifiers:\n- query = query.query(\"prefix\", classifiers=classifier)\n-\n query = query_for_order(query, order)\n return query\n \n \n-def gather_es_queries(q):\n- quoted_string, unquoted_string = filter_query(q)\n- must = [form_query(\"phrase\", i) for i in quoted_string] + [\n- form_query(\"best_fields\", i) for i in unquoted_string\n- ]\n-\n- bool_query = Q(\"bool\", must=must)\n-\n- # Allow to optionally match on prefix\n- # if ``q`` is longer than one character.\n- if len(q) > 1:\n- bool_query = bool_query | Q(\"prefix\", normalized_name=q)\n- return bool_query\n-\n-\n def filter_query(s):\n \"\"\"\n Filters given query with the below regex\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/warehouse/search/queries.py b/warehouse/search/queries.py\\n--- a/warehouse/search/queries.py\\n+++ b/warehouse/search/queries.py\\n@@ -52,36 +52,45 @@\\n \\\"\\\"\\\"\\n Returns an Elasticsearch query from data from the request.\\n \\\"\\\"\\\"\\n+ classifier_q = Q(\\n+ \\\"bool\\\",\\n+ # Theh results must have all selected classifiers\\n+ must=[\\n+ Q(\\n+ \\\"bool\\\",\\n+ should=[\\n+ # Term search for the exact classifier\\n+ Q(\\\"term\\\", classifiers=classifier),\\n+ # Prefix search for potential children classifiers\\n+ Q(\\\"prefix\\\", classifiers=classifier + \\\" :: \\\"),\\n+ ],\\n+ )\\n+ for classifier in classifiers\\n+ ],\\n+ )\\n if not terms:\\n- query = es.query()\\n+ query = es.query(classifier_q) if classifiers else es.query()\\n else:\\n- bool_query = gather_es_queries(terms)\\n+ quoted_string, unquoted_string = filter_query(terms)\\n+ bool_query = Q(\\n+ \\\"bool\\\",\\n+ must=[form_query(\\\"phrase\\\", i) for i in quoted_string]\\n+ + [form_query(\\\"best_fields\\\", i) for i in unquoted_string]\\n+ + ([classifier_q] if classifiers else []),\\n+ )\\n+\\n+ # Allow to optionally match on prefix\\n+ # if ``q`` is longer than one character.\\n+ if len(terms) > 1:\\n+ bool_query = bool_query | Q(\\\"prefix\\\", normalized_name=terms)\\n+\\n query = es.query(bool_query)\\n query = query.suggest(\\\"name_suggestion\\\", terms, term={\\\"field\\\": \\\"name\\\"})\\n \\n- # Require match to all specified classifiers\\n- for classifier in classifiers:\\n- query = query.query(\\\"prefix\\\", classifiers=classifier)\\n-\\n query = query_for_order(query, order)\\n return query\\n \\n \\n-def gather_es_queries(q):\\n- quoted_string, unquoted_string = filter_query(q)\\n- must = [form_query(\\\"phrase\\\", i) for i in quoted_string] + [\\n- form_query(\\\"best_fields\\\", i) for i in unquoted_string\\n- ]\\n-\\n- bool_query = Q(\\\"bool\\\", must=must)\\n-\\n- # Allow to optionally match on prefix\\n- # if ``q`` is longer than one character.\\n- if len(q) > 1:\\n- bool_query = bool_query | Q(\\\"prefix\\\", normalized_name=q)\\n- return bool_query\\n-\\n-\\n def filter_query(s):\\n \\\"\\\"\\\"\\n Filters given query with the below regex\\n\", \"issue\": \"Python 3.1 classifier filtering is broken\\nWhen [filtering by the `Programming Language :: Python :: 3.1` classifier on pypi.org][1], the results include packages which don't have that classifier - any package with a classifier matching `Programming Language :: 
Python :: 3.1*` is included. That is, packages for 3.10, 3.11, 3.12, and so on are included in the results.\\r\\n\\r\\n[1]: https://pypi.org/search/?q=&o=&c=Programming+Language+%3A%3A+Python+%3A%3A+3.1\\r\\n\\n\", \"before_files\": [{\"content\": \"# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n#\\n# http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\\n\\nimport re\\n\\nfrom elasticsearch_dsl import Q\\n\\nSEARCH_FIELDS = [\\n \\\"author\\\",\\n \\\"author_email\\\",\\n \\\"description\\\",\\n \\\"download_url\\\",\\n \\\"home_page\\\",\\n \\\"keywords\\\",\\n \\\"license\\\",\\n \\\"maintainer\\\",\\n \\\"maintainer_email\\\",\\n \\\"normalized_name\\\",\\n \\\"platform\\\",\\n \\\"summary\\\",\\n]\\nSEARCH_BOOSTS = {\\n \\\"name\\\": 10,\\n \\\"normalized_name\\\": 10,\\n \\\"description\\\": 5,\\n \\\"keywords\\\": 5,\\n \\\"summary\\\": 5,\\n}\\nSEARCH_FILTER_ORDER = (\\n \\\"Framework\\\",\\n \\\"Topic\\\",\\n \\\"Development Status\\\",\\n \\\"License\\\",\\n \\\"Programming Language\\\",\\n \\\"Operating System\\\",\\n \\\"Environment\\\",\\n \\\"Intended Audience\\\",\\n \\\"Natural Language\\\",\\n)\\n\\n\\ndef get_es_query(es, terms, order, classifiers):\\n \\\"\\\"\\\"\\n Returns an Elasticsearch query from data from the request.\\n \\\"\\\"\\\"\\n if not terms:\\n query = es.query()\\n else:\\n bool_query = gather_es_queries(terms)\\n query = es.query(bool_query)\\n query = query.suggest(\\\"name_suggestion\\\", terms, term={\\\"field\\\": \\\"name\\\"})\\n\\n # Require match to all specified classifiers\\n for classifier in classifiers:\\n query = query.query(\\\"prefix\\\", classifiers=classifier)\\n\\n query = query_for_order(query, order)\\n return query\\n\\n\\ndef gather_es_queries(q):\\n quoted_string, unquoted_string = filter_query(q)\\n must = [form_query(\\\"phrase\\\", i) for i in quoted_string] + [\\n form_query(\\\"best_fields\\\", i) for i in unquoted_string\\n ]\\n\\n bool_query = Q(\\\"bool\\\", must=must)\\n\\n # Allow to optionally match on prefix\\n # if ``q`` is longer than one character.\\n if len(q) > 1:\\n bool_query = bool_query | Q(\\\"prefix\\\", normalized_name=q)\\n return bool_query\\n\\n\\ndef filter_query(s):\\n \\\"\\\"\\\"\\n Filters given query with the below regex\\n and returns lists of quoted and unquoted strings\\n \\\"\\\"\\\"\\n matches = re.findall(r'(?:\\\"([^\\\"]*)\\\")|([^\\\"]*)', s)\\n result_quoted = [t[0].strip() for t in matches if t[0]]\\n result_unquoted = [t[1].strip() for t in matches if t[1]]\\n return result_quoted, result_unquoted\\n\\n\\ndef form_query(query_type, query):\\n \\\"\\\"\\\"\\n Returns a multi match query\\n \\\"\\\"\\\"\\n fields = [\\n field + \\\"^\\\" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field\\n for field in SEARCH_FIELDS\\n ]\\n return Q(\\\"multi_match\\\", fields=fields, query=query, type=query_type)\\n\\n\\ndef query_for_order(query, order):\\n \\\"\\\"\\\"\\n Applies transformations on the ES query based on the search order.\\n\\n Order is assumed to be a string with the name of a field with an optional\\n hyphen to 
indicate descending sort order.\\n \\\"\\\"\\\"\\n if order == \\\"\\\": # relevance should not sort\\n return query\\n\\n field = order[order.find(\\\"-\\\") + 1 :]\\n sort_info = {\\n field: {\\n \\\"order\\\": \\\"desc\\\" if order.startswith(\\\"-\\\") else \\\"asc\\\",\\n \\\"unmapped_type\\\": \\\"long\\\",\\n }\\n }\\n query = query.sort(sort_info)\\n return query\\n\", \"path\": \"warehouse/search/queries.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1788,"string":"1,788"},"num_tokens_diff":{"kind":"number","value":561,"string":"561"}}},{"rowIdx":18127,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_29422"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"freedomofpress__securedrop-7035"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\ndetermine post-upgrade failure-mode for a SHA-1-signed submission key\n## Description\r\n\r\nAfter #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.\r\n\r\nAfter #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.\r\n\r\nWhat will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?\r\n\r\n## Possible approaches\r\n\r\n| Option | Documentation changes | Code changes | Implication |\r\n| --- | --- | --- | --- |\r\n| Fail open, but log | optional | ✓ | Admin must monitor logs and/or OSSEC alerts. |\r\n| Fail open, but document | ✓ | ✗ | Admin must monitor release notes or check documentation. |\r\n| Fail closed | optional | ✓[1] | Admin can contact us for help. |\r\n\r\n**Notes:**\r\n1. @legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.\n\n\n\n[start of securedrop/journalist.py]\n1 from encryption import EncryptionManager, GpgKeyNotFoundError\n2 from execution import asynchronous\n3 from journalist_app import create_app\n4 from models import Source\n5 from sdconfig import SecureDropConfig\n6 \n7 config = SecureDropConfig.get_current()\n8 # app is imported by journalist.wsgi\n9 app = create_app(config)\n10 \n11 \n12 @asynchronous\n13 def prime_keycache() -> None:\n14 \"\"\"Pre-load the source public keys into Redis.\"\"\"\n15 with app.app_context():\n16 encryption_mgr = EncryptionManager.get_default()\n17 for source in Source.query.filter_by(pending=False, deleted_at=None).all():\n18 try:\n19 encryption_mgr.get_source_public_key(source.filesystem_id)\n20 except GpgKeyNotFoundError:\n21 pass\n22 \n23 \n24 prime_keycache()\n25 \n26 \n27 if __name__ == \"__main__\": # pragma: no cover\n28 debug = getattr(config, \"env\", \"prod\") != \"prod\"\n29 # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n30 app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n31\n[end of securedrop/journalist.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/securedrop/journalist.py b/securedrop/journalist.py\n--- a/securedrop/journalist.py\n+++ b/securedrop/journalist.py\n@@ -1,9 +1,13 @@\n+import sys\n+\n from encryption import EncryptionManager, GpgKeyNotFoundError\n from execution import asynchronous\n from journalist_app import create_app\n from models import Source\n from sdconfig import SecureDropConfig\n \n+import redwood\n+\n config = SecureDropConfig.get_current()\n # app is imported by journalist.wsgi\n app = create_app(config)\n@@ -21,10 +25,28 @@\n pass\n \n \n-prime_keycache()\n+def validate_journalist_key() -> None:\n+ \"\"\"Verify the journalist PGP key is valid\"\"\"\n+ encryption_mgr = EncryptionManager.get_default()\n+ # First check that we can read it\n+ try:\n+ journalist_key = encryption_mgr.get_journalist_public_key()\n+ except Exception as e:\n+ print(f\"ERROR: Unable to read journalist public key: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Unable to read journalist public key: {e}\")\n+ sys.exit(1)\n+ # And then what we read is valid\n+ try:\n+ redwood.is_valid_public_key(journalist_key)\n+ except redwood.RedwoodError as e:\n+ print(f\"ERROR: Journalist public key is not valid: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Journalist public key is not valid: {e}\")\n+ sys.exit(1)\n \n \n if __name__ == \"__main__\": # pragma: no cover\n+ validate_journalist_key()\n+ prime_keycache()\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/securedrop/journalist.py b/securedrop/journalist.py\\n--- a/securedrop/journalist.py\\n+++ b/securedrop/journalist.py\\n@@ -1,9 +1,13 @@\\n+import sys\\n+\\n from encryption import EncryptionManager, GpgKeyNotFoundError\\n from execution import asynchronous\\n from journalist_app import create_app\\n from models import Source\\n from sdconfig import SecureDropConfig\\n \\n+import redwood\\n+\\n config = SecureDropConfig.get_current()\\n # app is imported by journalist.wsgi\\n app = create_app(config)\\n@@ -21,10 +25,28 @@\\n pass\\n \\n \\n-prime_keycache()\\n+def validate_journalist_key() -> None:\\n+ \\\"\\\"\\\"Verify the journalist PGP key is valid\\\"\\\"\\\"\\n+ encryption_mgr = EncryptionManager.get_default()\\n+ # First check that we can read it\\n+ try:\\n+ journalist_key = 
encryption_mgr.get_journalist_public_key()\\n+ except Exception as e:\\n+ print(f\\\"ERROR: Unable to read journalist public key: {e}\\\", file=sys.stderr)\\n+ app.logger.error(f\\\"ERROR: Unable to read journalist public key: {e}\\\")\\n+ sys.exit(1)\\n+ # And then what we read is valid\\n+ try:\\n+ redwood.is_valid_public_key(journalist_key)\\n+ except redwood.RedwoodError as e:\\n+ print(f\\\"ERROR: Journalist public key is not valid: {e}\\\", file=sys.stderr)\\n+ app.logger.error(f\\\"ERROR: Journalist public key is not valid: {e}\\\")\\n+ sys.exit(1)\\n \\n \\n if __name__ == \\\"__main__\\\": # pragma: no cover\\n+ validate_journalist_key()\\n+ prime_keycache()\\n debug = getattr(config, \\\"env\\\", \\\"prod\\\") != \\\"prod\\\"\\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\\n app.run(debug=debug, host=\\\"0.0.0.0\\\", port=8081)\\n\", \"issue\": \"determine post-upgrade failure-mode for a SHA-1-signed submission key\\n## Description\\r\\n\\r\\nAfter #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.\\r\\n\\r\\nAfter #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.\\r\\n\\r\\nWhat will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?\\r\\n\\r\\n## Possible approaches\\r\\n\\r\\n| Option | Documentation changes | Code changes | Implication |\\r\\n| --- | --- | --- | --- |\\r\\n| Fail open, but log | optional | \\u2713 | Admin must monitor logs and/or OSSEC alerts. |\\r\\n| Fail open, but document | \\u2713 | \\u2717 | Admin must monitor release notes or check documentation. |\\r\\n| Fail closed | optional | \\u2713[1] | Admin can contact us for help. |\\r\\n\\r\\n**Notes:**\\r\\n1. 
@legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.\\n\", \"before_files\": [{\"content\": \"from encryption import EncryptionManager, GpgKeyNotFoundError\\nfrom execution import asynchronous\\nfrom journalist_app import create_app\\nfrom models import Source\\nfrom sdconfig import SecureDropConfig\\n\\nconfig = SecureDropConfig.get_current()\\n# app is imported by journalist.wsgi\\napp = create_app(config)\\n\\n\\n@asynchronous\\ndef prime_keycache() -> None:\\n \\\"\\\"\\\"Pre-load the source public keys into Redis.\\\"\\\"\\\"\\n with app.app_context():\\n encryption_mgr = EncryptionManager.get_default()\\n for source in Source.query.filter_by(pending=False, deleted_at=None).all():\\n try:\\n encryption_mgr.get_source_public_key(source.filesystem_id)\\n except GpgKeyNotFoundError:\\n pass\\n\\n\\nprime_keycache()\\n\\n\\nif __name__ == \\\"__main__\\\": # pragma: no cover\\n debug = getattr(config, \\\"env\\\", \\\"prod\\\") != \\\"prod\\\"\\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\\n app.run(debug=debug, host=\\\"0.0.0.0\\\", port=8081)\\n\", \"path\": \"securedrop/journalist.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1074,"string":"1,074"},"num_tokens_diff":{"kind":"number","value":440,"string":"440"}}},{"rowIdx":18128,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_7869"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"freedomofpress__securedrop-3756"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nBuilder image needs updates\n## Description\r\n\r\nsd-builder image needs to be updated.\r\n\r\n## Steps to Reproduce\r\n\r\n`make build-debs` and observe error.\r\n\r\n## Expected Behavior\r\n\r\n`make build-debs` should exit without error.\r\n\r\n## Actual Behavior\r\n\r\n`make-build-debs` returns an error, security updates are needed for the container.\r\n## Comments\r\nInstructions are available here:\r\nhttps://docs.securedrop.org/en/latest/development/dockerbuildmaint.html\n\n\n\n[start of securedrop/version.py]\n1 __version__ = '0.9.0~rc1'\n2\n[end of securedrop/version.py]\n[start of docs/conf.py]\n1 # -*- coding: utf-8 -*-\n2 #\n3 # SecureDrop documentation build configuration file, created by\n4 # sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n5 #\n6 # This file is execfile()d with the current directory set to its\n7 # containing dir.\n8 #\n9 # Note that not all possible configuration values are present in this\n10 # autogenerated file.\n11 #\n12 # All configuration values have a default; values that are commented out\n13 # serve to show the default.\n14 \n15 import os\n16 \n17 # Detect if we're being built by Read the Docs\n18 # https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\n19 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n20 \n21 # If extensions (or modules to document with autodoc) are in another directory,\n22 # add these directories to sys.path here. 
If the directory is relative to the\n23 # documentation root, use os.path.abspath to make it absolute, like shown here.\n24 # sys.path.insert(0, os.path.abspath('.'))\n25 \n26 # -- General configuration ------------------------------------------------\n27 \n28 # If your documentation needs a minimal Sphinx version, state it here.\n29 # needs_sphinx = '1.0'\n30 \n31 # Add any Sphinx extension module names here, as strings. They can be\n32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n33 # ones.\n34 extensions = ['sphinx.ext.todo', ]\n35 \n36 # Add any paths that contain templates here, relative to this directory.\n37 templates_path = ['_templates']\n38 \n39 # The suffix(es) of source filenames.\n40 # You can specify multiple suffix as a list of string:\n41 # source_suffix = ['.rst', '.md']\n42 source_suffix = '.rst'\n43 \n44 # The encoding of source files.\n45 # source_encoding = 'utf-8-sig'\n46 \n47 # The master toctree document.\n48 master_doc = 'index'\n49 \n50 # General information about the project.\n51 project = u'SecureDrop'\n52 copyright = u'2017, Freedom of the Press Foundation'\n53 author = u'SecureDrop Team and Contributors'\n54 \n55 # The version info for the project you're documenting, acts as replacement for\n56 # |version| and |release|, also used in various other places throughout the\n57 # built documents.\n58 #\n59 # The short X.Y version.\n60 version = '0.9.0~rc1'\n61 # The full version, including alpha/beta/rc tags.\n62 release = '0.9.0~rc1'\n63 \n64 # The language for content autogenerated by Sphinx. Refer to documentation\n65 # for a list of supported languages.\n66 #\n67 # This is also used if you do content translation via gettext catalogs.\n68 # Usually you set \"language\" from the command line for these cases.\n69 language = None\n70 \n71 # There are two options for replacing |today|: either, you set today to some\n72 # non-false value, then it is used:\n73 # today = ''\n74 # Else, today_fmt is used as the format for a strftime call.\n75 # today_fmt = '%B %d, %Y'\n76 \n77 # List of patterns, relative to source directory, that match files and\n78 # directories to ignore when looking for source files.\n79 exclude_patterns = ['_build']\n80 \n81 # The reST default role (used for this markup: `text`) to use for all\n82 # documents.\n83 # default_role = None\n84 \n85 # If true, '()' will be appended to :func: etc. cross-reference text.\n86 # add_function_parentheses = True\n87 \n88 # If true, the current module name will be prepended to all description\n89 # unit titles (such as .. function::).\n90 # add_module_names = True\n91 \n92 # If true, sectionauthor and moduleauthor directives will be shown in the\n93 # output. They are ignored by default.\n94 # show_authors = False\n95 \n96 # The name of the Pygments (syntax highlighting) style to use.\n97 pygments_style = 'sphinx'\n98 \n99 # A list of ignored prefixes for module index sorting.\n100 # modindex_common_prefix = []\n101 \n102 # If true, keep warnings as \"system message\" paragraphs in the built documents.\n103 # keep_warnings = False\n104 \n105 # If true, `todo` and `todoList` produce output, else they produce nothing.\n106 todo_include_todos = False\n107 \n108 \n109 # -- Options for HTML output ----------------------------------------------\n110 \n111 # The theme to use for HTML and HTML Help pages. 
See the documentation for\n112 # a list of builtin themes.\n113 if on_rtd:\n114 html_theme = 'default'\n115 else:\n116 try:\n117 # If you want to build the docs locally using the RTD theme,\n118 # you may need to install it: ``pip install sphinx_rtd_theme``.\n119 # https://github.com/snide/sphinx_rtd_theme#via-package\n120 import sphinx_rtd_theme\n121 html_theme = \"sphinx_rtd_theme\"\n122 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n123 except ImportError:\n124 # This theme is included with Sphinx and is quite nice (based\n125 # on the Pocoo themes), but since we're using the RTD theme\n126 # for the production docs, it's best to use that to avoid\n127 # issues due to discrepancies between the themes.\n128 html_theme = 'alabaster'\n129 \n130 # Theme options are theme-specific and customize the look and feel of a theme\n131 # further. For a list of options available for each theme, see the\n132 # documentation.\n133 # html_theme_options = {}\n134 \n135 # Add any paths that contain custom themes here, relative to this directory.\n136 # html_theme_path = []\n137 \n138 # The name for this set of Sphinx documents. If None, it defaults to\n139 # \" v documentation\".\n140 # html_title = None\n141 \n142 # A shorter title for the navigation bar. Default is the same as html_title.\n143 # html_short_title = None\n144 \n145 # The name of an image file (relative to this directory) to place at the top\n146 # of the sidebar.\n147 html_logo = '../securedrop/static/i/favicon.png'\n148 \n149 # The name of an image file (within the static path) to use as favicon of the\n150 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n151 # pixels large.\n152 # html_favicon = None\n153 \n154 # Add any paths that contain custom static files (such as style sheets) here,\n155 # relative to this directory. They are copied after the builtin static files,\n156 # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n157 # html_static_path = ['_static']\n158 \n159 # Add any extra paths that contain custom files (such as robots.txt or\n160 # .htaccess) here, relative to this directory. These files are copied\n161 # directly to the root of the documentation.\n162 # html_extra_path = []\n163 \n164 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n165 # using the given strftime format.\n166 # html_last_updated_fmt = '%b %d, %Y'\n167 \n168 # If true, SmartyPants will be used to convert quotes and dashes to\n169 # typographically correct entities.\n170 # html_use_smartypants = True\n171 \n172 # Custom sidebar templates, maps document names to template names.\n173 # html_sidebars = {}\n174 \n175 # Additional templates that should be rendered to pages, maps page names to\n176 # template names.\n177 # html_additional_pages = {}\n178 \n179 # If false, no module index is generated.\n180 # html_domain_indices = True\n181 \n182 # If false, no index is generated.\n183 # html_use_index = True\n184 \n185 # If true, the index is split into individual pages for each letter.\n186 # html_split_index = False\n187 \n188 # If true, links to the reST sources are added to the pages.\n189 # html_show_sourcelink = True\n190 \n191 # If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n192 # html_show_sphinx = True\n193 \n194 # If true, \"(C) Copyright ...\" is shown in the HTML footer. 
Default is True.\n195 # html_show_copyright = True\n196 \n197 # If true, an OpenSearch description file will be output, and all pages will\n198 # contain a tag referring to it. The value of this option must be the\n199 # base URL from which the finished HTML is served.\n200 # html_use_opensearch = ''\n201 \n202 # This is the file name suffix for HTML files (e.g. \".xhtml\").\n203 # html_file_suffix = None\n204 \n205 # Language to be used for generating the HTML full-text search index.\n206 # Sphinx supports the following languages:\n207 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n208 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n209 # html_search_language = 'en'\n210 \n211 # A dictionary with options for the search language support, empty by default.\n212 # Now only 'ja' uses this config value\n213 # html_search_options = {'type': 'default'}\n214 \n215 # The name of a javascript file (relative to the configuration directory) that\n216 # implements a search results scorer. If empty, the default will be used.\n217 # html_search_scorer = 'scorer.js'\n218 \n219 # Output file base name for HTML help builder.\n220 htmlhelp_basename = 'SecureDropdoc'\n221 \n222 # -- Options for LaTeX output ---------------------------------------------\n223 \n224 latex_elements = {\n225 # The paper size ('letterpaper' or 'a4paper').\n226 # 'papersize': 'letterpaper',\n227 \n228 # The font size ('10pt', '11pt' or '12pt').\n229 # 'pointsize': '10pt',\n230 \n231 # Additional stuff for the LaTeX preamble.\n232 # 'preamble': '',\n233 \n234 # Latex figure (float) alignment\n235 # 'figure_align': 'htbp',\n236 }\n237 \n238 # Grouping the document tree into LaTeX files. List of tuples\n239 # (source start file, target name, title,\n240 # author, documentclass [howto, manual, or own class]).\n241 latex_documents = [\n242 (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n243 author, 'manual'),\n244 ]\n245 \n246 # The name of an image file (relative to this directory) to place at the top of\n247 # the title page.\n248 # latex_logo = None\n249 \n250 # For \"manual\" documents, if this is true, then toplevel headings are parts,\n251 # not chapters.\n252 # latex_use_parts = False\n253 \n254 # If true, show page references after internal links.\n255 # latex_show_pagerefs = False\n256 \n257 # If true, show URL addresses after external links.\n258 # latex_show_urls = False\n259 \n260 # Documents to append as an appendix to all manuals.\n261 # latex_appendices = []\n262 \n263 # If false, no module index is generated.\n264 # latex_domain_indices = True\n265 \n266 \n267 # -- Options for manual page output ---------------------------------------\n268 \n269 # One entry per manual page. List of tuples\n270 # (source start file, name, description, authors, manual section).\n271 man_pages = [\n272 (master_doc, 'securedrop', u'SecureDrop Documentation',\n273 [author], 1)\n274 ]\n275 \n276 # If true, show URL addresses after external links.\n277 # man_show_urls = False\n278 \n279 \n280 # -- Options for Texinfo output -------------------------------------------\n281 \n282 # Grouping the document tree into Texinfo files. 
List of tuples\n283 # (source start file, target name, title, author,\n284 # dir menu entry, description, category)\n285 texinfo_documents = [\n286 (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n287 author, 'SecureDrop', 'One line description of project.',\n288 'Miscellaneous'),\n289 ]\n290 \n291 # Documents to append as an appendix to all manuals.\n292 # texinfo_appendices = []\n293 \n294 # If false, no module index is generated.\n295 # texinfo_domain_indices = True\n296 \n297 # How to display URL addresses: 'footnote', 'no', or 'inline'.\n298 # texinfo_show_urls = 'footnote'\n299 \n300 # If true, do not generate a @detailmenu in the \"Top\" node's menu.\n301 # texinfo_no_detailmenu = False\n302\n[end of docs/conf.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -57,9 +57,9 @@\n # built documents.\n #\n # The short X.Y version.\n-version = '0.9.0~rc1'\n+version = '0.9.0~rc2'\n # The full version, including alpha/beta/rc tags.\n-release = '0.9.0~rc1'\n+release = '0.9.0~rc2'\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\ndiff --git a/securedrop/version.py b/securedrop/version.py\n--- a/securedrop/version.py\n+++ b/securedrop/version.py\n@@ -1 +1 @@\n-__version__ = '0.9.0~rc1'\n+__version__ = '0.9.0~rc2'\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/docs/conf.py b/docs/conf.py\\n--- a/docs/conf.py\\n+++ b/docs/conf.py\\n@@ -57,9 +57,9 @@\\n # built documents.\\n #\\n # The short X.Y version.\\n-version = '0.9.0~rc1'\\n+version = '0.9.0~rc2'\\n # The full version, including alpha/beta/rc tags.\\n-release = '0.9.0~rc1'\\n+release = '0.9.0~rc2'\\n \\n # The language for content autogenerated by Sphinx. 
Refer to documentation\\n # for a list of supported languages.\\ndiff --git a/securedrop/version.py b/securedrop/version.py\\n--- a/securedrop/version.py\\n+++ b/securedrop/version.py\\n@@ -1 +1 @@\\n-__version__ = '0.9.0~rc1'\\n+__version__ = '0.9.0~rc2'\\n\", \"issue\": \"Builder image needs updates\\n## Description\\r\\n\\r\\nsd-builder image needs to be updated.\\r\\n\\r\\n## Steps to Reproduce\\r\\n\\r\\n`make build-debs` and observe error.\\r\\n\\r\\n## Expected Behavior\\r\\n\\r\\n`make build-debs` should exit without error.\\r\\n\\r\\n## Actual Behavior\\r\\n\\r\\n`make-build-debs` returns an error, security updates are needed for the container.\\r\\n## Comments\\r\\nInstructions are available here:\\r\\nhttps://docs.securedrop.org/en/latest/development/dockerbuildmaint.html\\n\", \"before_files\": [{\"content\": \"__version__ = '0.9.0~rc1'\\n\", \"path\": \"securedrop/version.py\"}, {\"content\": \"# -*- coding: utf-8 -*-\\n#\\n# SecureDrop documentation build configuration file, created by\\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\\n#\\n# This file is execfile()d with the current directory set to its\\n# containing dir.\\n#\\n# Note that not all possible configuration values are present in this\\n# autogenerated file.\\n#\\n# All configuration values have a default; values that are commented out\\n# serve to show the default.\\n\\nimport os\\n\\n# Detect if we're being built by Read the Docs\\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\\n\\n# If extensions (or modules to document with autodoc) are in another directory,\\n# add these directories to sys.path here. If the directory is relative to the\\n# documentation root, use os.path.abspath to make it absolute, like shown here.\\n# sys.path.insert(0, os.path.abspath('.'))\\n\\n# -- General configuration ------------------------------------------------\\n\\n# If your documentation needs a minimal Sphinx version, state it here.\\n# needs_sphinx = '1.0'\\n\\n# Add any Sphinx extension module names here, as strings. They can be\\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\\n# ones.\\nextensions = ['sphinx.ext.todo', ]\\n\\n# Add any paths that contain templates here, relative to this directory.\\ntemplates_path = ['_templates']\\n\\n# The suffix(es) of source filenames.\\n# You can specify multiple suffix as a list of string:\\n# source_suffix = ['.rst', '.md']\\nsource_suffix = '.rst'\\n\\n# The encoding of source files.\\n# source_encoding = 'utf-8-sig'\\n\\n# The master toctree document.\\nmaster_doc = 'index'\\n\\n# General information about the project.\\nproject = u'SecureDrop'\\ncopyright = u'2017, Freedom of the Press Foundation'\\nauthor = u'SecureDrop Team and Contributors'\\n\\n# The version info for the project you're documenting, acts as replacement for\\n# |version| and |release|, also used in various other places throughout the\\n# built documents.\\n#\\n# The short X.Y version.\\nversion = '0.9.0~rc1'\\n# The full version, including alpha/beta/rc tags.\\nrelease = '0.9.0~rc1'\\n\\n# The language for content autogenerated by Sphinx. 
Refer to documentation\\n# for a list of supported languages.\\n#\\n# This is also used if you do content translation via gettext catalogs.\\n# Usually you set \\\"language\\\" from the command line for these cases.\\nlanguage = None\\n\\n# There are two options for replacing |today|: either, you set today to some\\n# non-false value, then it is used:\\n# today = ''\\n# Else, today_fmt is used as the format for a strftime call.\\n# today_fmt = '%B %d, %Y'\\n\\n# List of patterns, relative to source directory, that match files and\\n# directories to ignore when looking for source files.\\nexclude_patterns = ['_build']\\n\\n# The reST default role (used for this markup: `text`) to use for all\\n# documents.\\n# default_role = None\\n\\n# If true, '()' will be appended to :func: etc. cross-reference text.\\n# add_function_parentheses = True\\n\\n# If true, the current module name will be prepended to all description\\n# unit titles (such as .. function::).\\n# add_module_names = True\\n\\n# If true, sectionauthor and moduleauthor directives will be shown in the\\n# output. They are ignored by default.\\n# show_authors = False\\n\\n# The name of the Pygments (syntax highlighting) style to use.\\npygments_style = 'sphinx'\\n\\n# A list of ignored prefixes for module index sorting.\\n# modindex_common_prefix = []\\n\\n# If true, keep warnings as \\\"system message\\\" paragraphs in the built documents.\\n# keep_warnings = False\\n\\n# If true, `todo` and `todoList` produce output, else they produce nothing.\\ntodo_include_todos = False\\n\\n\\n# -- Options for HTML output ----------------------------------------------\\n\\n# The theme to use for HTML and HTML Help pages. See the documentation for\\n# a list of builtin themes.\\nif on_rtd:\\n html_theme = 'default'\\nelse:\\n try:\\n # If you want to build the docs locally using the RTD theme,\\n # you may need to install it: ``pip install sphinx_rtd_theme``.\\n # https://github.com/snide/sphinx_rtd_theme#via-package\\n import sphinx_rtd_theme\\n html_theme = \\\"sphinx_rtd_theme\\\"\\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\\n except ImportError:\\n # This theme is included with Sphinx and is quite nice (based\\n # on the Pocoo themes), but since we're using the RTD theme\\n # for the production docs, it's best to use that to avoid\\n # issues due to discrepancies between the themes.\\n html_theme = 'alabaster'\\n\\n# Theme options are theme-specific and customize the look and feel of a theme\\n# further. For a list of options available for each theme, see the\\n# documentation.\\n# html_theme_options = {}\\n\\n# Add any paths that contain custom themes here, relative to this directory.\\n# html_theme_path = []\\n\\n# The name for this set of Sphinx documents. If None, it defaults to\\n# \\\" v documentation\\\".\\n# html_title = None\\n\\n# A shorter title for the navigation bar. Default is the same as html_title.\\n# html_short_title = None\\n\\n# The name of an image file (relative to this directory) to place at the top\\n# of the sidebar.\\nhtml_logo = '../securedrop/static/i/favicon.png'\\n\\n# The name of an image file (within the static path) to use as favicon of the\\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\\n# pixels large.\\n# html_favicon = None\\n\\n# Add any paths that contain custom static files (such as style sheets) here,\\n# relative to this directory. 
They are copied after the builtin static files,\\n# so a file named \\\"default.css\\\" will overwrite the builtin \\\"default.css\\\".\\n# html_static_path = ['_static']\\n\\n# Add any extra paths that contain custom files (such as robots.txt or\\n# .htaccess) here, relative to this directory. These files are copied\\n# directly to the root of the documentation.\\n# html_extra_path = []\\n\\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\\n# using the given strftime format.\\n# html_last_updated_fmt = '%b %d, %Y'\\n\\n# If true, SmartyPants will be used to convert quotes and dashes to\\n# typographically correct entities.\\n# html_use_smartypants = True\\n\\n# Custom sidebar templates, maps document names to template names.\\n# html_sidebars = {}\\n\\n# Additional templates that should be rendered to pages, maps page names to\\n# template names.\\n# html_additional_pages = {}\\n\\n# If false, no module index is generated.\\n# html_domain_indices = True\\n\\n# If false, no index is generated.\\n# html_use_index = True\\n\\n# If true, the index is split into individual pages for each letter.\\n# html_split_index = False\\n\\n# If true, links to the reST sources are added to the pages.\\n# html_show_sourcelink = True\\n\\n# If true, \\\"Created using Sphinx\\\" is shown in the HTML footer. Default is True.\\n# html_show_sphinx = True\\n\\n# If true, \\\"(C) Copyright ...\\\" is shown in the HTML footer. Default is True.\\n# html_show_copyright = True\\n\\n# If true, an OpenSearch description file will be output, and all pages will\\n# contain a tag referring to it. The value of this option must be the\\n# base URL from which the finished HTML is served.\\n# html_use_opensearch = ''\\n\\n# This is the file name suffix for HTML files (e.g. \\\".xhtml\\\").\\n# html_file_suffix = None\\n\\n# Language to be used for generating the HTML full-text search index.\\n# Sphinx supports the following languages:\\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\\n# html_search_language = 'en'\\n\\n# A dictionary with options for the search language support, empty by default.\\n# Now only 'ja' uses this config value\\n# html_search_options = {'type': 'default'}\\n\\n# The name of a javascript file (relative to the configuration directory) that\\n# implements a search results scorer. If empty, the default will be used.\\n# html_search_scorer = 'scorer.js'\\n\\n# Output file base name for HTML help builder.\\nhtmlhelp_basename = 'SecureDropdoc'\\n\\n# -- Options for LaTeX output ---------------------------------------------\\n\\nlatex_elements = {\\n # The paper size ('letterpaper' or 'a4paper').\\n # 'papersize': 'letterpaper',\\n\\n # The font size ('10pt', '11pt' or '12pt').\\n # 'pointsize': '10pt',\\n\\n # Additional stuff for the LaTeX preamble.\\n # 'preamble': '',\\n\\n # Latex figure (float) alignment\\n # 'figure_align': 'htbp',\\n}\\n\\n# Grouping the document tree into LaTeX files. 
List of tuples\\n# (source start file, target name, title,\\n# author, documentclass [howto, manual, or own class]).\\nlatex_documents = [\\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\\n author, 'manual'),\\n]\\n\\n# The name of an image file (relative to this directory) to place at the top of\\n# the title page.\\n# latex_logo = None\\n\\n# For \\\"manual\\\" documents, if this is true, then toplevel headings are parts,\\n# not chapters.\\n# latex_use_parts = False\\n\\n# If true, show page references after internal links.\\n# latex_show_pagerefs = False\\n\\n# If true, show URL addresses after external links.\\n# latex_show_urls = False\\n\\n# Documents to append as an appendix to all manuals.\\n# latex_appendices = []\\n\\n# If false, no module index is generated.\\n# latex_domain_indices = True\\n\\n\\n# -- Options for manual page output ---------------------------------------\\n\\n# One entry per manual page. List of tuples\\n# (source start file, name, description, authors, manual section).\\nman_pages = [\\n (master_doc, 'securedrop', u'SecureDrop Documentation',\\n [author], 1)\\n]\\n\\n# If true, show URL addresses after external links.\\n# man_show_urls = False\\n\\n\\n# -- Options for Texinfo output -------------------------------------------\\n\\n# Grouping the document tree into Texinfo files. List of tuples\\n# (source start file, target name, title, author,\\n# dir menu entry, description, category)\\ntexinfo_documents = [\\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\\n author, 'SecureDrop', 'One line description of project.',\\n 'Miscellaneous'),\\n]\\n\\n# Documents to append as an appendix to all manuals.\\n# texinfo_appendices = []\\n\\n# If false, no module index is generated.\\n# texinfo_domain_indices = True\\n\\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\\n# texinfo_show_urls = 'footnote'\\n\\n# If true, do not generate a @detailmenu in the \\\"Top\\\" node's menu.\\n# texinfo_no_detailmenu = False\\n\", \"path\": \"docs/conf.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":4023,"string":"4,023"},"num_tokens_diff":{"kind":"number","value":200,"string":"200"}}},{"rowIdx":18129,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_12398"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"pre-commit__pre-commit-1590"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nimprove `healthy()` check for node\nSee pre-commit/actions#45\r\n\r\nfor `language_version: system` hooks this looks like:\r\n\r\n```\r\neslint...................................................................Failed\r\n- hook id: eslint\r\n- exit code: 127\r\n\r\n/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory\r\n/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory\r\n\r\n##[error]The process '/opt/hostedtoolcache/Python/3.8.5/x64/bin/pre-commit' failed with exit code 1\r\n```\r\n\r\n\n\n\n\n[start of pre_commit/languages/node.py]\n1 import contextlib\n2 import functools\n3 import os\n4 import sys\n5 from typing import Generator\n6 from typing import Sequence\n7 from typing import Tuple\n8 \n9 import pre_commit.constants as C\n10 from pre_commit import 
parse_shebang\n11 from pre_commit.envcontext import envcontext\n12 from pre_commit.envcontext import PatchesT\n13 from pre_commit.envcontext import UNSET\n14 from pre_commit.envcontext import Var\n15 from pre_commit.hook import Hook\n16 from pre_commit.languages import helpers\n17 from pre_commit.languages.python import bin_dir\n18 from pre_commit.prefix import Prefix\n19 from pre_commit.util import clean_path_on_failure\n20 from pre_commit.util import cmd_output\n21 from pre_commit.util import cmd_output_b\n22 \n23 ENVIRONMENT_DIR = 'node_env'\n24 healthy = helpers.basic_healthy\n25 \n26 \n27 @functools.lru_cache(maxsize=1)\n28 def get_default_version() -> str:\n29 # nodeenv does not yet support `-n system` on windows\n30 if sys.platform == 'win32':\n31 return C.DEFAULT\n32 # if node is already installed, we can save a bunch of setup time by\n33 # using the installed version\n34 elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):\n35 return 'system'\n36 else:\n37 return C.DEFAULT\n38 \n39 \n40 def _envdir(prefix: Prefix, version: str) -> str:\n41 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n42 return prefix.path(directory)\n43 \n44 \n45 def get_env_patch(venv: str) -> PatchesT:\n46 if sys.platform == 'cygwin': # pragma: no cover\n47 _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n48 install_prefix = fr'{win_venv.strip()}\\bin'\n49 lib_dir = 'lib'\n50 elif sys.platform == 'win32': # pragma: no cover\n51 install_prefix = bin_dir(venv)\n52 lib_dir = 'Scripts'\n53 else: # pragma: win32 no cover\n54 install_prefix = venv\n55 lib_dir = 'lib'\n56 return (\n57 ('NODE_VIRTUAL_ENV', venv),\n58 ('NPM_CONFIG_PREFIX', install_prefix),\n59 ('npm_config_prefix', install_prefix),\n60 ('NPM_CONFIG_USERCONFIG', UNSET),\n61 ('npm_config_userconfig', UNSET),\n62 ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n63 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n64 )\n65 \n66 \n67 @contextlib.contextmanager\n68 def in_env(\n69 prefix: Prefix,\n70 language_version: str,\n71 ) -> Generator[None, None, None]:\n72 with envcontext(get_env_patch(_envdir(prefix, language_version))):\n73 yield\n74 \n75 \n76 def install_environment(\n77 prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n78 ) -> None:\n79 additional_dependencies = tuple(additional_dependencies)\n80 assert prefix.exists('package.json')\n81 envdir = _envdir(prefix, version)\n82 \n83 # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n84 if sys.platform == 'win32': # pragma: no cover\n85 envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n86 with clean_path_on_failure(envdir):\n87 cmd = [\n88 sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,\n89 ]\n90 if version != C.DEFAULT:\n91 cmd.extend(['-n', version])\n92 cmd_output_b(*cmd)\n93 \n94 with in_env(prefix, version):\n95 # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n96 # install as if we installed from git\n97 helpers.run_setup_cmd(prefix, ('npm', 'install'))\n98 helpers.run_setup_cmd(\n99 prefix,\n100 ('npm', 'install', '-g', '.', *additional_dependencies),\n101 )\n102 \n103 \n104 def run_hook(\n105 hook: Hook,\n106 file_args: Sequence[str],\n107 color: bool,\n108 ) -> Tuple[int, bytes]:\n109 with in_env(hook.prefix, hook.language_version):\n110 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n111\n[end of pre_commit/languages/node.py]\n\n\nI need you to solve this issue by generating a single 
patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -21,7 +21,6 @@\n from pre_commit.util import cmd_output_b\n \n ENVIRONMENT_DIR = 'node_env'\n-healthy = helpers.basic_healthy\n \n \n @functools.lru_cache(maxsize=1)\n@@ -73,6 +72,12 @@\n yield\n \n \n+def healthy(prefix: Prefix, language_version: str) -> bool:\n+ with in_env(prefix, language_version):\n+ retcode, _, _ = cmd_output_b('node', '--version', retcode=None)\n+ return retcode == 0\n+\n+\n def install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n ) -> None:\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\\n--- a/pre_commit/languages/node.py\\n+++ b/pre_commit/languages/node.py\\n@@ -21,7 +21,6 @@\\n from pre_commit.util import cmd_output_b\\n \\n ENVIRONMENT_DIR = 'node_env'\\n-healthy = helpers.basic_healthy\\n \\n \\n @functools.lru_cache(maxsize=1)\\n@@ -73,6 +72,12 @@\\n yield\\n \\n \\n+def healthy(prefix: Prefix, language_version: str) -> bool:\\n+ with in_env(prefix, language_version):\\n+ retcode, _, _ = cmd_output_b('node', '--version', retcode=None)\\n+ return retcode == 0\\n+\\n+\\n def install_environment(\\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\\n ) -> None:\\n\", \"issue\": \"improve `healthy()` check for node\\nSee pre-commit/actions#45\\r\\n\\r\\nfor `language_version: system` hooks this looks like:\\r\\n\\r\\n```\\r\\neslint...................................................................Failed\\r\\n- hook id: eslint\\r\\n- exit code: 127\\r\\n\\r\\n/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory\\r\\n/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory\\r\\n\\r\\n##[error]The process '/opt/hostedtoolcache/Python/3.8.5/x64/bin/pre-commit' failed with exit code 1\\r\\n```\\r\\n\\r\\n\\n\", \"before_files\": [{\"content\": \"import contextlib\\nimport functools\\nimport os\\nimport sys\\nfrom typing import Generator\\nfrom typing import Sequence\\nfrom typing import Tuple\\n\\nimport pre_commit.constants as C\\nfrom pre_commit import 
parse_shebang\\nfrom pre_commit.envcontext import envcontext\\nfrom pre_commit.envcontext import PatchesT\\nfrom pre_commit.envcontext import UNSET\\nfrom pre_commit.envcontext import Var\\nfrom pre_commit.hook import Hook\\nfrom pre_commit.languages import helpers\\nfrom pre_commit.languages.python import bin_dir\\nfrom pre_commit.prefix import Prefix\\nfrom pre_commit.util import clean_path_on_failure\\nfrom pre_commit.util import cmd_output\\nfrom pre_commit.util import cmd_output_b\\n\\nENVIRONMENT_DIR = 'node_env'\\nhealthy = helpers.basic_healthy\\n\\n\\n@functools.lru_cache(maxsize=1)\\ndef get_default_version() -> str:\\n # nodeenv does not yet support `-n system` on windows\\n if sys.platform == 'win32':\\n return C.DEFAULT\\n # if node is already installed, we can save a bunch of setup time by\\n # using the installed version\\n elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):\\n return 'system'\\n else:\\n return C.DEFAULT\\n\\n\\ndef _envdir(prefix: Prefix, version: str) -> str:\\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\\n return prefix.path(directory)\\n\\n\\ndef get_env_patch(venv: str) -> PatchesT:\\n if sys.platform == 'cygwin': # pragma: no cover\\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\\n install_prefix = fr'{win_venv.strip()}\\\\bin'\\n lib_dir = 'lib'\\n elif sys.platform == 'win32': # pragma: no cover\\n install_prefix = bin_dir(venv)\\n lib_dir = 'Scripts'\\n else: # pragma: win32 no cover\\n install_prefix = venv\\n lib_dir = 'lib'\\n return (\\n ('NODE_VIRTUAL_ENV', venv),\\n ('NPM_CONFIG_PREFIX', install_prefix),\\n ('npm_config_prefix', install_prefix),\\n ('NPM_CONFIG_USERCONFIG', UNSET),\\n ('npm_config_userconfig', UNSET),\\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\\n )\\n\\n\\n@contextlib.contextmanager\\ndef in_env(\\n prefix: Prefix,\\n language_version: str,\\n) -> Generator[None, None, None]:\\n with envcontext(get_env_patch(_envdir(prefix, language_version))):\\n yield\\n\\n\\ndef install_environment(\\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\\n) -> None:\\n additional_dependencies = tuple(additional_dependencies)\\n assert prefix.exists('package.json')\\n envdir = _envdir(prefix, version)\\n\\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\\n if sys.platform == 'win32': # pragma: no cover\\n envdir = fr'\\\\\\\\?\\\\{os.path.normpath(envdir)}'\\n with clean_path_on_failure(envdir):\\n cmd = [\\n sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,\\n ]\\n if version != C.DEFAULT:\\n cmd.extend(['-n', version])\\n cmd_output_b(*cmd)\\n\\n with in_env(prefix, version):\\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\\n # install as if we installed from git\\n helpers.run_setup_cmd(prefix, ('npm', 'install'))\\n helpers.run_setup_cmd(\\n prefix,\\n ('npm', 'install', '-g', '.', *additional_dependencies),\\n )\\n\\n\\ndef run_hook(\\n hook: Hook,\\n file_args: Sequence[str],\\n color: bool,\\n) -> Tuple[int, bytes]:\\n with in_env(hook.prefix, hook.language_version):\\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\\n\", \"path\": 
\"pre_commit/languages/node.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1885,"string":"1,885"},"num_tokens_diff":{"kind":"number","value":183,"string":"183"}}},{"rowIdx":18130,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_1654"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"open-telemetry__opentelemetry-python-contrib-1515"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nAdd readthedocs documentation for remoulade instrumentation\nPart of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)\n\n\n\n[start of instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py]\n1 # Copyright The OpenTelemetry Authors\n2 #\n3 # Licensed under the Apache License, Version 2.0 (the \"License\");\n4 # you may not use this file except in compliance with the License.\n5 # You may obtain a copy of the License at\n6 #\n7 # http://www.apache.org/licenses/LICENSE-2.0\n8 #\n9 # Unless required by applicable law or agreed to in writing, software\n10 # distributed under the License is distributed on an \"AS IS\" BASIS,\n11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n12 # See the License for the specific language governing permissions and\n13 # limitations under the License.\n14 \n15 \"\"\"\n16 Usage\n17 -----\n18 \n19 * Start broker backend\n20 \n21 ::\n22 \n23 docker run -p 5672:5672 rabbitmq\n24 \n25 * Run instrumented actor\n26 \n27 .. code-block:: python\n28 \n29 from remoulade.brokers.rabbitmq import RabbitmqBroker\n30 import remoulade\n31 \n32 RemouladeInstrumentor().instrument()\n33 \n34 broker = RabbitmqBroker()\n35 remoulade.set_broker(broker)\n36 \n37 @remoulade.actor\n38 def multiply(x, y):\n39 return x * y\n40 \n41 broker.declare_actor(count_words)\n42 \n43 multiply.send(43, 51)\n44 \n45 \"\"\"\n46 from typing import Collection\n47 \n48 from remoulade import Middleware, broker\n49 \n50 from opentelemetry import trace\n51 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor\n52 from opentelemetry.instrumentation.remoulade import utils\n53 from opentelemetry.instrumentation.remoulade.package import _instruments\n54 from opentelemetry.instrumentation.remoulade.version import __version__\n55 from opentelemetry.propagate import extract, inject\n56 from opentelemetry.semconv.trace import SpanAttributes\n57 \n58 _REMOULADE_MESSAGE_TAG_KEY = \"remoulade.action\"\n59 _REMOULADE_MESSAGE_SEND = \"send\"\n60 _REMOULADE_MESSAGE_RUN = \"run\"\n61 \n62 _REMOULADE_MESSAGE_NAME_KEY = \"remoulade.actor_name\"\n63 \n64 _REMOULADE_MESSAGE_RETRY_COUNT_KEY = \"remoulade.retry_count\"\n65 \n66 \n67 class _InstrumentationMiddleware(Middleware):\n68 def __init__(self, _tracer):\n69 self._tracer = _tracer\n70 self._span_registry = {}\n71 \n72 def before_process_message(self, _broker, message):\n73 if \"trace_ctx\" not in message.options:\n74 return\n75 \n76 trace_ctx = extract(message.options[\"trace_ctx\"])\n77 retry_count = message.options.get(\"retries\", 0)\n78 operation_name = utils.get_operation_name(\n79 \"before_process_message\", retry_count\n80 )\n81 span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n82 \n83 span = self._tracer.start_span(\n84 operation_name,\n85 kind=trace.SpanKind.CONSUMER,\n86 context=trace_ctx,\n87 
attributes=span_attributes,\n88 )\n89 \n90 activation = trace.use_span(span, end_on_exit=True)\n91 activation.__enter__() # pylint: disable=E1101\n92 \n93 utils.attach_span(\n94 self._span_registry, message.message_id, (span, activation)\n95 )\n96 \n97 def after_process_message(\n98 self, _broker, message, *, result=None, exception=None\n99 ):\n100 span, activation = utils.retrieve_span(\n101 self._span_registry, message.message_id\n102 )\n103 \n104 if span is None:\n105 # no existing span found for message_id\n106 return\n107 \n108 if span.is_recording():\n109 span.set_attributes(\n110 {\n111 _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_RUN,\n112 _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n113 SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n114 }\n115 )\n116 \n117 activation.__exit__(None, None, None)\n118 utils.detach_span(self._span_registry, message.message_id)\n119 \n120 def before_enqueue(self, _broker, message, delay):\n121 retry_count = message.options.get(\"retries\", 0)\n122 operation_name = utils.get_operation_name(\n123 \"before_enqueue\", retry_count\n124 )\n125 span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n126 \n127 span = self._tracer.start_span(\n128 operation_name,\n129 kind=trace.SpanKind.PRODUCER,\n130 attributes=span_attributes,\n131 )\n132 \n133 if span.is_recording():\n134 span.set_attributes(\n135 {\n136 _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_SEND,\n137 _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n138 SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n139 }\n140 )\n141 \n142 activation = trace.use_span(span, end_on_exit=True)\n143 activation.__enter__() # pylint: disable=E1101\n144 \n145 utils.attach_span(\n146 self._span_registry,\n147 message.message_id,\n148 (span, activation),\n149 is_publish=True,\n150 )\n151 \n152 if \"trace_ctx\" not in message.options:\n153 message.options[\"trace_ctx\"] = {}\n154 inject(message.options[\"trace_ctx\"])\n155 \n156 def after_enqueue(self, _broker, message, delay, exception=None):\n157 _, activation = utils.retrieve_span(\n158 self._span_registry, message.message_id, is_publish=True\n159 )\n160 \n161 if activation is None:\n162 # no existing span found for message_id\n163 return\n164 \n165 activation.__exit__(None, None, None)\n166 utils.detach_span(\n167 self._span_registry, message.message_id, is_publish=True\n168 )\n169 \n170 \n171 class RemouladeInstrumentor(BaseInstrumentor):\n172 def instrumentation_dependencies(self) -> Collection[str]:\n173 return _instruments\n174 \n175 def _instrument(self, **kwargs):\n176 tracer_provider = kwargs.get(\"tracer_provider\")\n177 \n178 # pylint: disable=attribute-defined-outside-init\n179 self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n180 instrumentation_middleware = _InstrumentationMiddleware(self._tracer)\n181 \n182 broker.add_extra_default_middleware(instrumentation_middleware)\n183 \n184 def _uninstrument(self, **kwargs):\n185 broker.remove_extra_default_middleware(_InstrumentationMiddleware)\n186\n[end of instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\n@@ -16,13 +16,13 @@\n Usage\n -----\n \n-* Start broker backend\n+Start broker backend\n \n ::\n \n docker run -p 5672:5672 rabbitmq\n \n-* Run instrumented actor\n+Run instrumented actor\n \n .. code-block:: python\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\\n--- a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\\n+++ b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\\n@@ -16,13 +16,13 @@\\n Usage\\n -----\\n \\n-* Start broker backend\\n+Start broker backend\\n \\n ::\\n \\n docker run -p 5672:5672 rabbitmq\\n \\n-* Run instrumented actor\\n+Run instrumented actor\\n \\n .. code-block:: python\\n\", \"issue\": \"Add readthedocs documentation for remoulade instrumentation\\nPart of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)\\n\", \"before_files\": [{\"content\": \"# Copyright The OpenTelemetry Authors\\n#\\n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n#\\n# http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\\n\\n\\\"\\\"\\\"\\nUsage\\n-----\\n\\n* Start broker backend\\n\\n::\\n\\n docker run -p 5672:5672 rabbitmq\\n\\n* Run instrumented actor\\n\\n.. 
code-block:: python\\n\\n from remoulade.brokers.rabbitmq import RabbitmqBroker\\n import remoulade\\n\\n RemouladeInstrumentor().instrument()\\n\\n broker = RabbitmqBroker()\\n remoulade.set_broker(broker)\\n\\n @remoulade.actor\\n def multiply(x, y):\\n return x * y\\n\\n broker.declare_actor(count_words)\\n\\n multiply.send(43, 51)\\n\\n\\\"\\\"\\\"\\nfrom typing import Collection\\n\\nfrom remoulade import Middleware, broker\\n\\nfrom opentelemetry import trace\\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\\nfrom opentelemetry.instrumentation.remoulade import utils\\nfrom opentelemetry.instrumentation.remoulade.package import _instruments\\nfrom opentelemetry.instrumentation.remoulade.version import __version__\\nfrom opentelemetry.propagate import extract, inject\\nfrom opentelemetry.semconv.trace import SpanAttributes\\n\\n_REMOULADE_MESSAGE_TAG_KEY = \\\"remoulade.action\\\"\\n_REMOULADE_MESSAGE_SEND = \\\"send\\\"\\n_REMOULADE_MESSAGE_RUN = \\\"run\\\"\\n\\n_REMOULADE_MESSAGE_NAME_KEY = \\\"remoulade.actor_name\\\"\\n\\n_REMOULADE_MESSAGE_RETRY_COUNT_KEY = \\\"remoulade.retry_count\\\"\\n\\n\\nclass _InstrumentationMiddleware(Middleware):\\n def __init__(self, _tracer):\\n self._tracer = _tracer\\n self._span_registry = {}\\n\\n def before_process_message(self, _broker, message):\\n if \\\"trace_ctx\\\" not in message.options:\\n return\\n\\n trace_ctx = extract(message.options[\\\"trace_ctx\\\"])\\n retry_count = message.options.get(\\\"retries\\\", 0)\\n operation_name = utils.get_operation_name(\\n \\\"before_process_message\\\", retry_count\\n )\\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\\n\\n span = self._tracer.start_span(\\n operation_name,\\n kind=trace.SpanKind.CONSUMER,\\n context=trace_ctx,\\n attributes=span_attributes,\\n )\\n\\n activation = trace.use_span(span, end_on_exit=True)\\n activation.__enter__() # pylint: disable=E1101\\n\\n utils.attach_span(\\n self._span_registry, message.message_id, (span, activation)\\n )\\n\\n def after_process_message(\\n self, _broker, message, *, result=None, exception=None\\n ):\\n span, activation = utils.retrieve_span(\\n self._span_registry, message.message_id\\n )\\n\\n if span is None:\\n # no existing span found for message_id\\n return\\n\\n if span.is_recording():\\n span.set_attributes(\\n {\\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_RUN,\\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\\n }\\n )\\n\\n activation.__exit__(None, None, None)\\n utils.detach_span(self._span_registry, message.message_id)\\n\\n def before_enqueue(self, _broker, message, delay):\\n retry_count = message.options.get(\\\"retries\\\", 0)\\n operation_name = utils.get_operation_name(\\n \\\"before_enqueue\\\", retry_count\\n )\\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\\n\\n span = self._tracer.start_span(\\n operation_name,\\n kind=trace.SpanKind.PRODUCER,\\n attributes=span_attributes,\\n )\\n\\n if span.is_recording():\\n span.set_attributes(\\n {\\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_SEND,\\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\\n }\\n )\\n\\n activation = trace.use_span(span, end_on_exit=True)\\n activation.__enter__() # pylint: disable=E1101\\n\\n utils.attach_span(\\n self._span_registry,\\n message.message_id,\\n (span, activation),\\n is_publish=True,\\n )\\n\\n if \\\"trace_ctx\\\" not in 
message.options:\\n message.options[\\\"trace_ctx\\\"] = {}\\n inject(message.options[\\\"trace_ctx\\\"])\\n\\n def after_enqueue(self, _broker, message, delay, exception=None):\\n _, activation = utils.retrieve_span(\\n self._span_registry, message.message_id, is_publish=True\\n )\\n\\n if activation is None:\\n # no existing span found for message_id\\n return\\n\\n activation.__exit__(None, None, None)\\n utils.detach_span(\\n self._span_registry, message.message_id, is_publish=True\\n )\\n\\n\\nclass RemouladeInstrumentor(BaseInstrumentor):\\n def instrumentation_dependencies(self) -> Collection[str]:\\n return _instruments\\n\\n def _instrument(self, **kwargs):\\n tracer_provider = kwargs.get(\\\"tracer_provider\\\")\\n\\n # pylint: disable=attribute-defined-outside-init\\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\\n instrumentation_middleware = _InstrumentationMiddleware(self._tracer)\\n\\n broker.add_extra_default_middleware(instrumentation_middleware)\\n\\n def _uninstrument(self, **kwargs):\\n broker.remove_extra_default_middleware(_InstrumentationMiddleware)\\n\", \"path\": \"instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2345,"string":"2,345"},"num_tokens_diff":{"kind":"number","value":186,"string":"186"}}},{"rowIdx":18131,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_22938"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"bridgecrewio__checkov-215"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nDon't crash on IAM Policies that do not have an \"effect\"\n**Describe the bug**\r\n[`Effect`](https://www.terraform.io/docs/providers/aws/d/iam_policy_document.html#effect) is an optional token on AWS IAM policy documents. 
It will default to Allow.\r\nWhile it is perhaps good to be explicit, it should not crash Checkov.\r\n\r\n```\r\nERROR:checkov.terraform.checks.data.aws.AdminPolicyDocument:Failed to run check: Ensure IAM policies that allow full \"*-*\" administrative privileges are not created for configuration: {'statement': ....\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/checkov\", line 5, in \r\n run()\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/main.py\", line 47, in run\r\n scan_reports = runner_registry.run(root_folder, external_checks_dir=args.external_checks_dir, files=file)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py\", line 20, in run\r\n scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 38, in run\r\n self.check_tf_definition(report, root_folder, tf_definitions)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 66, in check_tf_definition\r\n self.run_block(definition[1][block_type], definitions_context, full_file_path, report, scanned_file,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 88, in run_block\r\n results = registry.scan(scanned_file, entity,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py\", line 48, in scan\r\n result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\", line 44, in run\r\n raise e\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\", line 33, in run\r\n check_result['result'] = self.scan_entity_conf(entity_configuration)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/base_check.py\", line 19, in scan_entity_conf\r\n return self.scan_data_conf(conf)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\", line 23, in scan_data_conf\r\n if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\r\nKeyError: 'effect'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. 
Write a terraform file:\r\n```\r\nprovider \"aws\" {\r\n region = \"us-east-1\"\r\n}\r\n\r\ndata \"aws_iam_policy_document\" \"test-policy\" {\r\n statement {\r\n actions = [\"s3:ListBucket\"]\r\n resources = [\"arn:aws:s3:::*\"]\r\n }\r\n}\r\n\r\nresource \"aws_iam_policy\" \"test-policy\" {\r\n name = \"test-policy\"\r\n policy = data.aws_iam_policy_document.test-policy.json\r\n}\r\n\r\n```\r\n\r\n**Expected behavior**\r\nThis should not crash\r\n\r\n**Additional context**\r\nWhen trying to debug this, I started getting a *different* error.\r\n```Traceback (most recent call last):\r\n File \"/home/matthew/.local/bin/checkov\", line 2, in \r\n from checkov.main import run\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/main.py\", line 5, in \r\n from checkov.common.util.docs_generator import print_checks\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/common/util/docs_generator.py\", line 5, in \r\n from checkov.terraform.checks.data.registry import data_registry\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/__init__.py\", line 1, in \r\n from checkov.terraform.checks.resource import *\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/__init__.py\", line 1, in \r\n from checkov.terraform.checks.resource.aws import *\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py\", line 27, in \r\n check = AdminPolicyDocument()\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py\", line 11, in __init__\r\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/base_check.py\", line 18, in __init__\r\n resource_registry.register(self)\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/common/checks/base_check_registry.py\", line 16, in register\r\n for entity in check.supported_entities:\r\nAttributeError: 'AdminPolicyDocument' object has no attribute 'supported_entities'\r\n```\r\n\n\n\n\n[start of checkov/terraform/checks/data/aws/AdminPolicyDocument.py]\n1 from checkov.terraform.checks.data.base_check import BaseDataCheck\n2 from checkov.common.models.enums import CheckResult, CheckCategories\n3 \n4 \n5 class AdminPolicyDocument(BaseDataCheck):\n6 def __init__(self):\n7 name = \"Ensure IAM policies that allow full \\\"*-*\\\" administrative privileges are not created\"\n8 id = \"CKV_AWS_1\"\n9 supported_data = ['aws_iam_policy_document']\n10 categories = [CheckCategories.IAM]\n11 super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n12 \n13 def scan_data_conf(self, conf):\n14 \"\"\"\n15 validates iam policy document\n16 https://learn.hashicorp.com/terraform/aws/iam-policy\n17 :param conf: aws_kms_key configuration\n18 :return: \n19 \"\"\"\n20 key = 'statement'\n21 if key in conf.keys():\n22 for statement in conf[key]:\n23 if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\n24 return CheckResult.FAILED\n25 return CheckResult.PASSED\n26 \n27 \n28 check = AdminPolicyDocument()\n29\n[end of checkov/terraform/checks/data/aws/AdminPolicyDocument.py]\n[start of checkov/terraform/checks/data/aws/StarActionPolicyDocument.py]\n1 from checkov.terraform.checks.data.base_check import 
BaseDataCheck\n2 from checkov.common.models.enums import CheckResult, CheckCategories\n3 \n4 \n5 class StarActionPolicyDocument(BaseDataCheck):\n6 def __init__(self):\n7 name = \"Ensure no IAM policies documents allow \\\"*\\\" as a statement's actions\"\n8 id = \"CKV_AWS_49\"\n9 supported_data = ['aws_iam_policy_document']\n10 categories = [CheckCategories.IAM]\n11 super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n12 \n13 def scan_data_conf(self, conf):\n14 \"\"\"\n15 validates iam policy document\n16 https://learn.hashicorp.com/terraform/aws/iam-policy\n17 :param conf: aws_kms_key configuration\n18 :return: \n19 \"\"\"\n20 key = 'statement'\n21 if key in conf.keys():\n22 for statement in conf['statement']:\n23 if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == \"Allow\":\n24 return CheckResult.FAILED\n25 return CheckResult.PASSED\n26 \n27 \n28 check = StarActionPolicyDocument()\n29\n[end of checkov/terraform/checks/data/aws/StarActionPolicyDocument.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\n--- a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\n+++ b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\n@@ -20,7 +20,8 @@\n key = 'statement'\n if key in conf.keys():\n for statement in conf[key]:\n- if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\n+ if 'actions' in statement and statement.get('effect', ['Allow'])[0] == 'Allow' and '*' in statement['actions'][0] \\\n+ and '*' in statement['resources'][0]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n \ndiff --git a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n--- a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n+++ b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n@@ -20,7 +20,7 @@\n key = 'statement'\n if key in conf.keys():\n for statement in conf['statement']:\n- if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == \"Allow\":\n+ if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':\n 
return CheckResult.FAILED\n return CheckResult.PASSED\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\\n--- a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\\n+++ b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\\n@@ -20,7 +20,8 @@\\n key = 'statement'\\n if key in conf.keys():\\n for statement in conf[key]:\\n- if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\\n+ if 'actions' in statement and statement.get('effect', ['Allow'])[0] == 'Allow' and '*' in statement['actions'][0] \\\\\\n+ and '*' in statement['resources'][0]:\\n return CheckResult.FAILED\\n return CheckResult.PASSED\\n \\ndiff --git a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\\n--- a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\\n+++ b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\\n@@ -20,7 +20,7 @@\\n key = 'statement'\\n if key in conf.keys():\\n for statement in conf['statement']:\\n- if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == \\\"Allow\\\":\\n+ if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':\\n return CheckResult.FAILED\\n return CheckResult.PASSED\\n\", \"issue\": \"Don't crash on IAM Policies that do not have an \\\"effect\\\"\\n**Describe the bug**\\r\\n[`Effect`](https://www.terraform.io/docs/providers/aws/d/iam_policy_document.html#effect) is an optional token on AWS IAM policy documents. It will default to Allow.\\r\\nWhile it is perhaps good to be explicit, it should not crash Checkov.\\r\\n\\r\\n```\\r\\nERROR:checkov.terraform.checks.data.aws.AdminPolicyDocument:Failed to run check: Ensure IAM policies that allow full \\\"*-*\\\" administrative privileges are not created for configuration: {'statement': ....\\r\\nTraceback (most recent call last):\\r\\n File \\\"/usr/local/bin/checkov\\\", line 5, in \\r\\n run()\\r\\n File \\\"/usr/local/lib/python3.8/site-packages/checkov/main.py\\\", line 47, in run\\r\\n scan_reports = runner_registry.run(root_folder, external_checks_dir=args.external_checks_dir, files=file)\\r\\n File \\\"/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py\\\", line 20, in run\\r\\n scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files)\\r\\n File \\\"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\\\", line 38, in run\\r\\n self.check_tf_definition(report, root_folder, tf_definitions)\\r\\n File \\\"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\\\", line 66, in check_tf_definition\\r\\n self.run_block(definition[1][block_type], definitions_context, full_file_path, report, scanned_file,\\r\\n File \\\"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\\\", line 88, in run_block\\r\\n results = registry.scan(scanned_file, entity,\\r\\n File \\\"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py\\\", line 48, in scan\\r\\n result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration,\\r\\n File \\\"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\\\", line 44, in run\\r\\n raise e\\r\\n File 
\\\"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\\\", line 33, in run\\r\\n check_result['result'] = self.scan_entity_conf(entity_configuration)\\r\\n File \\\"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/base_check.py\\\", line 19, in scan_entity_conf\\r\\n return self.scan_data_conf(conf)\\r\\n File \\\"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\\\", line 23, in scan_data_conf\\r\\n if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\\r\\nKeyError: 'effect'\\r\\n```\\r\\n\\r\\n**To Reproduce**\\r\\nSteps to reproduce the behavior:\\r\\n1. Write a terraform file:\\r\\n```\\r\\nprovider \\\"aws\\\" {\\r\\n region = \\\"us-east-1\\\"\\r\\n}\\r\\n\\r\\ndata \\\"aws_iam_policy_document\\\" \\\"test-policy\\\" {\\r\\n statement {\\r\\n actions = [\\\"s3:ListBucket\\\"]\\r\\n resources = [\\\"arn:aws:s3:::*\\\"]\\r\\n }\\r\\n}\\r\\n\\r\\nresource \\\"aws_iam_policy\\\" \\\"test-policy\\\" {\\r\\n name = \\\"test-policy\\\"\\r\\n policy = data.aws_iam_policy_document.test-policy.json\\r\\n}\\r\\n\\r\\n```\\r\\n\\r\\n**Expected behavior**\\r\\nThis should not crash\\r\\n\\r\\n**Additional context**\\r\\nWhen trying to debug this, I started getting a *different* error.\\r\\n```Traceback (most recent call last):\\r\\n File \\\"/home/matthew/.local/bin/checkov\\\", line 2, in \\r\\n from checkov.main import run\\r\\n File \\\"/home/matthew/.local/lib/python3.7/site-packages/checkov/main.py\\\", line 5, in \\r\\n from checkov.common.util.docs_generator import print_checks\\r\\n File \\\"/home/matthew/.local/lib/python3.7/site-packages/checkov/common/util/docs_generator.py\\\", line 5, in \\r\\n from checkov.terraform.checks.data.registry import data_registry\\r\\n File \\\"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/__init__.py\\\", line 1, in \\r\\n from checkov.terraform.checks.resource import *\\r\\n File \\\"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/__init__.py\\\", line 1, in \\r\\n from checkov.terraform.checks.resource.aws import *\\r\\n File \\\"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py\\\", line 27, in \\r\\n check = AdminPolicyDocument()\\r\\n File \\\"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py\\\", line 11, in __init__\\r\\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\\r\\n File \\\"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/base_check.py\\\", line 18, in __init__\\r\\n resource_registry.register(self)\\r\\n File \\\"/home/matthew/.local/lib/python3.7/site-packages/checkov/common/checks/base_check_registry.py\\\", line 16, in register\\r\\n for entity in check.supported_entities:\\r\\nAttributeError: 'AdminPolicyDocument' object has no attribute 'supported_entities'\\r\\n```\\r\\n\\n\", \"before_files\": [{\"content\": \"from checkov.terraform.checks.data.base_check import BaseDataCheck\\nfrom checkov.common.models.enums import CheckResult, CheckCategories\\n\\n\\nclass AdminPolicyDocument(BaseDataCheck):\\n def __init__(self):\\n name = \\\"Ensure IAM policies that allow full \\\\\\\"*-*\\\\\\\" administrative privileges are not created\\\"\\n id = \\\"CKV_AWS_1\\\"\\n supported_data = 
['aws_iam_policy_document']\\n categories = [CheckCategories.IAM]\\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\\n\\n def scan_data_conf(self, conf):\\n \\\"\\\"\\\"\\n validates iam policy document\\n https://learn.hashicorp.com/terraform/aws/iam-policy\\n :param conf: aws_kms_key configuration\\n :return: \\n \\\"\\\"\\\"\\n key = 'statement'\\n if key in conf.keys():\\n for statement in conf[key]:\\n if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\\n return CheckResult.FAILED\\n return CheckResult.PASSED\\n\\n\\ncheck = AdminPolicyDocument()\\n\", \"path\": \"checkov/terraform/checks/data/aws/AdminPolicyDocument.py\"}, {\"content\": \"from checkov.terraform.checks.data.base_check import BaseDataCheck\\nfrom checkov.common.models.enums import CheckResult, CheckCategories\\n\\n\\nclass StarActionPolicyDocument(BaseDataCheck):\\n def __init__(self):\\n name = \\\"Ensure no IAM policies documents allow \\\\\\\"*\\\\\\\" as a statement's actions\\\"\\n id = \\\"CKV_AWS_49\\\"\\n supported_data = ['aws_iam_policy_document']\\n categories = [CheckCategories.IAM]\\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\\n\\n def scan_data_conf(self, conf):\\n \\\"\\\"\\\"\\n validates iam policy document\\n https://learn.hashicorp.com/terraform/aws/iam-policy\\n :param conf: aws_kms_key configuration\\n :return: \\n \\\"\\\"\\\"\\n key = 'statement'\\n if key in conf.keys():\\n for statement in conf['statement']:\\n if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == \\\"Allow\\\":\\n return CheckResult.FAILED\\n return CheckResult.PASSED\\n\\n\\ncheck = StarActionPolicyDocument()\\n\", \"path\": \"checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2356,"string":"2,356"},"num_tokens_diff":{"kind":"number","value":366,"string":"366"}}},{"rowIdx":18132,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_41694"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"deepset-ai__haystack-7994"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nAdd `max_retries` and `timeout` params to all `AzureOpenAI` classes\n**Is your feature request related to a problem? Please describe.**\r\n\r\nCurrently all `OpenAI` related classes (e.g. 
`OpenAIDocumentEmbedder`, `OpenAIChatGenerator`) can be initialised by setting `max_retries` and `timeout` params.\r\n\r\nThe corresponding `AzureOpenAI` don't always have the same params.\r\n\r\n**Describe the solution you'd like**\r\n\r\nIt would be nice to have these params in the `AzureOpenAI` classes\r\n\r\n**Describe alternatives you've considered**\r\n\r\nSubclass `AzureOpenAI` and create custom components.\r\n\r\n**Additional context**\r\n\r\ncc @anakin87 :)\n\n\n\n[start of haystack/components/embedders/azure_document_embedder.py]\n1 # SPDX-FileCopyrightText: 2022-present deepset GmbH \n2 #\n3 # SPDX-License-Identifier: Apache-2.0\n4 \n5 import os\n6 from typing import Any, Dict, List, Optional, Tuple\n7 \n8 from openai.lib.azure import AzureOpenAI\n9 from tqdm import tqdm\n10 \n11 from haystack import Document, component, default_from_dict, default_to_dict\n12 from haystack.utils import Secret, deserialize_secrets_inplace\n13 \n14 \n15 @component\n16 class AzureOpenAIDocumentEmbedder:\n17 \"\"\"\n18 A component for computing Document embeddings using OpenAI models on Azure.\n19 \n20 Usage example:\n21 ```python\n22 from haystack import Document\n23 from haystack.components.embedders import AzureOpenAIDocumentEmbedder\n24 \n25 doc = Document(content=\"I love pizza!\")\n26 \n27 document_embedder = AzureOpenAIDocumentEmbedder()\n28 \n29 result = document_embedder.run([doc])\n30 print(result['documents'][0].embedding)\n31 \n32 # [0.017020374536514282, -0.023255806416273117, ...]\n33 ```\n34 \"\"\"\n35 \n36 def __init__(\n37 self,\n38 azure_endpoint: Optional[str] = None,\n39 api_version: Optional[str] = \"2023-05-15\",\n40 azure_deployment: str = \"text-embedding-ada-002\",\n41 dimensions: Optional[int] = None,\n42 api_key: Optional[Secret] = Secret.from_env_var(\"AZURE_OPENAI_API_KEY\", strict=False),\n43 azure_ad_token: Optional[Secret] = Secret.from_env_var(\"AZURE_OPENAI_AD_TOKEN\", strict=False),\n44 organization: Optional[str] = None,\n45 prefix: str = \"\",\n46 suffix: str = \"\",\n47 batch_size: int = 32,\n48 progress_bar: bool = True,\n49 meta_fields_to_embed: Optional[List[str]] = None,\n50 embedding_separator: str = \"\\n\",\n51 ):\n52 \"\"\"\n53 Create an AzureOpenAIDocumentEmbedder component.\n54 \n55 :param azure_endpoint:\n56 The endpoint of the deployed model.\n57 :param api_version:\n58 The version of the API to use.\n59 :param azure_deployment:\n60 The deployment of the model, usually matches the model name.\n61 :param dimensions:\n62 The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3\n63 and later models.\n64 :param api_key:\n65 The API key used for authentication.\n66 :param azure_ad_token:\n67 Microsoft Entra ID token, see Microsoft's official\n68 [Entra ID](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id)\n69 documentation for more information.\n70 Used to be called Azure Active Directory.\n71 :param organization:\n72 The Organization ID. 
See OpenAI's\n73 [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization)\n74 for more information.\n75 :param prefix:\n76 A string to add at the beginning of each text.\n77 :param suffix:\n78 A string to add at the end of each text.\n79 :param batch_size:\n80 Number of Documents to encode at once.\n81 :param progress_bar:\n82 If True shows a progress bar when running.\n83 :param meta_fields_to_embed:\n84 List of meta fields that will be embedded along with the Document text.\n85 :param embedding_separator:\n86 Separator used to concatenate the meta fields to the Document text.\n87 \"\"\"\n88 # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT\n89 azure_endpoint = azure_endpoint or os.environ.get(\"AZURE_OPENAI_ENDPOINT\")\n90 if not azure_endpoint:\n91 raise ValueError(\"Please provide an Azure endpoint or set the environment variable AZURE_OPENAI_ENDPOINT.\")\n92 \n93 if api_key is None and azure_ad_token is None:\n94 raise ValueError(\"Please provide an API key or an Azure Active Directory token.\")\n95 \n96 self.api_key = api_key\n97 self.azure_ad_token = azure_ad_token\n98 self.api_version = api_version\n99 self.azure_endpoint = azure_endpoint\n100 self.azure_deployment = azure_deployment\n101 self.dimensions = dimensions\n102 self.organization = organization\n103 self.prefix = prefix\n104 self.suffix = suffix\n105 self.batch_size = batch_size\n106 self.progress_bar = progress_bar\n107 self.meta_fields_to_embed = meta_fields_to_embed or []\n108 self.embedding_separator = embedding_separator\n109 \n110 self._client = AzureOpenAI(\n111 api_version=api_version,\n112 azure_endpoint=azure_endpoint,\n113 azure_deployment=azure_deployment,\n114 api_key=api_key.resolve_value() if api_key is not None else None,\n115 azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\n116 organization=organization,\n117 )\n118 \n119 def _get_telemetry_data(self) -> Dict[str, Any]:\n120 \"\"\"\n121 Data that is sent to Posthog for usage analytics.\n122 \"\"\"\n123 return {\"model\": self.azure_deployment}\n124 \n125 def to_dict(self) -> Dict[str, Any]:\n126 \"\"\"\n127 Serializes the component to a dictionary.\n128 \n129 :returns:\n130 Dictionary with serialized data.\n131 \"\"\"\n132 return default_to_dict(\n133 self,\n134 azure_endpoint=self.azure_endpoint,\n135 azure_deployment=self.azure_deployment,\n136 dimensions=self.dimensions,\n137 organization=self.organization,\n138 api_version=self.api_version,\n139 prefix=self.prefix,\n140 suffix=self.suffix,\n141 batch_size=self.batch_size,\n142 progress_bar=self.progress_bar,\n143 meta_fields_to_embed=self.meta_fields_to_embed,\n144 embedding_separator=self.embedding_separator,\n145 api_key=self.api_key.to_dict() if self.api_key is not None else None,\n146 azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n147 )\n148 \n149 @classmethod\n150 def from_dict(cls, data: Dict[str, Any]) -> \"AzureOpenAIDocumentEmbedder\":\n151 \"\"\"\n152 Deserializes the component from a dictionary.\n153 \n154 :param data:\n155 Dictionary to deserialize from.\n156 :returns:\n157 Deserialized component.\n158 \"\"\"\n159 deserialize_secrets_inplace(data[\"init_parameters\"], keys=[\"api_key\", \"azure_ad_token\"])\n160 return default_from_dict(cls, data)\n161 \n162 def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:\n163 \"\"\"\n164 Prepare the texts to embed by concatenating the 
Document text with the metadata fields to embed.\n165 \"\"\"\n166 texts_to_embed = []\n167 for doc in documents:\n168 meta_values_to_embed = [\n169 str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None\n170 ]\n171 \n172 text_to_embed = (\n173 self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or \"\"]) + self.suffix\n174 ).replace(\"\\n\", \" \")\n175 \n176 texts_to_embed.append(text_to_embed)\n177 return texts_to_embed\n178 \n179 def _embed_batch(self, texts_to_embed: List[str], batch_size: int) -> Tuple[List[List[float]], Dict[str, Any]]:\n180 \"\"\"\n181 Embed a list of texts in batches.\n182 \"\"\"\n183 \n184 all_embeddings: List[List[float]] = []\n185 meta: Dict[str, Any] = {\"model\": \"\", \"usage\": {\"prompt_tokens\": 0, \"total_tokens\": 0}}\n186 for i in tqdm(range(0, len(texts_to_embed), batch_size), desc=\"Embedding Texts\"):\n187 batch = texts_to_embed[i : i + batch_size]\n188 if self.dimensions is not None:\n189 response = self._client.embeddings.create(\n190 model=self.azure_deployment, dimensions=self.dimensions, input=batch\n191 )\n192 else:\n193 response = self._client.embeddings.create(model=self.azure_deployment, input=batch)\n194 \n195 # Append embeddings to the list\n196 all_embeddings.extend(el.embedding for el in response.data)\n197 \n198 # Update the meta information only once if it's empty\n199 if not meta[\"model\"]:\n200 meta[\"model\"] = response.model\n201 meta[\"usage\"] = dict(response.usage)\n202 else:\n203 # Update the usage tokens\n204 meta[\"usage\"][\"prompt_tokens\"] += response.usage.prompt_tokens\n205 meta[\"usage\"][\"total_tokens\"] += response.usage.total_tokens\n206 \n207 return all_embeddings, meta\n208 \n209 @component.output_types(documents=List[Document], meta=Dict[str, Any])\n210 def run(self, documents: List[Document]) -> Dict[str, Any]:\n211 \"\"\"\n212 Embed a list of Documents.\n213 \n214 :param documents:\n215 Documents to embed.\n216 \n217 :returns:\n218 A dictionary with the following keys:\n219 - `documents`: Documents with embeddings\n220 - `meta`: Information about the usage of the model.\n221 \"\"\"\n222 if not (isinstance(documents, list) and all(isinstance(doc, Document) for doc in documents)):\n223 raise TypeError(\"Input must be a list of Document instances. For strings, use AzureOpenAITextEmbedder.\")\n224 \n225 texts_to_embed = self._prepare_texts_to_embed(documents=documents)\n226 embeddings, meta = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)\n227 \n228 # Assign the corresponding embeddings to each document\n229 for doc, emb in zip(documents, embeddings):\n230 doc.embedding = emb\n231 \n232 return {\"documents\": documents, \"meta\": meta}\n233\n[end of haystack/components/embedders/azure_document_embedder.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/haystack/components/embedders/azure_document_embedder.py b/haystack/components/embedders/azure_document_embedder.py\n--- a/haystack/components/embedders/azure_document_embedder.py\n+++ b/haystack/components/embedders/azure_document_embedder.py\n@@ -33,7 +33,7 @@\n ```\n \"\"\"\n \n- def __init__(\n+ def __init__( # noqa: PLR0913 (too-many-arguments)\n self,\n azure_endpoint: Optional[str] = None,\n api_version: Optional[str] = \"2023-05-15\",\n@@ -48,6 +48,8 @@\n progress_bar: bool = True,\n meta_fields_to_embed: Optional[List[str]] = None,\n embedding_separator: str = \"\\n\",\n+ timeout: Optional[float] = None,\n+ max_retries: Optional[int] = None,\n ):\n \"\"\"\n Create an AzureOpenAIDocumentEmbedder component.\n@@ -84,6 +86,10 @@\n List of meta fields that will be embedded along with the Document text.\n :param embedding_separator:\n Separator used to concatenate the meta fields to the Document text.\n+ :param timeout: The timeout in seconds to be passed to the underlying `AzureOpenAI` client, if not set it is\n+ inferred from the `OPENAI_TIMEOUT` environment variable or set to 30.\n+ :param max_retries: Maximum retries to establish a connection with AzureOpenAI if it returns an internal error,\n+ if not set it is inferred from the `OPENAI_MAX_RETRIES` environment variable or set to 5.\n \"\"\"\n # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT\n azure_endpoint = azure_endpoint or os.environ.get(\"AZURE_OPENAI_ENDPOINT\")\n@@ -106,6 +112,8 @@\n self.progress_bar = progress_bar\n self.meta_fields_to_embed = meta_fields_to_embed or []\n self.embedding_separator = embedding_separator\n+ self.timeout = timeout or float(os.environ.get(\"OPENAI_TIMEOUT\", 30.0))\n+ self.max_retries = max_retries or int(os.environ.get(\"OPENAI_MAX_RETRIES\", 5))\n \n self._client = AzureOpenAI(\n api_version=api_version,\n@@ -114,6 +122,8 @@\n api_key=api_key.resolve_value() if api_key is not None else None,\n azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\n organization=organization,\n+ timeout=self.timeout,\n+ max_retries=self.max_retries,\n )\n \n def _get_telemetry_data(self) -> Dict[str, Any]:\n@@ -144,6 +154,8 @@\n embedding_separator=self.embedding_separator,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n+ timeout=self.timeout,\n+ 
max_retries=self.max_retries,\n )\n \n @classmethod\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/haystack/components/embedders/azure_document_embedder.py b/haystack/components/embedders/azure_document_embedder.py\\n--- a/haystack/components/embedders/azure_document_embedder.py\\n+++ b/haystack/components/embedders/azure_document_embedder.py\\n@@ -33,7 +33,7 @@\\n ```\\n \\\"\\\"\\\"\\n \\n- def __init__(\\n+ def __init__( # noqa: PLR0913 (too-many-arguments)\\n self,\\n azure_endpoint: Optional[str] = None,\\n api_version: Optional[str] = \\\"2023-05-15\\\",\\n@@ -48,6 +48,8 @@\\n progress_bar: bool = True,\\n meta_fields_to_embed: Optional[List[str]] = None,\\n embedding_separator: str = \\\"\\\\n\\\",\\n+ timeout: Optional[float] = None,\\n+ max_retries: Optional[int] = None,\\n ):\\n \\\"\\\"\\\"\\n Create an AzureOpenAIDocumentEmbedder component.\\n@@ -84,6 +86,10 @@\\n List of meta fields that will be embedded along with the Document text.\\n :param embedding_separator:\\n Separator used to concatenate the meta fields to the Document text.\\n+ :param timeout: The timeout in seconds to be passed to the underlying `AzureOpenAI` client, if not set it is\\n+ inferred from the `OPENAI_TIMEOUT` environment variable or set to 30.\\n+ :param max_retries: Maximum retries to establish a connection with AzureOpenAI if it returns an internal error,\\n+ if not set it is inferred from the `OPENAI_MAX_RETRIES` environment variable or set to 5.\\n \\\"\\\"\\\"\\n # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT\\n azure_endpoint = azure_endpoint or os.environ.get(\\\"AZURE_OPENAI_ENDPOINT\\\")\\n@@ -106,6 +112,8 @@\\n self.progress_bar = progress_bar\\n self.meta_fields_to_embed = meta_fields_to_embed or []\\n self.embedding_separator = embedding_separator\\n+ self.timeout = timeout or float(os.environ.get(\\\"OPENAI_TIMEOUT\\\", 30.0))\\n+ self.max_retries = max_retries or int(os.environ.get(\\\"OPENAI_MAX_RETRIES\\\", 5))\\n \\n self._client = AzureOpenAI(\\n api_version=api_version,\\n@@ -114,6 +122,8 @@\\n api_key=api_key.resolve_value() if api_key is not None else None,\\n azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\\n organization=organization,\\n+ timeout=self.timeout,\\n+ max_retries=self.max_retries,\\n )\\n \\n def _get_telemetry_data(self) -> Dict[str, Any]:\\n@@ -144,6 +154,8 @@\\n embedding_separator=self.embedding_separator,\\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\\n+ timeout=self.timeout,\\n+ max_retries=self.max_retries,\\n )\\n \\n @classmethod\\n\", \"issue\": \"Add `max_retries` and `timeout` params to all `AzureOpenAI` classes\\n**Is your feature request related to a problem? Please describe.**\\r\\n\\r\\nCurrently all `OpenAI` related classes (e.g. 
`OpenAIDocumentEmbedder`, `OpenAIChatGenerator`) can be initialised by setting `max_retries` and `timeout` params.\\r\\n\\r\\nThe corresponding `AzureOpenAI` don't always have the same params.\\r\\n\\r\\n**Describe the solution you'd like**\\r\\n\\r\\nIt would be nice to have these params in the `AzureOpenAI` classes\\r\\n\\r\\n**Describe alternatives you've considered**\\r\\n\\r\\nSubclass `AzureOpenAI` and create custom components.\\r\\n\\r\\n**Additional context**\\r\\n\\r\\ncc @anakin87 :)\\n\", \"before_files\": [{\"content\": \"# SPDX-FileCopyrightText: 2022-present deepset GmbH \\n#\\n# SPDX-License-Identifier: Apache-2.0\\n\\nimport os\\nfrom typing import Any, Dict, List, Optional, Tuple\\n\\nfrom openai.lib.azure import AzureOpenAI\\nfrom tqdm import tqdm\\n\\nfrom haystack import Document, component, default_from_dict, default_to_dict\\nfrom haystack.utils import Secret, deserialize_secrets_inplace\\n\\n\\n@component\\nclass AzureOpenAIDocumentEmbedder:\\n \\\"\\\"\\\"\\n A component for computing Document embeddings using OpenAI models on Azure.\\n\\n Usage example:\\n ```python\\n from haystack import Document\\n from haystack.components.embedders import AzureOpenAIDocumentEmbedder\\n\\n doc = Document(content=\\\"I love pizza!\\\")\\n\\n document_embedder = AzureOpenAIDocumentEmbedder()\\n\\n result = document_embedder.run([doc])\\n print(result['documents'][0].embedding)\\n\\n # [0.017020374536514282, -0.023255806416273117, ...]\\n ```\\n \\\"\\\"\\\"\\n\\n def __init__(\\n self,\\n azure_endpoint: Optional[str] = None,\\n api_version: Optional[str] = \\\"2023-05-15\\\",\\n azure_deployment: str = \\\"text-embedding-ada-002\\\",\\n dimensions: Optional[int] = None,\\n api_key: Optional[Secret] = Secret.from_env_var(\\\"AZURE_OPENAI_API_KEY\\\", strict=False),\\n azure_ad_token: Optional[Secret] = Secret.from_env_var(\\\"AZURE_OPENAI_AD_TOKEN\\\", strict=False),\\n organization: Optional[str] = None,\\n prefix: str = \\\"\\\",\\n suffix: str = \\\"\\\",\\n batch_size: int = 32,\\n progress_bar: bool = True,\\n meta_fields_to_embed: Optional[List[str]] = None,\\n embedding_separator: str = \\\"\\\\n\\\",\\n ):\\n \\\"\\\"\\\"\\n Create an AzureOpenAIDocumentEmbedder component.\\n\\n :param azure_endpoint:\\n The endpoint of the deployed model.\\n :param api_version:\\n The version of the API to use.\\n :param azure_deployment:\\n The deployment of the model, usually matches the model name.\\n :param dimensions:\\n The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3\\n and later models.\\n :param api_key:\\n The API key used for authentication.\\n :param azure_ad_token:\\n Microsoft Entra ID token, see Microsoft's official\\n [Entra ID](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id)\\n documentation for more information.\\n Used to be called Azure Active Directory.\\n :param organization:\\n The Organization ID. 
See OpenAI's\\n [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization)\\n for more information.\\n :param prefix:\\n A string to add at the beginning of each text.\\n :param suffix:\\n A string to add at the end of each text.\\n :param batch_size:\\n Number of Documents to encode at once.\\n :param progress_bar:\\n If True shows a progress bar when running.\\n :param meta_fields_to_embed:\\n List of meta fields that will be embedded along with the Document text.\\n :param embedding_separator:\\n Separator used to concatenate the meta fields to the Document text.\\n \\\"\\\"\\\"\\n # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT\\n azure_endpoint = azure_endpoint or os.environ.get(\\\"AZURE_OPENAI_ENDPOINT\\\")\\n if not azure_endpoint:\\n raise ValueError(\\\"Please provide an Azure endpoint or set the environment variable AZURE_OPENAI_ENDPOINT.\\\")\\n\\n if api_key is None and azure_ad_token is None:\\n raise ValueError(\\\"Please provide an API key or an Azure Active Directory token.\\\")\\n\\n self.api_key = api_key\\n self.azure_ad_token = azure_ad_token\\n self.api_version = api_version\\n self.azure_endpoint = azure_endpoint\\n self.azure_deployment = azure_deployment\\n self.dimensions = dimensions\\n self.organization = organization\\n self.prefix = prefix\\n self.suffix = suffix\\n self.batch_size = batch_size\\n self.progress_bar = progress_bar\\n self.meta_fields_to_embed = meta_fields_to_embed or []\\n self.embedding_separator = embedding_separator\\n\\n self._client = AzureOpenAI(\\n api_version=api_version,\\n azure_endpoint=azure_endpoint,\\n azure_deployment=azure_deployment,\\n api_key=api_key.resolve_value() if api_key is not None else None,\\n azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\\n organization=organization,\\n )\\n\\n def _get_telemetry_data(self) -> Dict[str, Any]:\\n \\\"\\\"\\\"\\n Data that is sent to Posthog for usage analytics.\\n \\\"\\\"\\\"\\n return {\\\"model\\\": self.azure_deployment}\\n\\n def to_dict(self) -> Dict[str, Any]:\\n \\\"\\\"\\\"\\n Serializes the component to a dictionary.\\n\\n :returns:\\n Dictionary with serialized data.\\n \\\"\\\"\\\"\\n return default_to_dict(\\n self,\\n azure_endpoint=self.azure_endpoint,\\n azure_deployment=self.azure_deployment,\\n dimensions=self.dimensions,\\n organization=self.organization,\\n api_version=self.api_version,\\n prefix=self.prefix,\\n suffix=self.suffix,\\n batch_size=self.batch_size,\\n progress_bar=self.progress_bar,\\n meta_fields_to_embed=self.meta_fields_to_embed,\\n embedding_separator=self.embedding_separator,\\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\\n )\\n\\n @classmethod\\n def from_dict(cls, data: Dict[str, Any]) -> \\\"AzureOpenAIDocumentEmbedder\\\":\\n \\\"\\\"\\\"\\n Deserializes the component from a dictionary.\\n\\n :param data:\\n Dictionary to deserialize from.\\n :returns:\\n Deserialized component.\\n \\\"\\\"\\\"\\n deserialize_secrets_inplace(data[\\\"init_parameters\\\"], keys=[\\\"api_key\\\", \\\"azure_ad_token\\\"])\\n return default_from_dict(cls, data)\\n\\n def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:\\n \\\"\\\"\\\"\\n Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.\\n \\\"\\\"\\\"\\n texts_to_embed = 

    def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:
        """
        Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.
        """
        texts_to_embed = []
        for doc in documents:
            meta_values_to_embed = [
                str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None
            ]

            text_to_embed = (
                self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or ""]) + self.suffix
            ).replace("\n", " ")

            texts_to_embed.append(text_to_embed)
        return texts_to_embed

    def _embed_batch(self, texts_to_embed: List[str], batch_size: int) -> Tuple[List[List[float]], Dict[str, Any]]:
        """
        Embed a list of texts in batches.
        """

        all_embeddings: List[List[float]] = []
        meta: Dict[str, Any] = {"model": "", "usage": {"prompt_tokens": 0, "total_tokens": 0}}
        for i in tqdm(range(0, len(texts_to_embed), batch_size), desc="Embedding Texts"):
            batch = texts_to_embed[i : i + batch_size]
            if self.dimensions is not None:
                response = self._client.embeddings.create(
                    model=self.azure_deployment, dimensions=self.dimensions, input=batch
                )
            else:
                response = self._client.embeddings.create(model=self.azure_deployment, input=batch)

            # Append embeddings to the list
            all_embeddings.extend(el.embedding for el in response.data)

            # Update the meta information only once if it's empty
            if not meta["model"]:
                meta["model"] = response.model
                meta["usage"] = dict(response.usage)
            else:
                # Update the usage tokens
                meta["usage"]["prompt_tokens"] += response.usage.prompt_tokens
                meta["usage"]["total_tokens"] += response.usage.total_tokens

        return all_embeddings, meta

    @component.output_types(documents=List[Document], meta=Dict[str, Any])
    def run(self, documents: List[Document]) -> Dict[str, Any]:
        """
        Embed a list of Documents.

        :param documents:
            Documents to embed.

        :returns:
            A dictionary with the following keys:
            - `documents`: Documents with embeddings
            - `meta`: Information about the usage of the model.
        """
        if not (isinstance(documents, list) and all(isinstance(doc, Document) for doc in documents)):
            raise TypeError("Input must be a list of Document instances. For strings, use AzureOpenAITextEmbedder.")

        texts_to_embed = self._prepare_texts_to_embed(documents=documents)
        embeddings, meta = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)

        # Assign the corresponding embeddings to each document
        for doc, emb in zip(documents, embeddings):
            doc.embedding = emb

        return {"documents": documents, "meta": meta}
````
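As for the alternative the issue mentions — subclassing and creating custom components — a rough sketch is below. It assumes the parent component keeps its client in `self._client` (true for the file above) and that re-applying Haystack's `@component` decorator to a subclass behaves as expected; `with_options` is the `openai` SDK helper that returns a copy of a client with overridden settings, not a Haystack API.

```python
# Rough sketch of the "alternatives considered" workaround: subclass the
# existing component and override its client with stricter settings.
# Attribute names follow the file shown above; decorator behaviour on
# subclasses is assumed, not verified.
from haystack import component
from haystack.components.embedders import AzureOpenAIDocumentEmbedder


@component
class PatientAzureOpenAIDocumentEmbedder(AzureOpenAIDocumentEmbedder):
    def __init__(self, *args, timeout: float = 30.0, max_retries: int = 5, **kwargs):
        super().__init__(*args, **kwargs)
        # with_options() comes from the openai SDK client; it returns a copy
        # of the client with the given request options applied.
        self._client = self._client.with_options(timeout=timeout, max_retries=max_retries)
```

Note that `to_dict`/`from_dict` as written in the file above would not persist the two extra arguments, so a pipeline serialised with this subclass would fall back to the SDK defaults on reload.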
\"\"]).strip(),\n67 'city': row[\"city\"],\n68 'postcode': row[\"postalCode\"],\n69 'lat': row[\"latitude\"],\n70 'lon': row[\"longitude\"],\n71 'phone': row[\"phone\"],\n72 'state': row[\"stateCode\"],\n73 }\n74 \n75 hours = self.parse_hours(row[\"storeHours\"])\n76 if hours:\n77 properties['opening_hours'] = hours\n78 \n79 yield GeojsonPointItem(**properties)\n80\n[end of locations/spiders/sallybeauty.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/locations/spiders/sallybeauty.py b/locations/spiders/sallybeauty.py\n--- a/locations/spiders/sallybeauty.py\n+++ b/locations/spiders/sallybeauty.py\n@@ -58,7 +58,7 @@\n def parse(self, response):\n jdata = json.loads(response.body_as_unicode())\n \n- for row in jdata.get('stores',[]):\n+ for row in jdata.get('stores', []):\n \n properties = {\n 'ref': row[\"ID\"],\n@@ -72,8 +72,11 @@\n 'state': row[\"stateCode\"],\n }\n \n- hours = self.parse_hours(row[\"storeHours\"])\n- if hours:\n- properties['opening_hours'] = hours\n+ store_hours = row.get(\"storeHours\")\n+ if store_hours:\n+ hours = self.parse_hours(store_hours)\n+\n+ if hours:\n+ properties['opening_hours'] = hours\n \n yield GeojsonPointItem(**properties)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/locations/spiders/sallybeauty.py b/locations/spiders/sallybeauty.py\\n--- a/locations/spiders/sallybeauty.py\\n+++ b/locations/spiders/sallybeauty.py\\n@@ -58,7 +58,7 @@\\n def parse(self, response):\\n jdata = json.loads(response.body_as_unicode())\\n \\n- for row in jdata.get('stores',[]):\\n+ for row in jdata.get('stores', []):\\n \\n properties = {\\n 'ref': row[\\\"ID\\\"],\\n@@ -72,8 +72,11 @@\\n 'state': row[\\\"stateCode\\\"],\\n }\\n \\n- hours = self.parse_hours(row[\\\"storeHours\\\"])\\n- if hours:\\n- properties['opening_hours'] = hours\\n+ store_hours = row.get(\\\"storeHours\\\")\\n+ if store_hours:\\n+ hours = self.parse_hours(store_hours)\\n+\\n+ if hours:\\n+ properties['opening_hours'] = hours\\n \\n yield GeojsonPointItem(**properties)\\n\", \"issue\": \"Spider sallybeauty is broken\\nDuring the global build at 2021-05-26-14-42-23, spider **sallybeauty** failed with **2712 features** and **5 errors**.\\n\\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/sallybeauty.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson) 
([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson))\\n\", \"before_files\": [{\"content\": \"# -*- coding: utf-8 -*-\\nimport scrapy\\nfrom locations.items import GeojsonPointItem\\nfrom urllib.parse import urlencode\\nimport json\\nimport csv\\nfrom locations.hours import OpeningHours\\nfrom scrapy.selector import Selector\\n\\n\\nclass SallySpider(scrapy.Spider):\\n name = \\\"sallybeauty\\\"\\n item_attributes = { 'brand': \\\"Sally Beauty\\\" }\\n allowed_domains = [\\\"sallybeauty.com\\\"]\\n\\n def start_requests(self):\\n base_url = \\\"https://www.sallybeauty.com/on/demandware.store/Sites-SA-Site/default/Stores-FindStores?\\\"\\n\\n point_files = [\\n './locations/searchable_points/us_centroids_100mile_radius.csv',\\n './locations/searchable_points/ca_centroids_100mile_radius.csv'\\n ]\\n\\n params = {\\n \\\"showmap\\\": \\\"true\\\",\\n \\\"radius\\\": \\\"100\\\",\\n }\\n\\n for point_file in point_files:\\n with open(point_file) as points:\\n next(points)\\n for point in points:\\n _, lat, lon = point.strip().split(',')\\n params.update({\\\"lat\\\": lat, \\\"long\\\": lon})\\n yield scrapy.Request(url=base_url + urlencode(params))\\n\\n def parse_hours(self, hours):\\n hrs = Selector(text=hours)\\n days = hrs.xpath('//div[@class=\\\"store-hours-day\\\"]/text()').extract()\\n hours = hrs.xpath('//div[@class=\\\"store-hours-day\\\"]/span/text()').extract()\\n\\n opening_hours = OpeningHours()\\n\\n for d, h in zip(days, hours):\\n try:\\n day = d.strip(': ')\\n open_time, close_time = h.split(' - ')\\n open_time = open_time.lstrip('0')\\n opening_hours.add_range(day=day[:2],\\n open_time=open_time,\\n close_time=close_time,\\n time_format=\\\"%I:%M %p\\\")\\n except:\\n continue\\n\\n return opening_hours.as_opening_hours()\\n\\n def parse(self, response):\\n jdata = json.loads(response.body_as_unicode())\\n\\n for row in jdata.get('stores',[]):\\n\\n properties = {\\n 'ref': row[\\\"ID\\\"],\\n 'name': row[\\\"name\\\"],\\n 'addr_full': \\\" \\\".join([row[\\\"address1\\\"], row.get(\\\"address2\\\", \\\"\\\") or \\\"\\\"]).strip(),\\n 'city': row[\\\"city\\\"],\\n 'postcode': row[\\\"postalCode\\\"],\\n 'lat': row[\\\"latitude\\\"],\\n 'lon': row[\\\"longitude\\\"],\\n 'phone': row[\\\"phone\\\"],\\n 'state': row[\\\"stateCode\\\"],\\n }\\n\\n hours = self.parse_hours(row[\\\"storeHours\\\"])\\n if hours:\\n properties['opening_hours'] = hours\\n\\n yield GeojsonPointItem(**properties)\\n\", \"path\": \"locations/spiders/sallybeauty.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1466,"string":"1,466"},"num_tokens_diff":{"kind":"number","value":222,"string":"222"}}},{"rowIdx":18134,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_55584"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"wagtail__wagtail-1873"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nMigrating to 1.1 Migration File Errors\nI am attempting to migrate to 1.1 and I am getting an error involving the migration files.\n\n```\nMigration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')\n```\n\nThe last migration for wagtail core in my migrations table is 0015. 
Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error.\n\nI tried manually applying 0016, but the error is preventing that from happening.\n\nI know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.\n\nMigrating to 1.1 Migration File Errors\nI am attempting to migrate to 1.1 and I am getting an error involving the migration files.\n\n```\nMigration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')\n```\n\nThe last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error.\n\nI tried manually applying 0016, but the error is preventing that from happening.\n\nI know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.\n\n\n\n\n[start of wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py]\n1 # -*- coding: utf-8 -*-\n2 from __future__ import unicode_literals\n3 \n4 from django.db import models, migrations\n5 \n6 \n7 class Migration(migrations.Migration):\n8 \n9 dependencies = [\n10 ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\n11 ]\n12 \n13 operations = [\n14 migrations.AlterField(\n15 model_name='grouppagepermission',\n16 name='permission_type',\n17 field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'),\n18 preserve_default=True,\n19 ),\n20 ]\n21\n[end of wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n--- a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n+++ b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n@@ -7,7 +7,7 @@\n class Migration(migrations.Migration):\n \n dependencies = [\n- ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\n+ ('wagtailcore', '0016_change_page_url_path_to_text_field'),\n ]\n \n operations = [\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\\n--- a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\\n+++ b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\\n@@ -7,7 +7,7 @@\\n class Migration(migrations.Migration):\\n \\n dependencies = [\\n- ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\\n+ ('wagtailcore', '0016_change_page_url_path_to_text_field'),\\n ]\\n \\n operations = [\\n\", \"issue\": \"Migrating to 1.1 Migration File Errors\\nI am attempting to migrate to 1.1 and I am getting an error involving the migration files.\\n\\n```\\nMigration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')\\n```\\n\\nThe last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error.\\n\\nI tried manually applying 0016, but the error is preventing that from happening.\\n\\nI know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. 
They said that normally migrations refer to the one before it and not the squashed ones.\\n\\nMigrating to 1.1 Migration File Errors\\nI am attempting to migrate to 1.1 and I am getting an error involving the migration files.\\n\\n```\\nMigration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')\\n```\\n\\nThe last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error.\\n\\nI tried manually applying 0016, but the error is preventing that from happening.\\n\\nI know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.\\n\\n\", \"before_files\": [{\"content\": \"# -*- coding: utf-8 -*-\\nfrom __future__ import unicode_literals\\n\\nfrom django.db import models, migrations\\n\\n\\nclass Migration(migrations.Migration):\\n\\n dependencies = [\\n ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\\n ]\\n\\n operations = [\\n migrations.AlterField(\\n model_name='grouppagepermission',\\n name='permission_type',\\n field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'),\\n preserve_default=True,\\n ),\\n ]\\n\", \"path\": \"wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1191,"string":"1,191"},"num_tokens_diff":{"kind":"number","value":171,"string":"171"}}},{"rowIdx":18135,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_36962"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"medtagger__MedTagger-188"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nDisable (temporarily) conversion in X & Y axes\n## Expected Behavior\r\n\r\nFrontend won't be enabled soon, so backend should disable such conversion (for now).\r\n\r\n## Actual Behavior\r\n\r\nBackend always create views from X & Y axes but are not used anywhere.\r\n\r\n## Additional comment\r\n\r\nLet's make it optional and controllable somehow by users (ideally from the UI & API side?).\r\n\n\n\n\n[start of backend/medtagger/workers/conversion.py]\n1 \"\"\"Module responsible for asynchronous data conversion.\"\"\"\n2 import io\n3 import os\n4 import tempfile\n5 from subprocess import call\n6 from typing import List, Optional\n7 \n8 import numpy as np\n9 import pydicom\n10 from pydicom.dataset import FileDataset\n11 from PIL import Image\n12 from celery.utils.log import get_task_logger\n13 \n14 from medtagger.types import ScanID\n15 from medtagger.workers import celery_app\n16 from medtagger.conversion import convert_slice_to_normalized_8bit_array, convert_scan_to_normalized_8bit_array\n17 from medtagger.database.models import SliceOrientation, Slice, Scan\n18 from medtagger.repositories.scans import ScansRepository\n19 from medtagger.repositories.slices import SlicesRepository\n20 \n21 logger = get_task_logger(__name__)\n22 
\n23 MAX_PREVIEW_X_SIZE = 256\n24 \n25 \n26 @celery_app.task\n27 def convert_scan_to_png(scan_id: ScanID) -> None:\n28 \"\"\"Store Scan in HBase database.\n29 \n30 :param scan_id: ID of a Scan\n31 \"\"\"\n32 logger.info('Starting Scan (%s) conversion.', scan_id)\n33 temp_files_to_remove: List[str] = []\n34 scan = ScansRepository.get_scan_by_id(scan_id)\n35 slices = SlicesRepository.get_slices_by_scan_id(scan_id)\n36 if scan.declared_number_of_slices == 0:\n37 logger.error('This Scan is empty! Removing from database...')\n38 ScansRepository.delete_scan_by_id(scan_id)\n39 return\n40 \n41 # At first, collect all Dicom images for given Scan\n42 logger.info('Reading all Slices for this Scan... This may take a while...')\n43 dicom_images = []\n44 for _slice in slices:\n45 image = SlicesRepository.get_slice_original_image(_slice.id)\n46 dicom_image, files_to_remove = _get_dicom_image(image)\n47 dicom_images.append(dicom_image)\n48 temp_files_to_remove.extend(files_to_remove)\n49 \n50 # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation\n51 logger.info('Converting each Slice in Z axis.')\n52 for dicom_image, _slice in zip(dicom_images, slices):\n53 slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\n54 _convert_to_png_and_store(_slice, slice_pixels)\n55 \n56 # Prepare a preview size and convert 3D scan to fit its max X's axis shape\n57 logger.info('Normalizing Scan in 3D. This may take a while...')\n58 normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\n59 \n60 # Prepare Slices in other orientations\n61 logger.info('Preparing Slices in other axis.')\n62 _prepare_slices_in_y_orientation(normalized_scan, scan)\n63 _prepare_slices_in_x_orientation(normalized_scan, scan)\n64 \n65 logger.info('Marking whole Scan as converted.')\n66 scan.mark_as_converted()\n67 \n68 # Remove all temporarily created files for applying workaround\n69 for file_name in temp_files_to_remove:\n70 os.remove(file_name)\n71 \n72 \n73 def _get_dicom_image(image: bytes) -> FileDataset:\n74 \"\"\"Return PyDICOM image based on image from HBase.\n75 \n76 This workaround enables support for compressed DICOMs as GDCM wrapper does not support Python3 well.\n77 \n78 :param image: bytes with DICOM image (eg. from HBase)\n79 :return: PyDICOM Image\n80 \"\"\"\n81 # UGLY WORKAROUND FOR COMPRESSED DICOMs - Start\n82 temp_file_name = _create_temporary_file(image)\n83 try:\n84 dicom_image = pydicom.read_file(temp_file_name, force=True)\n85 dicom_image.pixel_array # pylint: disable=pointless-statement; Try to read pixel array from DICOM...\n86 return dicom_image, [temp_file_name]\n87 except Exception: # pylint: disable=broad-except; Intended - too much cases to cover...\n88 # In case of any Exception - try to uncompress data from DICOM first\n89 temp_file_uncompressed = _create_temporary_file()\n90 call([\"gdcmconv\", \"--raw\", \"-i\", temp_file_name, \"-o\", temp_file_uncompressed]) # Convert to RAW DICOMs\n91 dicom_image = pydicom.read_file(temp_file_uncompressed, force=True)\n92 return dicom_image, [temp_file_name, temp_file_uncompressed]\n93 # UGLY WORKAROUND - Stop\n94 \n95 \n96 def _create_temporary_file(image: Optional[bytes] = None) -> str:\n97 \"\"\"Create new temporary file based on given DICOM image.\n98 \n99 This workaround enable support for compressed DICOMs that will be read by the GDCM\n100 low-level library. 
Please remove this workaround as soon as this FIX ME notice\n101 will be removed:\n102 https://github.com/pydicom/pydicom/blob/master/pydicom/pixel_data_handlers/gdcm_handler.py#L77\n103 and this Issue will be closed:\n104 https://github.com/pydicom/pydicom/issues/233\n105 \n106 :param image: (optional) bytes with DICOM image\n107 :return: path to temporary file\n108 \"\"\"\n109 with tempfile.NamedTemporaryFile(delete=False) as temp_file:\n110 temp_file_name = temp_file.name\n111 if image:\n112 temp_file.write(image)\n113 return temp_file_name\n114 \n115 \n116 def _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n117 \"\"\"Prepare and save Slices in Y orientation.\n118 \n119 :param normalized_scan: Numpy array with 3D normalized Scan\n120 :param scan: Scan object to which new Slices should be added\n121 \"\"\"\n122 for y in range(normalized_scan.shape[1]):\n123 location = 100.0 * y / normalized_scan.shape[1]\n124 slice_pixels = normalized_scan[:, y, :]\n125 _slice = scan.add_slice(SliceOrientation.Y)\n126 _slice.update_location(location)\n127 _convert_to_png_and_store(_slice, slice_pixels)\n128 \n129 \n130 def _prepare_slices_in_x_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n131 \"\"\"Prepare and save Slices in Y orientation.\n132 \n133 :param normalized_scan: Numpy array with 3D normalized Scan\n134 :param scan: Scan object to which new Slices should be added\n135 \"\"\"\n136 for x in range(normalized_scan.shape[2]):\n137 location = 100.0 * x / normalized_scan.shape[2]\n138 slice_pixels = normalized_scan[:, :, x]\n139 _slice = scan.add_slice(SliceOrientation.X)\n140 _slice.update_location(location)\n141 _convert_to_png_and_store(_slice, slice_pixels)\n142 \n143 \n144 def _convert_to_png_and_store(_slice: Slice, slice_pixels: np.ndarray) -> None:\n145 \"\"\"Convert given Slice's pixel array and store in databases.\n146 \n147 :param _slice: Slice database object\n148 :param slice_pixels: numpy array with Slice data\n149 \"\"\"\n150 converted_image = _convert_slice_pixels_to_png(slice_pixels)\n151 SlicesRepository.store_converted_image(_slice.id, converted_image)\n152 _slice.mark_as_converted()\n153 logger.info('%s converted and stored.', _slice)\n154 \n155 \n156 def _convert_slice_pixels_to_png(slice_pixels: np.ndarray) -> bytes:\n157 \"\"\"Convert given Slice's pixel array to the PNG format in bytes.\n158 \n159 :param slice_pixels: Slice's pixel array\n160 :return: bytes with Slice formatted in PNG\n161 \"\"\"\n162 png_image = io.BytesIO()\n163 Image.fromarray(slice_pixels, 'L').save(png_image, 'PNG')\n164 png_image.seek(0)\n165 return png_image.getvalue()\n166\n[end of backend/medtagger/workers/conversion.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/backend/medtagger/workers/conversion.py b/backend/medtagger/workers/conversion.py\n--- a/backend/medtagger/workers/conversion.py\n+++ b/backend/medtagger/workers/conversion.py\n@@ -20,6 +20,7 @@\n \n logger = get_task_logger(__name__)\n \n+CONVERT_IN_OTHER_AXES = False # Disabled until Frontend will enable support for such Slices\n MAX_PREVIEW_X_SIZE = 256\n \n \n@@ -47,20 +48,8 @@\n dicom_images.append(dicom_image)\n temp_files_to_remove.extend(files_to_remove)\n \n- # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation\n- logger.info('Converting each Slice in Z axis.')\n- for dicom_image, _slice in zip(dicom_images, slices):\n- slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\n- _convert_to_png_and_store(_slice, slice_pixels)\n-\n- # Prepare a preview size and convert 3D scan to fit its max X's axis shape\n- logger.info('Normalizing Scan in 3D. This may take a while...')\n- normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\n-\n- # Prepare Slices in other orientations\n- logger.info('Preparing Slices in other axis.')\n- _prepare_slices_in_y_orientation(normalized_scan, scan)\n- _prepare_slices_in_x_orientation(normalized_scan, scan)\n+ # Correlate Dicom files with Slices and convert all Slices\n+ _convert_scan_in_all_axes(dicom_images, slices, scan)\n \n logger.info('Marking whole Scan as converted.')\n scan.mark_as_converted()\n@@ -113,6 +102,32 @@\n return temp_file_name\n \n \n+def _convert_scan_in_all_axes(dicom_images: List[FileDataset], slices: List[Slice], scan: Scan) -> None:\n+ \"\"\"Convert Scan in X, Y and Z axes.\n+\n+ NOTE: X & Y axes are now disabled (until Frontend will support it).\n+\n+ :param dicom_images: list of all Dicom images\n+ :param slices: list of all Slices in given Scan\n+ :param scan: Scan object to which new Slices should be added\n+ \"\"\"\n+ logger.info('Converting each Slice in Z axis.')\n+ for dicom_image, _slice in zip(dicom_images, slices):\n+ slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\n+ _convert_to_png_and_store(_slice, slice_pixels)\n+\n+ # Convert only if it's enabled\n+ if CONVERT_IN_OTHER_AXES:\n+ # Prepare a preview size and convert 3D scan to fit its max X's axis shape\n+ logger.info('Normalizing Scan in 3D. 
This may take a while...')\n+ normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\n+\n+ # Prepare Slices in other orientations\n+ logger.info('Preparing Slices in other axis.')\n+ _prepare_slices_in_y_orientation(normalized_scan, scan)\n+ _prepare_slices_in_x_orientation(normalized_scan, scan)\n+\n+\n def _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n \"\"\"Prepare and save Slices in Y orientation.\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/backend/medtagger/workers/conversion.py b/backend/medtagger/workers/conversion.py\\n--- a/backend/medtagger/workers/conversion.py\\n+++ b/backend/medtagger/workers/conversion.py\\n@@ -20,6 +20,7 @@\\n \\n logger = get_task_logger(__name__)\\n \\n+CONVERT_IN_OTHER_AXES = False # Disabled until Frontend will enable support for such Slices\\n MAX_PREVIEW_X_SIZE = 256\\n \\n \\n@@ -47,20 +48,8 @@\\n dicom_images.append(dicom_image)\\n temp_files_to_remove.extend(files_to_remove)\\n \\n- # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation\\n- logger.info('Converting each Slice in Z axis.')\\n- for dicom_image, _slice in zip(dicom_images, slices):\\n- slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\\n- _convert_to_png_and_store(_slice, slice_pixels)\\n-\\n- # Prepare a preview size and convert 3D scan to fit its max X's axis shape\\n- logger.info('Normalizing Scan in 3D. This may take a while...')\\n- normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\\n-\\n- # Prepare Slices in other orientations\\n- logger.info('Preparing Slices in other axis.')\\n- _prepare_slices_in_y_orientation(normalized_scan, scan)\\n- _prepare_slices_in_x_orientation(normalized_scan, scan)\\n+ # Correlate Dicom files with Slices and convert all Slices\\n+ _convert_scan_in_all_axes(dicom_images, slices, scan)\\n \\n logger.info('Marking whole Scan as converted.')\\n scan.mark_as_converted()\\n@@ -113,6 +102,32 @@\\n return temp_file_name\\n \\n \\n+def _convert_scan_in_all_axes(dicom_images: List[FileDataset], slices: List[Slice], scan: Scan) -> None:\\n+ \\\"\\\"\\\"Convert Scan in X, Y and Z axes.\\n+\\n+ NOTE: X & Y axes are now disabled (until Frontend will support it).\\n+\\n+ :param dicom_images: list of all Dicom images\\n+ :param slices: list of all Slices in given Scan\\n+ :param scan: Scan object to which new Slices should be added\\n+ \\\"\\\"\\\"\\n+ logger.info('Converting each Slice in Z axis.')\\n+ for dicom_image, _slice in zip(dicom_images, slices):\\n+ slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\\n+ _convert_to_png_and_store(_slice, slice_pixels)\\n+\\n+ # Convert only if it's enabled\\n+ if CONVERT_IN_OTHER_AXES:\\n+ # Prepare a preview size and convert 3D scan to fit its max X's axis shape\\n+ logger.info('Normalizing Scan in 3D. 
This may take a while...')\\n+ normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\\n+\\n+ # Prepare Slices in other orientations\\n+ logger.info('Preparing Slices in other axis.')\\n+ _prepare_slices_in_y_orientation(normalized_scan, scan)\\n+ _prepare_slices_in_x_orientation(normalized_scan, scan)\\n+\\n+\\n def _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\\n \\\"\\\"\\\"Prepare and save Slices in Y orientation.\\n\", \"issue\": \"Disable (temporarily) conversion in X & Y axes\\n## Expected Behavior\\r\\n\\r\\nFrontend won't be enabled soon, so backend should disable such conversion (for now).\\r\\n\\r\\n## Actual Behavior\\r\\n\\r\\nBackend always create views from X & Y axes but are not used anywhere.\\r\\n\\r\\n## Additional comment\\r\\n\\r\\nLet's make it optional and controllable somehow by users (ideally from the UI & API side?).\\r\\n\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"Module responsible for asynchronous data conversion.\\\"\\\"\\\"\\nimport io\\nimport os\\nimport tempfile\\nfrom subprocess import call\\nfrom typing import List, Optional\\n\\nimport numpy as np\\nimport pydicom\\nfrom pydicom.dataset import FileDataset\\nfrom PIL import Image\\nfrom celery.utils.log import get_task_logger\\n\\nfrom medtagger.types import ScanID\\nfrom medtagger.workers import celery_app\\nfrom medtagger.conversion import convert_slice_to_normalized_8bit_array, convert_scan_to_normalized_8bit_array\\nfrom medtagger.database.models import SliceOrientation, Slice, Scan\\nfrom medtagger.repositories.scans import ScansRepository\\nfrom medtagger.repositories.slices import SlicesRepository\\n\\nlogger = get_task_logger(__name__)\\n\\nMAX_PREVIEW_X_SIZE = 256\\n\\n\\n@celery_app.task\\ndef convert_scan_to_png(scan_id: ScanID) -> None:\\n \\\"\\\"\\\"Store Scan in HBase database.\\n\\n :param scan_id: ID of a Scan\\n \\\"\\\"\\\"\\n logger.info('Starting Scan (%s) conversion.', scan_id)\\n temp_files_to_remove: List[str] = []\\n scan = ScansRepository.get_scan_by_id(scan_id)\\n slices = SlicesRepository.get_slices_by_scan_id(scan_id)\\n if scan.declared_number_of_slices == 0:\\n logger.error('This Scan is empty! Removing from database...')\\n ScansRepository.delete_scan_by_id(scan_id)\\n return\\n\\n # At first, collect all Dicom images for given Scan\\n logger.info('Reading all Slices for this Scan... This may take a while...')\\n dicom_images = []\\n for _slice in slices:\\n image = SlicesRepository.get_slice_original_image(_slice.id)\\n dicom_image, files_to_remove = _get_dicom_image(image)\\n dicom_images.append(dicom_image)\\n temp_files_to_remove.extend(files_to_remove)\\n\\n # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation\\n logger.info('Converting each Slice in Z axis.')\\n for dicom_image, _slice in zip(dicom_images, slices):\\n slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\\n _convert_to_png_and_store(_slice, slice_pixels)\\n\\n # Prepare a preview size and convert 3D scan to fit its max X's axis shape\\n logger.info('Normalizing Scan in 3D. 
This may take a while...')\\n normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\\n\\n # Prepare Slices in other orientations\\n logger.info('Preparing Slices in other axis.')\\n _prepare_slices_in_y_orientation(normalized_scan, scan)\\n _prepare_slices_in_x_orientation(normalized_scan, scan)\\n\\n logger.info('Marking whole Scan as converted.')\\n scan.mark_as_converted()\\n\\n # Remove all temporarily created files for applying workaround\\n for file_name in temp_files_to_remove:\\n os.remove(file_name)\\n\\n\\ndef _get_dicom_image(image: bytes) -> FileDataset:\\n \\\"\\\"\\\"Return PyDICOM image based on image from HBase.\\n\\n This workaround enables support for compressed DICOMs as GDCM wrapper does not support Python3 well.\\n\\n :param image: bytes with DICOM image (eg. from HBase)\\n :return: PyDICOM Image\\n \\\"\\\"\\\"\\n # UGLY WORKAROUND FOR COMPRESSED DICOMs - Start\\n temp_file_name = _create_temporary_file(image)\\n try:\\n dicom_image = pydicom.read_file(temp_file_name, force=True)\\n dicom_image.pixel_array # pylint: disable=pointless-statement; Try to read pixel array from DICOM...\\n return dicom_image, [temp_file_name]\\n except Exception: # pylint: disable=broad-except; Intended - too much cases to cover...\\n # In case of any Exception - try to uncompress data from DICOM first\\n temp_file_uncompressed = _create_temporary_file()\\n call([\\\"gdcmconv\\\", \\\"--raw\\\", \\\"-i\\\", temp_file_name, \\\"-o\\\", temp_file_uncompressed]) # Convert to RAW DICOMs\\n dicom_image = pydicom.read_file(temp_file_uncompressed, force=True)\\n return dicom_image, [temp_file_name, temp_file_uncompressed]\\n # UGLY WORKAROUND - Stop\\n\\n\\ndef _create_temporary_file(image: Optional[bytes] = None) -> str:\\n \\\"\\\"\\\"Create new temporary file based on given DICOM image.\\n\\n This workaround enable support for compressed DICOMs that will be read by the GDCM\\n low-level library. 
Please remove this workaround as soon as this FIX ME notice\\n will be removed:\\n https://github.com/pydicom/pydicom/blob/master/pydicom/pixel_data_handlers/gdcm_handler.py#L77\\n and this Issue will be closed:\\n https://github.com/pydicom/pydicom/issues/233\\n\\n :param image: (optional) bytes with DICOM image\\n :return: path to temporary file\\n \\\"\\\"\\\"\\n with tempfile.NamedTemporaryFile(delete=False) as temp_file:\\n temp_file_name = temp_file.name\\n if image:\\n temp_file.write(image)\\n return temp_file_name\\n\\n\\ndef _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\\n \\\"\\\"\\\"Prepare and save Slices in Y orientation.\\n\\n :param normalized_scan: Numpy array with 3D normalized Scan\\n :param scan: Scan object to which new Slices should be added\\n \\\"\\\"\\\"\\n for y in range(normalized_scan.shape[1]):\\n location = 100.0 * y / normalized_scan.shape[1]\\n slice_pixels = normalized_scan[:, y, :]\\n _slice = scan.add_slice(SliceOrientation.Y)\\n _slice.update_location(location)\\n _convert_to_png_and_store(_slice, slice_pixels)\\n\\n\\ndef _prepare_slices_in_x_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\\n \\\"\\\"\\\"Prepare and save Slices in Y orientation.\\n\\n :param normalized_scan: Numpy array with 3D normalized Scan\\n :param scan: Scan object to which new Slices should be added\\n \\\"\\\"\\\"\\n for x in range(normalized_scan.shape[2]):\\n location = 100.0 * x / normalized_scan.shape[2]\\n slice_pixels = normalized_scan[:, :, x]\\n _slice = scan.add_slice(SliceOrientation.X)\\n _slice.update_location(location)\\n _convert_to_png_and_store(_slice, slice_pixels)\\n\\n\\ndef _convert_to_png_and_store(_slice: Slice, slice_pixels: np.ndarray) -> None:\\n \\\"\\\"\\\"Convert given Slice's pixel array and store in databases.\\n\\n :param _slice: Slice database object\\n :param slice_pixels: numpy array with Slice data\\n \\\"\\\"\\\"\\n converted_image = _convert_slice_pixels_to_png(slice_pixels)\\n SlicesRepository.store_converted_image(_slice.id, converted_image)\\n _slice.mark_as_converted()\\n logger.info('%s converted and stored.', _slice)\\n\\n\\ndef _convert_slice_pixels_to_png(slice_pixels: np.ndarray) -> bytes:\\n \\\"\\\"\\\"Convert given Slice's pixel array to the PNG format in bytes.\\n\\n :param slice_pixels: Slice's pixel array\\n :return: bytes with Slice formatted in PNG\\n \\\"\\\"\\\"\\n png_image = io.BytesIO()\\n Image.fromarray(slice_pixels, 'L').save(png_image, 'PNG')\\n png_image.seek(0)\\n return png_image.getvalue()\\n\", \"path\": \"backend/medtagger/workers/conversion.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2635,"string":"2,635"},"num_tokens_diff":{"kind":"number","value":766,"string":"766"}}},{"rowIdx":18136,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_39711"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"Lightning-Universe__lightning-flash-824"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nVideoClassificationData.from_files() does not work\n## 🐛 Bug\r\n`VideoClassificationData.from_files()` does not work. 
`VideoClassificationData` objects can only be constructed using the `from_folders()` classmethod - which unfortunately doesn't work for multilabel tasks :)\r\n\r\n### To Reproduce\r\nI wrote a Colab notebook to reproduce this in a self-contained environment: https://colab.research.google.com/drive/1X7UvZDndCc0dzcUZ_fGdmQz0ZMTTsj_U?usp=sharing\r\n\r\n#### Code sample\r\nSee link to colab above\r\n\r\n### Expected behavior\r\nA `VideoClassificationData` object should be constructed from a list of paths and their labels\n\n\n\n[start of flash/video/classification/data.py]\n1 # Copyright The PyTorch Lightning team.\n2 #\n3 # Licensed under the Apache License, Version 2.0 (the \"License\");\n4 # you may not use this file except in compliance with the License.\n5 # You may obtain a copy of the License at\n6 #\n7 # http://www.apache.org/licenses/LICENSE-2.0\n8 #\n9 # Unless required by applicable law or agreed to in writing, software\n10 # distributed under the License is distributed on an \"AS IS\" BASIS,\n11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n12 # See the License for the specific language governing permissions and\n13 # limitations under the License.\n14 import pathlib\n15 from typing import Any, Callable, Dict, List, Optional, Type, TYPE_CHECKING, Union\n16 \n17 import numpy as np\n18 import torch\n19 from pytorch_lightning.utilities.exceptions import MisconfigurationException\n20 from torch.utils.data import Sampler\n21 \n22 from flash.core.data.data_module import DataModule\n23 from flash.core.data.data_source import (\n24 DefaultDataKeys,\n25 DefaultDataSources,\n26 FiftyOneDataSource,\n27 LabelsState,\n28 PathsDataSource,\n29 )\n30 from flash.core.data.process import Preprocess\n31 from flash.core.integrations.labelstudio.data_source import LabelStudioVideoClassificationDataSource\n32 from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, lazy_import\n33 \n34 SampleCollection = None\n35 if _FIFTYONE_AVAILABLE:\n36 fol = lazy_import(\"fiftyone.core.labels\")\n37 if TYPE_CHECKING:\n38 from fiftyone.core.collections import SampleCollection\n39 else:\n40 fol = None\n41 \n42 if _KORNIA_AVAILABLE:\n43 import kornia.augmentation as K\n44 \n45 if _PYTORCHVIDEO_AVAILABLE:\n46 from pytorchvideo.data.clip_sampling import ClipSampler, make_clip_sampler\n47 from pytorchvideo.data.encoded_video import EncodedVideo\n48 from pytorchvideo.data.labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset\n49 from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths\n50 from pytorchvideo.transforms import ApplyTransformToKey, UniformTemporalSubsample\n51 from torchvision.transforms import CenterCrop, Compose, RandomCrop, RandomHorizontalFlip\n52 else:\n53 ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None\n54 \n55 _PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]\n56 \n57 \n58 class BaseVideoClassification:\n59 def __init__(\n60 self,\n61 clip_sampler: \"ClipSampler\",\n62 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n63 decode_audio: bool = True,\n64 decoder: str = \"pyav\",\n65 ):\n66 self.clip_sampler = clip_sampler\n67 self.video_sampler = video_sampler\n68 self.decode_audio = decode_audio\n69 self.decoder = decoder\n70 \n71 def load_data(self, data: str, dataset: Optional[Any] = None) -> \"LabeledVideoDataset\":\n72 ds = self._make_encoded_video_dataset(data)\n73 if self.training:\n74 label_to_class_mapping = 
{p[1]: p[0].split(\"/\")[-2] for p in ds._labeled_videos._paths_and_labels}\n75 self.set_state(LabelsState(label_to_class_mapping))\n76 dataset.num_classes = len(np.unique([s[1][\"label\"] for s in ds._labeled_videos]))\n77 return ds\n78 \n79 def load_sample(self, sample):\n80 return sample\n81 \n82 def predict_load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n83 video_path = sample[DefaultDataKeys.INPUT]\n84 sample.update(self._encoded_video_to_dict(EncodedVideo.from_path(video_path)))\n85 sample[DefaultDataKeys.METADATA] = {\"filepath\": video_path}\n86 return sample\n87 \n88 def _encoded_video_to_dict(self, video, annotation: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:\n89 (\n90 clip_start,\n91 clip_end,\n92 clip_index,\n93 aug_index,\n94 is_last_clip,\n95 ) = self.clip_sampler(0.0, video.duration, annotation)\n96 \n97 loaded_clip = video.get_clip(clip_start, clip_end)\n98 \n99 clip_is_null = (\n100 loaded_clip is None or loaded_clip[\"video\"] is None or (loaded_clip[\"audio\"] is None and self.decode_audio)\n101 )\n102 \n103 if clip_is_null:\n104 raise MisconfigurationException(\n105 f\"The provided video is too short {video.duration} to be clipped at {self.clip_sampler._clip_duration}\"\n106 )\n107 \n108 frames = loaded_clip[\"video\"]\n109 audio_samples = loaded_clip[\"audio\"]\n110 return {\n111 \"video\": frames,\n112 \"video_name\": video.name,\n113 \"video_index\": 0,\n114 \"clip_index\": clip_index,\n115 \"aug_index\": aug_index,\n116 **({\"audio\": audio_samples} if audio_samples is not None else {}),\n117 }\n118 \n119 def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n120 raise NotImplementedError(\"Subclass must implement _make_encoded_video_dataset()\")\n121 \n122 \n123 class VideoClassificationPathsDataSource(BaseVideoClassification, PathsDataSource):\n124 def __init__(\n125 self,\n126 clip_sampler: \"ClipSampler\",\n127 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n128 decode_audio: bool = True,\n129 decoder: str = \"pyav\",\n130 ):\n131 super().__init__(\n132 clip_sampler,\n133 video_sampler=video_sampler,\n134 decode_audio=decode_audio,\n135 decoder=decoder,\n136 )\n137 PathsDataSource.__init__(\n138 self,\n139 extensions=(\"mp4\", \"avi\"),\n140 )\n141 \n142 def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n143 ds: LabeledVideoDataset = labeled_video_dataset(\n144 pathlib.Path(data),\n145 self.clip_sampler,\n146 video_sampler=self.video_sampler,\n147 decode_audio=self.decode_audio,\n148 decoder=self.decoder,\n149 )\n150 return ds\n151 \n152 \n153 class VideoClassificationFiftyOneDataSource(\n154 BaseVideoClassification,\n155 FiftyOneDataSource,\n156 ):\n157 def __init__(\n158 self,\n159 clip_sampler: \"ClipSampler\",\n160 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n161 decode_audio: bool = True,\n162 decoder: str = \"pyav\",\n163 label_field: str = \"ground_truth\",\n164 ):\n165 super().__init__(\n166 clip_sampler=clip_sampler,\n167 video_sampler=video_sampler,\n168 decode_audio=decode_audio,\n169 decoder=decoder,\n170 )\n171 FiftyOneDataSource.__init__(\n172 self,\n173 label_field=label_field,\n174 )\n175 \n176 @property\n177 def label_cls(self):\n178 return fol.Classification\n179 \n180 def _make_encoded_video_dataset(self, data: SampleCollection) -> \"LabeledVideoDataset\":\n181 classes = self._get_classes(data)\n182 label_to_class_mapping = dict(enumerate(classes))\n183 class_to_label_mapping = {c: lab for lab, c in label_to_class_mapping.items()}\n184 \n185 
filepaths = data.values(\"filepath\")\n186 labels = data.values(self.label_field + \".label\")\n187 targets = [class_to_label_mapping[lab] for lab in labels]\n188 labeled_video_paths = LabeledVideoPaths(list(zip(filepaths, targets)))\n189 \n190 ds: LabeledVideoDataset = LabeledVideoDataset(\n191 labeled_video_paths,\n192 self.clip_sampler,\n193 video_sampler=self.video_sampler,\n194 decode_audio=self.decode_audio,\n195 decoder=self.decoder,\n196 )\n197 return ds\n198 \n199 \n200 class VideoClassificationPreprocess(Preprocess):\n201 def __init__(\n202 self,\n203 train_transform: Optional[Dict[str, Callable]] = None,\n204 val_transform: Optional[Dict[str, Callable]] = None,\n205 test_transform: Optional[Dict[str, Callable]] = None,\n206 predict_transform: Optional[Dict[str, Callable]] = None,\n207 clip_sampler: Union[str, \"ClipSampler\"] = \"random\",\n208 clip_duration: float = 2,\n209 clip_sampler_kwargs: Dict[str, Any] = None,\n210 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n211 decode_audio: bool = True,\n212 decoder: str = \"pyav\",\n213 **data_source_kwargs: Any,\n214 ):\n215 self.clip_sampler = clip_sampler\n216 self.clip_duration = clip_duration\n217 self.clip_sampler_kwargs = clip_sampler_kwargs\n218 self.video_sampler = video_sampler\n219 self.decode_audio = decode_audio\n220 self.decoder = decoder\n221 \n222 if not _PYTORCHVIDEO_AVAILABLE:\n223 raise ModuleNotFoundError(\"Please, run `pip install pytorchvideo`.\")\n224 \n225 if not clip_sampler_kwargs:\n226 clip_sampler_kwargs = {}\n227 \n228 if not clip_sampler:\n229 raise MisconfigurationException(\n230 \"clip_sampler should be provided as a string or ``pytorchvideo.data.clip_sampling.ClipSampler``\"\n231 )\n232 \n233 clip_sampler = make_clip_sampler(clip_sampler, clip_duration, **clip_sampler_kwargs)\n234 \n235 super().__init__(\n236 train_transform=train_transform,\n237 val_transform=val_transform,\n238 test_transform=test_transform,\n239 predict_transform=predict_transform,\n240 data_sources={\n241 DefaultDataSources.FILES: VideoClassificationPathsDataSource(\n242 clip_sampler,\n243 video_sampler=video_sampler,\n244 decode_audio=decode_audio,\n245 decoder=decoder,\n246 ),\n247 DefaultDataSources.FOLDERS: VideoClassificationPathsDataSource(\n248 clip_sampler,\n249 video_sampler=video_sampler,\n250 decode_audio=decode_audio,\n251 decoder=decoder,\n252 ),\n253 DefaultDataSources.FIFTYONE: VideoClassificationFiftyOneDataSource(\n254 clip_sampler,\n255 video_sampler=video_sampler,\n256 decode_audio=decode_audio,\n257 decoder=decoder,\n258 **data_source_kwargs,\n259 ),\n260 DefaultDataSources.LABELSTUDIO: LabelStudioVideoClassificationDataSource(\n261 clip_sampler=clip_sampler,\n262 video_sampler=video_sampler,\n263 decode_audio=decode_audio,\n264 decoder=decoder,\n265 **data_source_kwargs,\n266 ),\n267 },\n268 default_data_source=DefaultDataSources.FILES,\n269 )\n270 \n271 def get_state_dict(self) -> Dict[str, Any]:\n272 return {\n273 **self.transforms,\n274 \"clip_sampler\": self.clip_sampler,\n275 \"clip_duration\": self.clip_duration,\n276 \"clip_sampler_kwargs\": self.clip_sampler_kwargs,\n277 \"video_sampler\": self.video_sampler,\n278 \"decode_audio\": self.decode_audio,\n279 \"decoder\": self.decoder,\n280 }\n281 \n282 @classmethod\n283 def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool) -> \"VideoClassificationPreprocess\":\n284 return cls(**state_dict)\n285 \n286 def default_transforms(self) -> Dict[str, Callable]:\n287 if self.training:\n288 post_tensor_transform = [\n289 
RandomCrop(244, pad_if_needed=True),\n290 RandomHorizontalFlip(p=0.5),\n291 ]\n292 else:\n293 post_tensor_transform = [\n294 CenterCrop(244),\n295 ]\n296 \n297 return {\n298 \"post_tensor_transform\": Compose(\n299 [\n300 ApplyTransformToKey(\n301 key=\"video\",\n302 transform=Compose([UniformTemporalSubsample(8)] + post_tensor_transform),\n303 ),\n304 ]\n305 ),\n306 \"per_batch_transform_on_device\": Compose(\n307 [\n308 ApplyTransformToKey(\n309 key=\"video\",\n310 transform=K.VideoSequential(\n311 K.Normalize(torch.tensor([0.45, 0.45, 0.45]), torch.tensor([0.225, 0.225, 0.225])),\n312 data_format=\"BCTHW\",\n313 same_on_frame=False,\n314 ),\n315 ),\n316 ]\n317 ),\n318 }\n319 \n320 \n321 class VideoClassificationData(DataModule):\n322 \"\"\"Data module for Video classification tasks.\"\"\"\n323 \n324 preprocess_cls = VideoClassificationPreprocess\n325\n[end of flash/video/classification/data.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/flash/video/classification/data.py b/flash/video/classification/data.py\n--- a/flash/video/classification/data.py\n+++ b/flash/video/classification/data.py\n@@ -54,6 +54,8 @@\n \n _PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]\n \n+Label = Union[int, List[int]]\n+\n \n class BaseVideoClassification:\n def __init__(\n@@ -150,6 +152,76 @@\n return ds\n \n \n+class VideoClassificationListDataSource(BaseVideoClassification, PathsDataSource):\n+ def __init__(\n+ self,\n+ clip_sampler: \"ClipSampler\",\n+ video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n+ decode_audio: bool = True,\n+ decoder: str = \"pyav\",\n+ ):\n+ super().__init__(\n+ clip_sampler,\n+ video_sampler=video_sampler,\n+ decode_audio=decode_audio,\n+ decoder=decoder,\n+ )\n+ PathsDataSource.__init__(\n+ self,\n+ extensions=(\"mp4\", \"avi\"),\n+ )\n+\n+ def _to_multi_hot(self, label_list: List[int]) -> torch.Tensor:\n+ v = torch.zeros(len(self.labels_set))\n+ for label in label_list:\n+ v[label] = 1\n+ return v\n+\n+ def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n+ [paths, str_labels] = data\n+ self.is_multilabel = any(isinstance(label, list) for label in str_labels)\n+ if self.is_multilabel:\n+ self.labels_set = {label for label_list in str_labels for label in label_list}\n+ self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}\n+ self.id_to_label = {i: label for 
label, i in self.label_to_id.items()}\n+\n+ encoded_labels = [\n+ self._to_multi_hot([self.label_to_id[classname] for classname in label_list])\n+ for label_list in str_labels\n+ ]\n+\n+ data = list(\n+ zip(\n+ paths,\n+ encoded_labels,\n+ )\n+ )\n+ else:\n+ self.labels_set = set(str_labels)\n+ self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}\n+ self.id_to_label = {i: label for label, i in self.label_to_id.items()}\n+ data = list(\n+ zip(paths, [self.label_to_id[classname] for classname in str_labels])\n+ ) # List[Lists] -> List[Tuples]\n+ labeled_video_paths = LabeledVideoPaths(data)\n+ ds = LabeledVideoDataset(\n+ labeled_video_paths,\n+ self.clip_sampler,\n+ video_sampler=self.video_sampler,\n+ decode_audio=self.decode_audio,\n+ decoder=self.decoder,\n+ )\n+ return ds\n+\n+ def load_data(self, data: str, dataset: Optional[Any] = None) -> \"LabeledVideoDataset\":\n+ ds = self._make_encoded_video_dataset(data)\n+\n+ if self.training:\n+ self.set_state(LabelsState(self.id_to_label))\n+ dataset.num_classes = len(self.labels_set)\n+ return ds\n+\n+\n class VideoClassificationFiftyOneDataSource(\n BaseVideoClassification,\n FiftyOneDataSource,\n@@ -238,7 +310,7 @@\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_sources={\n- DefaultDataSources.FILES: VideoClassificationPathsDataSource(\n+ DefaultDataSources.FILES: VideoClassificationListDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/flash/video/classification/data.py b/flash/video/classification/data.py\\n--- a/flash/video/classification/data.py\\n+++ b/flash/video/classification/data.py\\n@@ -54,6 +54,8 @@\\n \\n _PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]\\n \\n+Label = Union[int, List[int]]\\n+\\n \\n class BaseVideoClassification:\\n def __init__(\\n@@ -150,6 +152,76 @@\\n return ds\\n \\n \\n+class VideoClassificationListDataSource(BaseVideoClassification, PathsDataSource):\\n+ def __init__(\\n+ self,\\n+ clip_sampler: \\\"ClipSampler\\\",\\n+ video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\\n+ decode_audio: bool = True,\\n+ decoder: str = \\\"pyav\\\",\\n+ ):\\n+ super().__init__(\\n+ clip_sampler,\\n+ video_sampler=video_sampler,\\n+ decode_audio=decode_audio,\\n+ decoder=decoder,\\n+ )\\n+ PathsDataSource.__init__(\\n+ self,\\n+ extensions=(\\\"mp4\\\", \\\"avi\\\"),\\n+ )\\n+\\n+ def _to_multi_hot(self, label_list: List[int]) -> torch.Tensor:\\n+ v = torch.zeros(len(self.labels_set))\\n+ for label in label_list:\\n+ v[label] = 1\\n+ return v\\n+\\n+ def _make_encoded_video_dataset(self, data) -> \\\"LabeledVideoDataset\\\":\\n+ [paths, str_labels] = data\\n+ self.is_multilabel = any(isinstance(label, list) for label in str_labels)\\n+ if self.is_multilabel:\\n+ self.labels_set = {label for label_list in str_labels for label in label_list}\\n+ self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}\\n+ self.id_to_label = {i: label for label, i in self.label_to_id.items()}\\n+\\n+ encoded_labels = [\\n+ self._to_multi_hot([self.label_to_id[classname] for classname in label_list])\\n+ for label_list in str_labels\\n+ ]\\n+\\n+ data = list(\\n+ zip(\\n+ paths,\\n+ encoded_labels,\\n+ )\\n+ )\\n+ else:\\n+ self.labels_set = set(str_labels)\\n+ self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}\\n+ self.id_to_label = {i: label for label, i in 
self.label_to_id.items()}\\n+ data = list(\\n+ zip(paths, [self.label_to_id[classname] for classname in str_labels])\\n+ ) # List[Lists] -> List[Tuples]\\n+ labeled_video_paths = LabeledVideoPaths(data)\\n+ ds = LabeledVideoDataset(\\n+ labeled_video_paths,\\n+ self.clip_sampler,\\n+ video_sampler=self.video_sampler,\\n+ decode_audio=self.decode_audio,\\n+ decoder=self.decoder,\\n+ )\\n+ return ds\\n+\\n+ def load_data(self, data: str, dataset: Optional[Any] = None) -> \\\"LabeledVideoDataset\\\":\\n+ ds = self._make_encoded_video_dataset(data)\\n+\\n+ if self.training:\\n+ self.set_state(LabelsState(self.id_to_label))\\n+ dataset.num_classes = len(self.labels_set)\\n+ return ds\\n+\\n+\\n class VideoClassificationFiftyOneDataSource(\\n BaseVideoClassification,\\n FiftyOneDataSource,\\n@@ -238,7 +310,7 @@\\n test_transform=test_transform,\\n predict_transform=predict_transform,\\n data_sources={\\n- DefaultDataSources.FILES: VideoClassificationPathsDataSource(\\n+ DefaultDataSources.FILES: VideoClassificationListDataSource(\\n clip_sampler,\\n video_sampler=video_sampler,\\n decode_audio=decode_audio,\\n\", \"issue\": \"VideoClassificationData.from_files() does not work\\n## \\ud83d\\udc1b Bug\\r\\n`VideoClassificationData.from_files()` does not work. `VideoClassificationData` objects can only be constructed using the `from_folders()` classmethod - which unfortunately doesn't work for multilabel tasks :)\\r\\n\\r\\n### To Reproduce\\r\\nI wrote a Colab notebook to reproduce this in a self-contained environment: https://colab.research.google.com/drive/1X7UvZDndCc0dzcUZ_fGdmQz0ZMTTsj_U?usp=sharing\\r\\n\\r\\n#### Code sample\\r\\nSee link to colab above\\r\\n\\r\\n### Expected behavior\\r\\nA `VideoClassificationData` object should be constructed from a list of paths and their labels\\n\", \"before_files\": [{\"content\": \"# Copyright The PyTorch Lightning team.\\n#\\n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n#\\n# http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\\nimport pathlib\\nfrom typing import Any, Callable, Dict, List, Optional, Type, TYPE_CHECKING, Union\\n\\nimport numpy as np\\nimport torch\\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\\nfrom torch.utils.data import Sampler\\n\\nfrom flash.core.data.data_module import DataModule\\nfrom flash.core.data.data_source import (\\n DefaultDataKeys,\\n DefaultDataSources,\\n FiftyOneDataSource,\\n LabelsState,\\n PathsDataSource,\\n)\\nfrom flash.core.data.process import Preprocess\\nfrom flash.core.integrations.labelstudio.data_source import LabelStudioVideoClassificationDataSource\\nfrom flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, lazy_import\\n\\nSampleCollection = None\\nif _FIFTYONE_AVAILABLE:\\n fol = lazy_import(\\\"fiftyone.core.labels\\\")\\n if TYPE_CHECKING:\\n from fiftyone.core.collections import SampleCollection\\nelse:\\n fol = None\\n\\nif _KORNIA_AVAILABLE:\\n import kornia.augmentation as K\\n\\nif _PYTORCHVIDEO_AVAILABLE:\\n from pytorchvideo.data.clip_sampling 
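The patch above turns per-sample string labels into training targets by building a sorted label-to-id mapping and, for multi-label samples, multi-hot vectors. A minimal standalone sketch of that encoding step — plain PyTorch only, with illustrative function and variable names rather than the actual Flash API:

```python
import torch

def encode_labels(str_labels):
    """Map per-sample labels (str or list of str) to training targets.

    Mirrors the idea in the patch above: single-label samples become class
    ids, multi-label samples become multi-hot vectors over the label set.
    """
    is_multilabel = any(isinstance(label, list) for label in str_labels)
    if is_multilabel:
        labels_set = {name for label_list in str_labels for name in label_list}
    else:
        labels_set = set(str_labels)
    label_to_id = {label: i for i, label in enumerate(sorted(labels_set))}

    if not is_multilabel:
        return [label_to_id[label] for label in str_labels], label_to_id

    def to_multi_hot(label_list):
        v = torch.zeros(len(labels_set))
        for label in label_list:
            v[label_to_id[label]] = 1
        return v

    return [to_multi_hot(label_list) for label_list in str_labels], label_to_id

# Example: two multi-label video samples
targets, mapping = encode_labels([["cat", "dog"], ["dog"]])
print(mapping)     # {'cat': 0, 'dog': 1}
print(targets[0])  # tensor([1., 1.])
print(targets[1])  # tensor([0., 1.])
```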
import ClipSampler, make_clip_sampler\\n from pytorchvideo.data.encoded_video import EncodedVideo\\n from pytorchvideo.data.labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset\\n from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths\\n from pytorchvideo.transforms import ApplyTransformToKey, UniformTemporalSubsample\\n from torchvision.transforms import CenterCrop, Compose, RandomCrop, RandomHorizontalFlip\\nelse:\\n ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None\\n\\n_PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]\\n\\n\\nclass BaseVideoClassification:\\n def __init__(\\n self,\\n clip_sampler: \\\"ClipSampler\\\",\\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\\n decode_audio: bool = True,\\n decoder: str = \\\"pyav\\\",\\n ):\\n self.clip_sampler = clip_sampler\\n self.video_sampler = video_sampler\\n self.decode_audio = decode_audio\\n self.decoder = decoder\\n\\n def load_data(self, data: str, dataset: Optional[Any] = None) -> \\\"LabeledVideoDataset\\\":\\n ds = self._make_encoded_video_dataset(data)\\n if self.training:\\n label_to_class_mapping = {p[1]: p[0].split(\\\"/\\\")[-2] for p in ds._labeled_videos._paths_and_labels}\\n self.set_state(LabelsState(label_to_class_mapping))\\n dataset.num_classes = len(np.unique([s[1][\\\"label\\\"] for s in ds._labeled_videos]))\\n return ds\\n\\n def load_sample(self, sample):\\n return sample\\n\\n def predict_load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\\n video_path = sample[DefaultDataKeys.INPUT]\\n sample.update(self._encoded_video_to_dict(EncodedVideo.from_path(video_path)))\\n sample[DefaultDataKeys.METADATA] = {\\\"filepath\\\": video_path}\\n return sample\\n\\n def _encoded_video_to_dict(self, video, annotation: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:\\n (\\n clip_start,\\n clip_end,\\n clip_index,\\n aug_index,\\n is_last_clip,\\n ) = self.clip_sampler(0.0, video.duration, annotation)\\n\\n loaded_clip = video.get_clip(clip_start, clip_end)\\n\\n clip_is_null = (\\n loaded_clip is None or loaded_clip[\\\"video\\\"] is None or (loaded_clip[\\\"audio\\\"] is None and self.decode_audio)\\n )\\n\\n if clip_is_null:\\n raise MisconfigurationException(\\n f\\\"The provided video is too short {video.duration} to be clipped at {self.clip_sampler._clip_duration}\\\"\\n )\\n\\n frames = loaded_clip[\\\"video\\\"]\\n audio_samples = loaded_clip[\\\"audio\\\"]\\n return {\\n \\\"video\\\": frames,\\n \\\"video_name\\\": video.name,\\n \\\"video_index\\\": 0,\\n \\\"clip_index\\\": clip_index,\\n \\\"aug_index\\\": aug_index,\\n **({\\\"audio\\\": audio_samples} if audio_samples is not None else {}),\\n }\\n\\n def _make_encoded_video_dataset(self, data) -> \\\"LabeledVideoDataset\\\":\\n raise NotImplementedError(\\\"Subclass must implement _make_encoded_video_dataset()\\\")\\n\\n\\nclass VideoClassificationPathsDataSource(BaseVideoClassification, PathsDataSource):\\n def __init__(\\n self,\\n clip_sampler: \\\"ClipSampler\\\",\\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\\n decode_audio: bool = True,\\n decoder: str = \\\"pyav\\\",\\n ):\\n super().__init__(\\n clip_sampler,\\n video_sampler=video_sampler,\\n decode_audio=decode_audio,\\n decoder=decoder,\\n )\\n PathsDataSource.__init__(\\n self,\\n extensions=(\\\"mp4\\\", \\\"avi\\\"),\\n )\\n\\n def _make_encoded_video_dataset(self, data) -> \\\"LabeledVideoDataset\\\":\\n ds: LabeledVideoDataset = 
labeled_video_dataset(\\n pathlib.Path(data),\\n self.clip_sampler,\\n video_sampler=self.video_sampler,\\n decode_audio=self.decode_audio,\\n decoder=self.decoder,\\n )\\n return ds\\n\\n\\nclass VideoClassificationFiftyOneDataSource(\\n BaseVideoClassification,\\n FiftyOneDataSource,\\n):\\n def __init__(\\n self,\\n clip_sampler: \\\"ClipSampler\\\",\\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\\n decode_audio: bool = True,\\n decoder: str = \\\"pyav\\\",\\n label_field: str = \\\"ground_truth\\\",\\n ):\\n super().__init__(\\n clip_sampler=clip_sampler,\\n video_sampler=video_sampler,\\n decode_audio=decode_audio,\\n decoder=decoder,\\n )\\n FiftyOneDataSource.__init__(\\n self,\\n label_field=label_field,\\n )\\n\\n @property\\n def label_cls(self):\\n return fol.Classification\\n\\n def _make_encoded_video_dataset(self, data: SampleCollection) -> \\\"LabeledVideoDataset\\\":\\n classes = self._get_classes(data)\\n label_to_class_mapping = dict(enumerate(classes))\\n class_to_label_mapping = {c: lab for lab, c in label_to_class_mapping.items()}\\n\\n filepaths = data.values(\\\"filepath\\\")\\n labels = data.values(self.label_field + \\\".label\\\")\\n targets = [class_to_label_mapping[lab] for lab in labels]\\n labeled_video_paths = LabeledVideoPaths(list(zip(filepaths, targets)))\\n\\n ds: LabeledVideoDataset = LabeledVideoDataset(\\n labeled_video_paths,\\n self.clip_sampler,\\n video_sampler=self.video_sampler,\\n decode_audio=self.decode_audio,\\n decoder=self.decoder,\\n )\\n return ds\\n\\n\\nclass VideoClassificationPreprocess(Preprocess):\\n def __init__(\\n self,\\n train_transform: Optional[Dict[str, Callable]] = None,\\n val_transform: Optional[Dict[str, Callable]] = None,\\n test_transform: Optional[Dict[str, Callable]] = None,\\n predict_transform: Optional[Dict[str, Callable]] = None,\\n clip_sampler: Union[str, \\\"ClipSampler\\\"] = \\\"random\\\",\\n clip_duration: float = 2,\\n clip_sampler_kwargs: Dict[str, Any] = None,\\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\\n decode_audio: bool = True,\\n decoder: str = \\\"pyav\\\",\\n **data_source_kwargs: Any,\\n ):\\n self.clip_sampler = clip_sampler\\n self.clip_duration = clip_duration\\n self.clip_sampler_kwargs = clip_sampler_kwargs\\n self.video_sampler = video_sampler\\n self.decode_audio = decode_audio\\n self.decoder = decoder\\n\\n if not _PYTORCHVIDEO_AVAILABLE:\\n raise ModuleNotFoundError(\\\"Please, run `pip install pytorchvideo`.\\\")\\n\\n if not clip_sampler_kwargs:\\n clip_sampler_kwargs = {}\\n\\n if not clip_sampler:\\n raise MisconfigurationException(\\n \\\"clip_sampler should be provided as a string or ``pytorchvideo.data.clip_sampling.ClipSampler``\\\"\\n )\\n\\n clip_sampler = make_clip_sampler(clip_sampler, clip_duration, **clip_sampler_kwargs)\\n\\n super().__init__(\\n train_transform=train_transform,\\n val_transform=val_transform,\\n test_transform=test_transform,\\n predict_transform=predict_transform,\\n data_sources={\\n DefaultDataSources.FILES: VideoClassificationPathsDataSource(\\n clip_sampler,\\n video_sampler=video_sampler,\\n decode_audio=decode_audio,\\n decoder=decoder,\\n ),\\n DefaultDataSources.FOLDERS: VideoClassificationPathsDataSource(\\n clip_sampler,\\n video_sampler=video_sampler,\\n decode_audio=decode_audio,\\n decoder=decoder,\\n ),\\n DefaultDataSources.FIFTYONE: VideoClassificationFiftyOneDataSource(\\n clip_sampler,\\n video_sampler=video_sampler,\\n decode_audio=decode_audio,\\n decoder=decoder,\\n **data_source_kwargs,\\n 
),\\n DefaultDataSources.LABELSTUDIO: LabelStudioVideoClassificationDataSource(\\n clip_sampler=clip_sampler,\\n video_sampler=video_sampler,\\n decode_audio=decode_audio,\\n decoder=decoder,\\n **data_source_kwargs,\\n ),\\n },\\n default_data_source=DefaultDataSources.FILES,\\n )\\n\\n def get_state_dict(self) -> Dict[str, Any]:\\n return {\\n **self.transforms,\\n \\\"clip_sampler\\\": self.clip_sampler,\\n \\\"clip_duration\\\": self.clip_duration,\\n \\\"clip_sampler_kwargs\\\": self.clip_sampler_kwargs,\\n \\\"video_sampler\\\": self.video_sampler,\\n \\\"decode_audio\\\": self.decode_audio,\\n \\\"decoder\\\": self.decoder,\\n }\\n\\n @classmethod\\n def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool) -> \\\"VideoClassificationPreprocess\\\":\\n return cls(**state_dict)\\n\\n def default_transforms(self) -> Dict[str, Callable]:\\n if self.training:\\n post_tensor_transform = [\\n RandomCrop(244, pad_if_needed=True),\\n RandomHorizontalFlip(p=0.5),\\n ]\\n else:\\n post_tensor_transform = [\\n CenterCrop(244),\\n ]\\n\\n return {\\n \\\"post_tensor_transform\\\": Compose(\\n [\\n ApplyTransformToKey(\\n key=\\\"video\\\",\\n transform=Compose([UniformTemporalSubsample(8)] + post_tensor_transform),\\n ),\\n ]\\n ),\\n \\\"per_batch_transform_on_device\\\": Compose(\\n [\\n ApplyTransformToKey(\\n key=\\\"video\\\",\\n transform=K.VideoSequential(\\n K.Normalize(torch.tensor([0.45, 0.45, 0.45]), torch.tensor([0.225, 0.225, 0.225])),\\n data_format=\\\"BCTHW\\\",\\n same_on_frame=False,\\n ),\\n ),\\n ]\\n ),\\n }\\n\\n\\nclass VideoClassificationData(DataModule):\\n \\\"\\\"\\\"Data module for Video classification tasks.\\\"\\\"\\\"\\n\\n preprocess_cls = VideoClassificationPreprocess\\n\", \"path\": \"flash/video/classification/data.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3998,"string":"3,998"},"num_tokens_diff":{"kind":"number","value":818,"string":"818"}}},{"rowIdx":18137,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_33817"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"open-telemetry__opentelemetry-python-contrib-530"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nopentelemetry-instrument command fails if incompatible instrumentation is found\nIf an instrumentation is installed for a library that is not found in the environment, the instrument command raises the following exception:\r\n\r\n\r\n```\r\n❯ opentelemetry-instrument python main.py\r\nInstrumenting of flask failed\r\nTraceback (most recent call last):\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 71, in _load_instrumentors\r\n conflict = get_dist_dependency_conflicts(entry_point.dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 33, in get_dist_dependency_conflicts\r\n return get_dependency_conflicts(deps)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 41, in get_dependency_conflicts\r\n get_distribution(str(dep))\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 482, in get_distribution\r\n dist = get_provider(dist)\r\n 
File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 358, in get_provider\r\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\r\nIndexError: list index out of range\r\nFailed to auto initialize opentelemetry\r\nTraceback (most recent call last):\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 111, in initialize\r\n _load_instrumentors(distro)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 85, in _load_instrumentors\r\n raise exc\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 71, in _load_instrumentors\r\n conflict = get_dist_dependency_conflicts(entry_point.dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 33, in get_dist_dependency_conflicts\r\n return get_dependency_conflicts(deps)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 41, in get_dependency_conflicts\r\n get_distribution(str(dep))\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 482, in get_distribution\r\n dist = get_provider(dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 358, in get_provider\r\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\r\nIndexError: list index out of range\r\n```\r\n\r\nbootstrap command does not install any instrumentations for libraries that are not present in the environment so this would only happen if someone manually installed an instrumentation package for a library they're not using. So this is not a deal breaker and doesn't require an immediate hotfix. 
That said, this IS a bug as the intended behavior of instrument command is to silently ignore such instrumentations.\n\n\n\n[start of opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py]\n1 from typing import Collection, Optional\n2 \n3 from pkg_resources import (\n4 Distribution,\n5 DistributionNotFound,\n6 VersionConflict,\n7 get_distribution,\n8 )\n9 \n10 \n11 class DependencyConflict:\n12 required: str = None\n13 found: Optional[str] = None\n14 \n15 def __init__(self, required, found=None):\n16 self.required = required\n17 self.found = found\n18 \n19 def __str__(self):\n20 return 'DependencyConflict: requested: \"{0}\" but found: \"{1}\"'.format(\n21 self.required, self.found\n22 )\n23 \n24 \n25 def get_dist_dependency_conflicts(\n26 dist: Distribution,\n27 ) -> Optional[DependencyConflict]:\n28 deps = [\n29 dep\n30 for dep in dist.requires((\"instruments\",))\n31 if dep not in dist.requires()\n32 ]\n33 return get_dependency_conflicts(deps)\n34 \n35 \n36 def get_dependency_conflicts(\n37 deps: Collection[str],\n38 ) -> Optional[DependencyConflict]:\n39 for dep in deps:\n40 try:\n41 get_distribution(str(dep))\n42 except VersionConflict as exc:\n43 return DependencyConflict(dep, exc.dist)\n44 except DistributionNotFound:\n45 return DependencyConflict(dep)\n46 return None\n47\n[end of opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n@@ -1,12 +1,16 @@\n+from logging import getLogger\n from typing import Collection, Optional\n \n from pkg_resources import (\n Distribution,\n DistributionNotFound,\n+ RequirementParseError,\n VersionConflict,\n get_distribution,\n )\n \n+logger = getLogger(__file__)\n+\n \n class DependencyConflict:\n required: str = None\n@@ -25,12 +29,19 @@\n def get_dist_dependency_conflicts(\n dist: Distribution,\n ) -> Optional[DependencyConflict]:\n- deps = [\n- dep\n- for dep in dist.requires((\"instruments\",))\n- if dep not in dist.requires()\n- ]\n- return get_dependency_conflicts(deps)\n+ main_deps 
= dist.requires()\n+ instrumentation_deps = []\n+ for dep in dist.requires((\"instruments\",)):\n+ if dep not in main_deps:\n+ # we set marker to none so string representation of the dependency looks like\n+ # requests ~= 1.0\n+ # instead of\n+ # requests ~= 1.0; extra = \"instruments\"\n+ # which does not work with `get_distribution()`\n+ dep.marker = None\n+ instrumentation_deps.append(str(dep))\n+\n+ return get_dependency_conflicts(instrumentation_deps)\n \n \n def get_dependency_conflicts(\n@@ -38,9 +49,16 @@\n ) -> Optional[DependencyConflict]:\n for dep in deps:\n try:\n- get_distribution(str(dep))\n+ get_distribution(dep)\n except VersionConflict as exc:\n return DependencyConflict(dep, exc.dist)\n except DistributionNotFound:\n return DependencyConflict(dep)\n+ except RequirementParseError as exc:\n+ logger.warning(\n+ 'error parsing dependency, reporting as a conflict: \"%s\" - %s',\n+ dep,\n+ exc,\n+ )\n+ return DependencyConflict(dep)\n return None\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\\n@@ -1,12 +1,16 @@\\n+from logging import getLogger\\n from typing import Collection, Optional\\n \\n from pkg_resources import (\\n Distribution,\\n DistributionNotFound,\\n+ RequirementParseError,\\n VersionConflict,\\n get_distribution,\\n )\\n \\n+logger = getLogger(__file__)\\n+\\n \\n class DependencyConflict:\\n required: str = None\\n@@ -25,12 +29,19 @@\\n def get_dist_dependency_conflicts(\\n dist: Distribution,\\n ) -> Optional[DependencyConflict]:\\n- deps = [\\n- dep\\n- for dep in dist.requires((\\\"instruments\\\",))\\n- if dep not in dist.requires()\\n- ]\\n- return get_dependency_conflicts(deps)\\n+ main_deps = dist.requires()\\n+ instrumentation_deps = []\\n+ for dep in dist.requires((\\\"instruments\\\",)):\\n+ if dep not in main_deps:\\n+ # we set marker to none so string representation of the dependency looks like\\n+ # requests ~= 1.0\\n+ # instead of\\n+ # requests ~= 1.0; extra = \\\"instruments\\\"\\n+ # which does not work with `get_distribution()`\\n+ dep.marker = None\\n+ instrumentation_deps.append(str(dep))\\n+\\n+ return get_dependency_conflicts(instrumentation_deps)\\n \\n \\n def get_dependency_conflicts(\\n@@ -38,9 +49,16 @@\\n ) -> Optional[DependencyConflict]:\\n for dep in deps:\\n try:\\n- get_distribution(str(dep))\\n+ get_distribution(dep)\\n except VersionConflict as exc:\\n return DependencyConflict(dep, exc.dist)\\n except DistributionNotFound:\\n return DependencyConflict(dep)\\n+ except RequirementParseError as exc:\\n+ logger.warning(\\n+ 'error parsing dependency, reporting as a conflict: \\\"%s\\\" - %s',\\n+ dep,\\n+ exc,\\n+ )\\n+ return DependencyConflict(dep)\\n return None\\n\", \"issue\": \"opentelemetry-instrument command fails if incompatible instrumentation is found\\nIf an instrumentation is installed for a library that is not found in the environment, the instrument command raises the following exception:\\r\\n\\r\\n\\r\\n```\\r\\n\\u276f opentelemetry-instrument python main.py\\r\\nInstrumenting of flask failed\\r\\nTraceback (most recent call last):\\r\\n File 
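The fix above hinges on one detail: a requirement taken from the "instruments" extra stringifies with an environment marker (e.g. `flask >= 1.0; extra == "instruments"`), and `pkg_resources.get_distribution()` cannot resolve that form, which is consistent with the IndexError in the traceback. A small self-contained sketch of the same conflict check, using only the pkg_resources calls that the patch itself imports; the function name and the commented usage are illustrative:

```python
from pkg_resources import (
    DistributionNotFound,
    RequirementParseError,
    VersionConflict,
    get_distribution,
)

def find_instrumentation_conflict(package_name):
    """Report the first unmet "instruments" dependency of an installed package.

    Returns a (requirement, reason) tuple, or None when everything resolves.
    """
    dist = get_distribution(package_name)
    main_deps = dist.requires()
    for dep in dist.requires(("instruments",)):
        if dep in main_deps:
            continue
        # Drop the `; extra == "instruments"` marker so str(dep) is a plain
        # requirement string that get_distribution() can look up.
        dep.marker = None
        try:
            get_distribution(str(dep))
        except VersionConflict as exc:
            return dep, f"installed {exc.dist} conflicts"
        except DistributionNotFound:
            return dep, "not installed"
        except RequirementParseError as exc:
            return dep, f"unparseable requirement: {exc}"
    return None

# Illustrative usage against an instrumentation package installed locally:
# print(find_instrumentation_conflict("opentelemetry-instrumentation-flask"))
```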
\\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\\\", line 71, in _load_instrumentors\\r\\n conflict = get_dist_dependency_conflicts(entry_point.dist)\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\\\", line 33, in get_dist_dependency_conflicts\\r\\n return get_dependency_conflicts(deps)\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\\\", line 41, in get_dependency_conflicts\\r\\n get_distribution(str(dep))\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\\\", line 482, in get_distribution\\r\\n dist = get_provider(dist)\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\\\", line 358, in get_provider\\r\\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\\r\\nIndexError: list index out of range\\r\\nFailed to auto initialize opentelemetry\\r\\nTraceback (most recent call last):\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\\\", line 111, in initialize\\r\\n _load_instrumentors(distro)\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\\\", line 85, in _load_instrumentors\\r\\n raise exc\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\\\", line 71, in _load_instrumentors\\r\\n conflict = get_dist_dependency_conflicts(entry_point.dist)\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\\\", line 33, in get_dist_dependency_conflicts\\r\\n return get_dependency_conflicts(deps)\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\\\", line 41, in get_dependency_conflicts\\r\\n get_distribution(str(dep))\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\\\", line 482, in get_distribution\\r\\n dist = get_provider(dist)\\r\\n File \\\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\\\", line 358, in get_provider\\r\\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\\r\\nIndexError: list index out of range\\r\\n```\\r\\n\\r\\nbootstrap command does not install any instrumentations for libraries that are not present in the environment so this would only happen if someone manually installed an instrumentation package for a library they're not using. So this is not a deal breaker and doesn't require an immediate hotfix. 
That said, this IS a bug as the intended behavior of instrument command is to silently ignore such instrumentations.\\n\", \"before_files\": [{\"content\": \"from typing import Collection, Optional\\n\\nfrom pkg_resources import (\\n Distribution,\\n DistributionNotFound,\\n VersionConflict,\\n get_distribution,\\n)\\n\\n\\nclass DependencyConflict:\\n required: str = None\\n found: Optional[str] = None\\n\\n def __init__(self, required, found=None):\\n self.required = required\\n self.found = found\\n\\n def __str__(self):\\n return 'DependencyConflict: requested: \\\"{0}\\\" but found: \\\"{1}\\\"'.format(\\n self.required, self.found\\n )\\n\\n\\ndef get_dist_dependency_conflicts(\\n dist: Distribution,\\n) -> Optional[DependencyConflict]:\\n deps = [\\n dep\\n for dep in dist.requires((\\\"instruments\\\",))\\n if dep not in dist.requires()\\n ]\\n return get_dependency_conflicts(deps)\\n\\n\\ndef get_dependency_conflicts(\\n deps: Collection[str],\\n) -> Optional[DependencyConflict]:\\n for dep in deps:\\n try:\\n get_distribution(str(dep))\\n except VersionConflict as exc:\\n return DependencyConflict(dep, exc.dist)\\n except DistributionNotFound:\\n return DependencyConflict(dep)\\n return None\\n\", \"path\": \"opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1741,"string":"1,741"},"num_tokens_diff":{"kind":"number","value":483,"string":"483"}}},{"rowIdx":18138,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_8851"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"yt-dlp__yt-dlp-3789"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nStreamCZ extractor broken\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2022.04.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are alive and playable in a browser\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. 
DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nworld\n\n### Description\n\nBroken support for stream.cz\r\nExample: https://www.stream.cz/autobazar/vecerni-auto-271357\n\n### Verbose log\n\n```shell\nC:\\Users\\xxx>yt-dlp.lnk https://www.stream.cz/autobazar/vecerni-auto-271357 -U -v\r\n[debug] Command-line config: ['https://www.stream.cz/autobazar/vecerni-auto-271357', '-U', '-v']\r\n[debug] User config \"C:\\Users\\xxx\\yt-dlp.conf\": ['--user-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36', '--no-check-certificate', '-o', 'D:\\\\Downloads\\\\yt-dlp\\\\%(autonumber)03d_%(title)s.%(ext)s', '--no-mtime', '--format', 'bestvideo[ext=mp4][height<=1080][fps<=30]+bestaudio[ext=m4a]/best[ext=mp4][height<=1080][fps<=30]/best[height<=1080][fps<=30]/best', '--merge-output-format', 'mkv', '--ffmpeg-location', 'C:\\\\Apps\\\\ffmpeg\\\\bin\\\\ffmpeg.exe', '--all-subs', '-i', '--add-metadata', '--remux-video', 'mkv', '--add-header', 'accept-language: cs,fi;q=0.9,en-US;q=0.8,en;q=0.7,en-GB;q=0.6', '--add-header', 'sec-ch-ua: \"Chromium\";v=\"94\", \"Google Chrome\";v=\"94\", \";Not A Brand\";v=\"99\"', '--add-header', 'sec-ch-ua-arch: \"x86\"', '--add-header', 'sec-ch-ua-bitness: \"64\"', '--add-header', 'sec-ch-ua-full-version: \"94.0.4606.71\"', '--add-header', 'sec-ch-ua-mobile: ?0', '--add-header', 'sec-ch-ua-model: \"\"', '--add-header', 'sec-ch-ua-platform: \"Windows\"', '--add-header', 'sec-ch-ua-platform-version: \"10.0.0\"', '--add-header', 'sec-fetch-dest: document', '--add-header', 'sec-fetch-mode: navigate', '--add-header', 'sec-fetch-site: none', '--add-header', 'sec-fetch-user: ?1', '--add-header', 'service-worker-navigation-preload: true', '--add-header', 'upgrade-insecure-requests: 1', '--add-header', 'alt-svc: h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-T051=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"', '--add-header', 'cache-control: no-cache, no-store, max-age=0, must-revalidate']\r\n[debug] Encodings: locale cp1252, fs utf-8, out utf-8, err utf-8, pref cp1252\r\n[debug] yt-dlp version 2022.04.08 [7884ade] (win_exe)\r\n[debug] Python version 3.8.10 (CPython 64bit) - Windows-10-10.0.19044-SP0\r\n[debug] Checking exe version: \"C:\\Apps\\ffmpeg\\bin\\ffmpeg.exe\" -bsfs\r\n[debug] Checking exe version: \"C:\\Apps\\ffmpeg\\bin\\ffprobe\" -bsfs\r\n[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-full_build-www.gyan.dev\r\n[debug] Optional libraries: brotli, certifi, Cryptodome, mutagen, sqlite, websockets\r\n[debug] Proxy map: {}\r\nLatest version: 2022.04.08, Current version: 2022.04.08\r\nyt-dlp is up to date (2022.04.08)\r\n[debug] [StreamCZ] Extracting URL: https://www.stream.cz/autobazar/vecerni-auto-271357\r\n[StreamCZ] 271357: Downloading GraphQL result\r\n[StreamCZ] 271357: Downloading playlist\r\nERROR: 'NoneType' object has no attribute 'items'\r\nTraceback (most recent call last):\r\n File \"yt_dlp\\YoutubeDL.py\", line 1408, in wrapper\r\n File \"yt_dlp\\YoutubeDL.py\", line 1478, in __extract_info\r\n File 
\"yt_dlp\\extractor\\common.py\", line 641, in extract\r\n File \"yt_dlp\\extractor\\streamcz.py\", line 112, in _real_extract\r\n File \"yt_dlp\\extractor\\streamcz.py\", line 58, in _extract_formats\r\nAttributeError: 'NoneType' object has no attribute 'items'\n```\n\n\n\n\n[start of yt_dlp/extractor/streamcz.py]\n1 import json\n2 \n3 from .common import InfoExtractor\n4 from ..utils import (\n5 float_or_none,\n6 int_or_none,\n7 parse_codecs,\n8 traverse_obj,\n9 urljoin,\n10 )\n11 \n12 \n13 class StreamCZIE(InfoExtractor):\n14 _VALID_URL = r'https?://(?:www\\.)?(?:stream|televizeseznam)\\.cz/[^?#]+/(?P[^?#]+)-(?P[0-9]+)'\n15 _TESTS = [{\n16 'url': 'https://www.televizeseznam.cz/video/lajna/buh-57953890',\n17 'md5': '40c41ade1464a390a0b447e333df4239',\n18 'info_dict': {\n19 'id': '57953890',\n20 'ext': 'mp4',\n21 'title': 'Bůh',\n22 'display_id': 'buh',\n23 'description': 'md5:8f5f09b9b7bc67df910486cdd88f7165',\n24 'duration': 1369.6,\n25 'view_count': int,\n26 }\n27 }, {\n28 'url': 'https://www.stream.cz/kdo-to-mluvi/kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna-64087937',\n29 'md5': '41fd358000086a1ccdb068c77809b158',\n30 'info_dict': {\n31 'id': '64087937',\n32 'ext': 'mp4',\n33 'title': 'Kdo to mluví? Velké odhalení přináší nový pořad už od 25. srpna',\n34 'display_id': 'kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna',\n35 'description': 'md5:97a811000a6460266029d6c1c2ebcd59',\n36 'duration': 50.2,\n37 'view_count': int,\n38 }\n39 }, {\n40 'url': 'https://www.stream.cz/tajemno/znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili-64147267',\n41 'md5': '3ee4d0be040e8f4a543e67e509d55e3f',\n42 'info_dict': {\n43 'id': '64147267',\n44 'ext': 'mp4',\n45 'title': 'Zničehonic jim skrz střechu prolítnul záhadný předmět. 
Badatelé vše objasnili',\n46 'display_id': 'znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili',\n47 'description': 'md5:4b8ada6718d34bb011c4e04ca4bc19bf',\n48 'duration': 442.84,\n49 'view_count': int,\n50 }\n51 }]\n52 \n53 def _extract_formats(self, spl_url, video):\n54 for ext, pref, streams in (\n55 ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),\n56 ('mp4', 1, video.get('mp4'))):\n57 for format_id, stream in streams.items():\n58 if not stream.get('url'):\n59 continue\n60 yield {\n61 'format_id': f'{format_id}-{ext}',\n62 'ext': ext,\n63 'source_preference': pref,\n64 'url': urljoin(spl_url, stream['url']),\n65 'tbr': float_or_none(stream.get('bandwidth'), scale=1000),\n66 'duration': float_or_none(stream.get('duration'), scale=1000),\n67 'width': traverse_obj(stream, ('resolution', 0)),\n68 'height': traverse_obj(stream, ('resolution', 1)) or int_or_none(format_id.replace('p', '')),\n69 **parse_codecs(stream.get('codec')),\n70 }\n71 \n72 def _real_extract(self, url):\n73 display_id, video_id = self._match_valid_url(url).groups()\n74 \n75 data = self._download_json(\n76 'https://www.televizeseznam.cz/api/graphql', video_id, 'Downloading GraphQL result',\n77 data=json.dumps({\n78 'variables': {'urlName': video_id},\n79 'query': '''\n80 query LoadEpisode($urlName : String){ episode(urlName: $urlName){ ...VideoDetailFragmentOnEpisode } }\n81 fragment VideoDetailFragmentOnEpisode on Episode {\n82 id\n83 spl\n84 urlName\n85 name\n86 perex\n87 duration\n88 views\n89 }'''\n90 }).encode('utf-8'),\n91 headers={'Content-Type': 'application/json;charset=UTF-8'}\n92 )['data']['episode']\n93 \n94 spl_url = data['spl'] + 'spl2,3'\n95 metadata = self._download_json(spl_url, video_id, 'Downloading playlist')\n96 if 'Location' in metadata and 'data' not in metadata:\n97 spl_url = metadata['Location']\n98 metadata = self._download_json(spl_url, video_id, 'Downloading redirected playlist')\n99 video = metadata['data']\n100 \n101 subtitles = {}\n102 for subs in video.get('subtitles', {}).values():\n103 if not subs.get('language'):\n104 continue\n105 for ext, sub_url in subs.get('urls').items():\n106 subtitles.setdefault(subs['language'], []).append({\n107 'ext': ext,\n108 'url': urljoin(spl_url, sub_url)\n109 })\n110 \n111 formats = list(self._extract_formats(spl_url, video))\n112 self._sort_formats(formats)\n113 \n114 return {\n115 'id': video_id,\n116 'display_id': display_id,\n117 'title': data.get('name'),\n118 'description': data.get('perex'),\n119 'duration': float_or_none(data.get('duration')),\n120 'view_count': int_or_none(data.get('views')),\n121 'formats': formats,\n122 'subtitles': subtitles,\n123 }\n124\n[end of yt_dlp/extractor/streamcz.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/yt_dlp/extractor/streamcz.py b/yt_dlp/extractor/streamcz.py\n--- a/yt_dlp/extractor/streamcz.py\n+++ b/yt_dlp/extractor/streamcz.py\n@@ -52,8 +52,8 @@\n \n def _extract_formats(self, spl_url, video):\n for ext, pref, streams in (\n- ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),\n- ('mp4', 1, video.get('mp4'))):\n+ ('ts', -1, traverse_obj(video, ('http_stream', 'qualities')) or {}),\n+ ('mp4', 1, video.get('mp4') or {})):\n for format_id, stream in streams.items():\n if not stream.get('url'):\n continue\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/yt_dlp/extractor/streamcz.py b/yt_dlp/extractor/streamcz.py\\n--- a/yt_dlp/extractor/streamcz.py\\n+++ b/yt_dlp/extractor/streamcz.py\\n@@ -52,8 +52,8 @@\\n \\n def _extract_formats(self, spl_url, video):\\n for ext, pref, streams in (\\n- ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),\\n- ('mp4', 1, video.get('mp4'))):\\n+ ('ts', -1, traverse_obj(video, ('http_stream', 'qualities')) or {}),\\n+ ('mp4', 1, video.get('mp4') or {})):\\n for format_id, stream in streams.items():\\n if not stream.get('url'):\\n continue\\n\", \"issue\": \"StreamCZ extractor broken\\n### Checklist\\n\\n- [X] I'm reporting a broken site\\n- [X] I've verified that I'm running yt-dlp version **2022.04.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\\n- [X] I've checked that all provided URLs are alive and playable in a browser\\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. 
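The change above is the usual `value or {}` guard: when a playlist entry carries no `http_stream` or `mp4` block, the lookup returns None and the subsequent `.items()` call raises the AttributeError seen in the log. A dependency-free sketch of the same pattern — the dict layout here is a simplified stand-in for the real playlist JSON, not yt-dlp's actual schema:

```python
def iter_formats(video):
    """Yield (format_id, stream) pairs, tolerating missing stream groups."""
    stream_groups = (
        ("ts", (video.get("http_stream") or {}).get("qualities")),
        ("mp4", video.get("mp4")),
    )
    for ext, streams in stream_groups:
        # `streams` may be None when the source omits this group entirely;
        # `or {}` keeps the .items() loop from raising AttributeError.
        for format_id, stream in (streams or {}).items():
            if stream.get("url"):
                yield f"{format_id}-{ext}", stream

video_with_only_mp4 = {"mp4": {"720p": {"url": "https://example.com/720.mp4"}}}
print(list(iter_formats(video_with_only_mp4)))  # one mp4 format, no crash
```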
DO NOT post duplicates\\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\\n\\n### Region\\n\\nworld\\n\\n### Description\\n\\nBroken support for stream.cz\\r\\nExample: https://www.stream.cz/autobazar/vecerni-auto-271357\\n\\n### Verbose log\\n\\n```shell\\nC:\\\\Users\\\\xxx>yt-dlp.lnk https://www.stream.cz/autobazar/vecerni-auto-271357 -U -v\\r\\n[debug] Command-line config: ['https://www.stream.cz/autobazar/vecerni-auto-271357', '-U', '-v']\\r\\n[debug] User config \\\"C:\\\\Users\\\\xxx\\\\yt-dlp.conf\\\": ['--user-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36', '--no-check-certificate', '-o', 'D:\\\\\\\\Downloads\\\\\\\\yt-dlp\\\\\\\\%(autonumber)03d_%(title)s.%(ext)s', '--no-mtime', '--format', 'bestvideo[ext=mp4][height<=1080][fps<=30]+bestaudio[ext=m4a]/best[ext=mp4][height<=1080][fps<=30]/best[height<=1080][fps<=30]/best', '--merge-output-format', 'mkv', '--ffmpeg-location', 'C:\\\\\\\\Apps\\\\\\\\ffmpeg\\\\\\\\bin\\\\\\\\ffmpeg.exe', '--all-subs', '-i', '--add-metadata', '--remux-video', 'mkv', '--add-header', 'accept-language: cs,fi;q=0.9,en-US;q=0.8,en;q=0.7,en-GB;q=0.6', '--add-header', 'sec-ch-ua: \\\"Chromium\\\";v=\\\"94\\\", \\\"Google Chrome\\\";v=\\\"94\\\", \\\";Not A Brand\\\";v=\\\"99\\\"', '--add-header', 'sec-ch-ua-arch: \\\"x86\\\"', '--add-header', 'sec-ch-ua-bitness: \\\"64\\\"', '--add-header', 'sec-ch-ua-full-version: \\\"94.0.4606.71\\\"', '--add-header', 'sec-ch-ua-mobile: ?0', '--add-header', 'sec-ch-ua-model: \\\"\\\"', '--add-header', 'sec-ch-ua-platform: \\\"Windows\\\"', '--add-header', 'sec-ch-ua-platform-version: \\\"10.0.0\\\"', '--add-header', 'sec-fetch-dest: document', '--add-header', 'sec-fetch-mode: navigate', '--add-header', 'sec-fetch-site: none', '--add-header', 'sec-fetch-user: ?1', '--add-header', 'service-worker-navigation-preload: true', '--add-header', 'upgrade-insecure-requests: 1', '--add-header', 'alt-svc: h3=\\\":443\\\"; ma=2592000,h3-29=\\\":443\\\"; ma=2592000,h3-T051=\\\":443\\\"; ma=2592000,h3-Q050=\\\":443\\\"; ma=2592000,h3-Q046=\\\":443\\\"; ma=2592000,h3-Q043=\\\":443\\\"; ma=2592000,quic=\\\":443\\\"; ma=2592000; v=\\\"46,43\\\"', '--add-header', 'cache-control: no-cache, no-store, max-age=0, must-revalidate']\\r\\n[debug] Encodings: locale cp1252, fs utf-8, out utf-8, err utf-8, pref cp1252\\r\\n[debug] yt-dlp version 2022.04.08 [7884ade] (win_exe)\\r\\n[debug] Python version 3.8.10 (CPython 64bit) - Windows-10-10.0.19044-SP0\\r\\n[debug] Checking exe version: \\\"C:\\\\Apps\\\\ffmpeg\\\\bin\\\\ffmpeg.exe\\\" -bsfs\\r\\n[debug] Checking exe version: \\\"C:\\\\Apps\\\\ffmpeg\\\\bin\\\\ffprobe\\\" -bsfs\\r\\n[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-full_build-www.gyan.dev\\r\\n[debug] Optional libraries: brotli, certifi, Cryptodome, mutagen, sqlite, websockets\\r\\n[debug] Proxy map: {}\\r\\nLatest version: 2022.04.08, Current version: 2022.04.08\\r\\nyt-dlp is up to date (2022.04.08)\\r\\n[debug] [StreamCZ] Extracting URL: https://www.stream.cz/autobazar/vecerni-auto-271357\\r\\n[StreamCZ] 271357: Downloading GraphQL result\\r\\n[StreamCZ] 271357: Downloading playlist\\r\\nERROR: 'NoneType' object has no attribute 
'items'\\r\\nTraceback (most recent call last):\\r\\n File \\\"yt_dlp\\\\YoutubeDL.py\\\", line 1408, in wrapper\\r\\n File \\\"yt_dlp\\\\YoutubeDL.py\\\", line 1478, in __extract_info\\r\\n File \\\"yt_dlp\\\\extractor\\\\common.py\\\", line 641, in extract\\r\\n File \\\"yt_dlp\\\\extractor\\\\streamcz.py\\\", line 112, in _real_extract\\r\\n File \\\"yt_dlp\\\\extractor\\\\streamcz.py\\\", line 58, in _extract_formats\\r\\nAttributeError: 'NoneType' object has no attribute 'items'\\n```\\n\\n\", \"before_files\": [{\"content\": \"import json\\n\\nfrom .common import InfoExtractor\\nfrom ..utils import (\\n float_or_none,\\n int_or_none,\\n parse_codecs,\\n traverse_obj,\\n urljoin,\\n)\\n\\n\\nclass StreamCZIE(InfoExtractor):\\n _VALID_URL = r'https?://(?:www\\\\.)?(?:stream|televizeseznam)\\\\.cz/[^?#]+/(?P[^?#]+)-(?P[0-9]+)'\\n _TESTS = [{\\n 'url': 'https://www.televizeseznam.cz/video/lajna/buh-57953890',\\n 'md5': '40c41ade1464a390a0b447e333df4239',\\n 'info_dict': {\\n 'id': '57953890',\\n 'ext': 'mp4',\\n 'title': 'B\\u016fh',\\n 'display_id': 'buh',\\n 'description': 'md5:8f5f09b9b7bc67df910486cdd88f7165',\\n 'duration': 1369.6,\\n 'view_count': int,\\n }\\n }, {\\n 'url': 'https://www.stream.cz/kdo-to-mluvi/kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna-64087937',\\n 'md5': '41fd358000086a1ccdb068c77809b158',\\n 'info_dict': {\\n 'id': '64087937',\\n 'ext': 'mp4',\\n 'title': 'Kdo to mluv\\u00ed? Velk\\u00e9 odhalen\\u00ed p\\u0159in\\u00e1\\u0161\\u00ed nov\\u00fd po\\u0159ad u\\u017e od 25. srpna',\\n 'display_id': 'kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna',\\n 'description': 'md5:97a811000a6460266029d6c1c2ebcd59',\\n 'duration': 50.2,\\n 'view_count': int,\\n }\\n }, {\\n 'url': 'https://www.stream.cz/tajemno/znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili-64147267',\\n 'md5': '3ee4d0be040e8f4a543e67e509d55e3f',\\n 'info_dict': {\\n 'id': '64147267',\\n 'ext': 'mp4',\\n 'title': 'Zni\\u010dehonic jim skrz st\\u0159echu prol\\u00edtnul z\\u00e1hadn\\u00fd p\\u0159edm\\u011bt. 
Badatel\\u00e9 v\\u0161e objasnili',\\n 'display_id': 'znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili',\\n 'description': 'md5:4b8ada6718d34bb011c4e04ca4bc19bf',\\n 'duration': 442.84,\\n 'view_count': int,\\n }\\n }]\\n\\n def _extract_formats(self, spl_url, video):\\n for ext, pref, streams in (\\n ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),\\n ('mp4', 1, video.get('mp4'))):\\n for format_id, stream in streams.items():\\n if not stream.get('url'):\\n continue\\n yield {\\n 'format_id': f'{format_id}-{ext}',\\n 'ext': ext,\\n 'source_preference': pref,\\n 'url': urljoin(spl_url, stream['url']),\\n 'tbr': float_or_none(stream.get('bandwidth'), scale=1000),\\n 'duration': float_or_none(stream.get('duration'), scale=1000),\\n 'width': traverse_obj(stream, ('resolution', 0)),\\n 'height': traverse_obj(stream, ('resolution', 1)) or int_or_none(format_id.replace('p', '')),\\n **parse_codecs(stream.get('codec')),\\n }\\n\\n def _real_extract(self, url):\\n display_id, video_id = self._match_valid_url(url).groups()\\n\\n data = self._download_json(\\n 'https://www.televizeseznam.cz/api/graphql', video_id, 'Downloading GraphQL result',\\n data=json.dumps({\\n 'variables': {'urlName': video_id},\\n 'query': '''\\n query LoadEpisode($urlName : String){ episode(urlName: $urlName){ ...VideoDetailFragmentOnEpisode } }\\n fragment VideoDetailFragmentOnEpisode on Episode {\\n id\\n spl\\n urlName\\n name\\n perex\\n duration\\n views\\n }'''\\n }).encode('utf-8'),\\n headers={'Content-Type': 'application/json;charset=UTF-8'}\\n )['data']['episode']\\n\\n spl_url = data['spl'] + 'spl2,3'\\n metadata = self._download_json(spl_url, video_id, 'Downloading playlist')\\n if 'Location' in metadata and 'data' not in metadata:\\n spl_url = metadata['Location']\\n metadata = self._download_json(spl_url, video_id, 'Downloading redirected playlist')\\n video = metadata['data']\\n\\n subtitles = {}\\n for subs in video.get('subtitles', {}).values():\\n if not subs.get('language'):\\n continue\\n for ext, sub_url in subs.get('urls').items():\\n subtitles.setdefault(subs['language'], []).append({\\n 'ext': ext,\\n 'url': urljoin(spl_url, sub_url)\\n })\\n\\n formats = list(self._extract_formats(spl_url, video))\\n self._sort_formats(formats)\\n\\n return {\\n 'id': video_id,\\n 'display_id': display_id,\\n 'title': data.get('name'),\\n 'description': data.get('perex'),\\n 'duration': float_or_none(data.get('duration')),\\n 'view_count': int_or_none(data.get('views')),\\n 'formats': formats,\\n 'subtitles': subtitles,\\n }\\n\", \"path\": \"yt_dlp/extractor/streamcz.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3845,"string":"3,845"},"num_tokens_diff":{"kind":"number","value":182,"string":"182"}}},{"rowIdx":18139,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_2571"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"cupy__cupy-2318"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nTypeError for OutOfMemoryError\nSeen while using chainer while multiprocessing and using the GPU:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File 
\"/usr/lib/python3.6/multiprocessing/pool.py\", line 463, in _handle_results\r\n task = get()\r\n File \"/usr/lib/python3.6/multiprocessing/connection.py\", line 251, in recv\r\n return _ForkingPickler.loads(buf.getbuffer())\r\n File \"cupy/cuda/memory.pyx\", line 37, in cupy.cuda.memory.OutOfMemoryError.__init__\r\nTypeError: __init__() takes exactly 3 positional arguments (2 given)\r\n```\r\nSeems like it tried to raise an OutOfMemoryError but failed to do so.\r\n\r\n```\r\nCuPy Version : 6.1.0\r\nCUDA Root : /usr/local/cuda\r\nCUDA Build Version : 10010\r\nCUDA Driver Version : 10010\r\nCUDA Runtime Version : 10010\r\ncuDNN Build Version : 7500\r\ncuDNN Version : 7500\r\nNCCL Build Version : 2402\r\nNCCL Runtime Version : 2402\r\n```\n\n\n\n[start of cupy/cuda/compiler.py]\n1 import hashlib\n2 import math\n3 import os\n4 import re\n5 import shutil\n6 import sys\n7 import tempfile\n8 \n9 import six\n10 \n11 from cupy.cuda import device\n12 from cupy.cuda import function\n13 from cupy.cuda import nvrtc\n14 \n15 _nvrtc_version = None\n16 _nvrtc_max_compute_capability = None\n17 \n18 \n19 def _get_nvrtc_version():\n20 global _nvrtc_version\n21 if _nvrtc_version is None:\n22 _nvrtc_version = nvrtc.getVersion()\n23 \n24 return _nvrtc_version\n25 \n26 \n27 def _get_arch():\n28 global _nvrtc_max_compute_capability\n29 if _nvrtc_max_compute_capability is None:\n30 # See Supported Compile Options section of NVRTC User Guide for\n31 # the maximum value allowed for `--gpu-architecture`.\n32 major, minor = _get_nvrtc_version()\n33 if major < 9:\n34 # CUDA 7.0 / 7.5 / 8.0\n35 _nvrtc_max_compute_capability = '50'\n36 else:\n37 # CUDA 9.0 / 9.1\n38 _nvrtc_max_compute_capability = '70'\n39 cc = min(device.Device().compute_capability, _nvrtc_max_compute_capability)\n40 return 'compute_%s' % cc\n41 \n42 \n43 class TemporaryDirectory(object):\n44 def __enter__(self):\n45 self.path = tempfile.mkdtemp()\n46 return self.path\n47 \n48 def __exit__(self, exc_type, exc_value, traceback):\n49 if exc_value is not None:\n50 return\n51 \n52 for name in os.listdir(self.path):\n53 os.unlink(os.path.join(self.path, name))\n54 os.rmdir(self.path)\n55 \n56 \n57 def _get_bool_env_variable(name, default):\n58 val = os.environ.get(name)\n59 if val is None or len(val) == 0:\n60 return default\n61 try:\n62 return int(val) == 1\n63 except ValueError:\n64 return False\n65 \n66 \n67 def compile_using_nvrtc(source, options=(), arch=None, filename='kern.cu'):\n68 if not arch:\n69 arch = _get_arch()\n70 \n71 options += ('-arch={}'.format(arch),)\n72 \n73 with TemporaryDirectory() as root_dir:\n74 cu_path = os.path.join(root_dir, filename)\n75 \n76 with open(cu_path, 'w') as cu_file:\n77 cu_file.write(source)\n78 \n79 prog = _NVRTCProgram(source, cu_path)\n80 try:\n81 ptx = prog.compile(options)\n82 except CompileException as e:\n83 dump = _get_bool_env_variable(\n84 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\n85 if dump:\n86 e.dump(sys.stderr)\n87 raise\n88 \n89 return ptx\n90 \n91 \n92 def _preprocess(source, options, arch):\n93 options += ('-arch={}'.format(arch),)\n94 \n95 prog = _NVRTCProgram(source, '')\n96 try:\n97 result = prog.compile(options)\n98 except CompileException as e:\n99 dump = _get_bool_env_variable(\n100 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\n101 if dump:\n102 e.dump(sys.stderr)\n103 raise\n104 \n105 assert isinstance(result, six.text_type)\n106 return result\n107 \n108 \n109 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n110 \n111 \n112 def get_cache_dir():\n113 return 
os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n114 \n115 \n116 _empty_file_preprocess_cache = {}\n117 \n118 \n119 def compile_with_cache(source, options=(), arch=None, cache_dir=None,\n120 extra_source=None):\n121 # NVRTC does not use extra_source. extra_source is used for cache key.\n122 global _empty_file_preprocess_cache\n123 if cache_dir is None:\n124 cache_dir = get_cache_dir()\n125 if arch is None:\n126 arch = _get_arch()\n127 \n128 options += ('-ftz=true',)\n129 if _get_bool_env_variable('CUPY_CUDA_COMPILE_WITH_DEBUG', False):\n130 options += ('--device-debug', '--generate-line-info')\n131 \n132 env = (arch, options, _get_nvrtc_version())\n133 base = _empty_file_preprocess_cache.get(env, None)\n134 if base is None:\n135 # This is checking of NVRTC compiler internal version\n136 base = _preprocess('', options, arch)\n137 _empty_file_preprocess_cache[env] = base\n138 key_src = '%s %s %s %s' % (env, base, source, extra_source)\n139 \n140 key_src = key_src.encode('utf-8')\n141 name = '%s_2.cubin' % hashlib.md5(key_src).hexdigest()\n142 \n143 if not os.path.isdir(cache_dir):\n144 try:\n145 os.makedirs(cache_dir)\n146 except OSError:\n147 if not os.path.isdir(cache_dir):\n148 raise\n149 \n150 mod = function.Module()\n151 # To handle conflicts in concurrent situation, we adopt lock-free method\n152 # to avoid performance degradation.\n153 path = os.path.join(cache_dir, name)\n154 if os.path.exists(path):\n155 with open(path, 'rb') as file:\n156 data = file.read()\n157 if len(data) >= 32:\n158 hash = data[:32]\n159 cubin = data[32:]\n160 cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n161 if hash == cubin_hash:\n162 mod.load(cubin)\n163 return mod\n164 \n165 ptx = compile_using_nvrtc(source, options, arch, name + '.cu')\n166 ls = function.LinkState()\n167 ls.add_ptr_data(ptx, u'cupy.ptx')\n168 cubin = ls.complete()\n169 cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n170 \n171 # shutil.move is not atomic operation, so it could result in a corrupted\n172 # file. We detect it by appending md5 hash at the beginning of each cache\n173 # file. 
If the file is corrupted, it will be ignored next time it is read.\n174 with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:\n175 tf.write(cubin_hash)\n176 tf.write(cubin)\n177 temp_path = tf.name\n178 shutil.move(temp_path, path)\n179 \n180 # Save .cu source file along with .cubin\n181 if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):\n182 with open(path + '.cu', 'w') as f:\n183 f.write(source)\n184 \n185 mod.load(cubin)\n186 return mod\n187 \n188 \n189 class CompileException(Exception):\n190 \n191 def __init__(self, msg, source, name, options):\n192 self._msg = msg\n193 self.source = source\n194 self.name = name\n195 self.options = options\n196 \n197 def __repr__(self):\n198 return str(self)\n199 \n200 def __str__(self):\n201 return self.get_message()\n202 \n203 def get_message(self):\n204 return self._msg\n205 \n206 def dump(self, f):\n207 lines = self.source.split('\\n')\n208 digits = int(math.floor(math.log10(len(lines)))) + 1\n209 linum_fmt = '{{:0{}d}} '.format(digits)\n210 f.write('NVRTC compilation error: {}\\n'.format(self))\n211 f.write('-----\\n')\n212 f.write('Name: {}\\n'.format(self.name))\n213 f.write('Options: {}\\n'.format(' '.join(self.options)))\n214 f.write('CUDA source:\\n')\n215 for i, line in enumerate(lines):\n216 f.write(linum_fmt.format(i + 1) + line.rstrip() + '\\n')\n217 f.write('-----\\n')\n218 f.flush()\n219 \n220 \n221 class _NVRTCProgram(object):\n222 \n223 def __init__(self, src, name='default_program', headers=(),\n224 include_names=()):\n225 self.ptr = None\n226 \n227 if isinstance(src, six.binary_type):\n228 src = src.decode('UTF-8')\n229 if isinstance(name, six.binary_type):\n230 name = name.decode('UTF-8')\n231 \n232 self.src = src\n233 self.name = name\n234 self.ptr = nvrtc.createProgram(src, name, headers, include_names)\n235 \n236 def __del__(self):\n237 if self.ptr:\n238 nvrtc.destroyProgram(self.ptr)\n239 \n240 def compile(self, options=()):\n241 try:\n242 nvrtc.compileProgram(self.ptr, options)\n243 return nvrtc.getPTX(self.ptr)\n244 except nvrtc.NVRTCError:\n245 log = nvrtc.getProgramLog(self.ptr)\n246 raise CompileException(log, self.src, self.name, options)\n247 \n248 \n249 def is_valid_kernel_name(name):\n250 return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', name) is not None\n251\n[end of cupy/cuda/compiler.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
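The TypeError in the issue is a pickling failure: when `multiprocessing` ships an exception between processes, the default `BaseException.__reduce__` rebuilds it as `ExceptionType(*self.args)`, and an exception whose `__init__` requires extra positional arguments while `self.args` holds only a single message (as the traceback suggests cupy's `OutOfMemoryError` does) cannot be reconstructed that way. A self-contained sketch of the failure and of the `__reduce__`-based fix, using illustrative class names rather than cupy's real ones:

```python
import pickle

class BadOOM(Exception):
    def __init__(self, size, total):                      # two required arguments
        super().__init__(f"out of memory ({size}/{total})")   # self.args becomes a 1-tuple

class GoodOOM(Exception):
    def __init__(self, size, total):
        super().__init__(f"out of memory ({size}/{total})")
        self.size, self.total = size, total

    def __reduce__(self):
        # Tell pickle to rebuild the instance with the arguments __init__ actually expects.
        return (type(self), (self.size, self.total))

try:
    pickle.loads(pickle.dumps(BadOOM(1, 2)))
except TypeError as exc:
    print("unpickling fails:", exc)                       # same class of failure as the traceback above

restored = pickle.loads(pickle.dumps(GoodOOM(1, 2)))
print("round-trip ok:", restored.size, restored.total)
```

This is the shape of the fix recorded below for this task: give `CompileException` a `__reduce__` that returns its constructor arguments.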
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -193,6 +193,10 @@\n self.source = source\n self.name = name\n self.options = options\n+ super(CompileException, self).__init__()\n+\n+ def __reduce__(self):\n+ return (type(self), (self._msg, self.source, self.name, self.options))\n \n def __repr__(self):\n return str(self)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\\n--- a/cupy/cuda/compiler.py\\n+++ b/cupy/cuda/compiler.py\\n@@ -193,6 +193,10 @@\\n self.source = source\\n self.name = name\\n self.options = options\\n+ super(CompileException, self).__init__()\\n+\\n+ def __reduce__(self):\\n+ return (type(self), (self._msg, self.source, self.name, self.options))\\n \\n def __repr__(self):\\n return str(self)\\n\", \"issue\": \"TypeError for OutOfMemoryError\\nSeen while using chainer while multiprocessing and using the GPU:\\r\\n```\\r\\nTraceback (most recent call last):\\r\\n File \\\"/usr/lib/python3.6/threading.py\\\", line 916, in _bootstrap_inner\\r\\n self.run()\\r\\n File \\\"/usr/lib/python3.6/threading.py\\\", line 864, in run\\r\\n self._target(*self._args, **self._kwargs)\\r\\n File \\\"/usr/lib/python3.6/multiprocessing/pool.py\\\", line 463, in _handle_results\\r\\n task = get()\\r\\n File \\\"/usr/lib/python3.6/multiprocessing/connection.py\\\", line 251, in recv\\r\\n return _ForkingPickler.loads(buf.getbuffer())\\r\\n File \\\"cupy/cuda/memory.pyx\\\", line 37, in cupy.cuda.memory.OutOfMemoryError.__init__\\r\\nTypeError: __init__() takes exactly 3 positional arguments (2 given)\\r\\n```\\r\\nSeems like it tried to raise an OutOfMemoryError but failed to do so.\\r\\n\\r\\n```\\r\\nCuPy Version : 6.1.0\\r\\nCUDA Root : /usr/local/cuda\\r\\nCUDA Build Version : 10010\\r\\nCUDA Driver Version : 10010\\r\\nCUDA Runtime Version : 10010\\r\\ncuDNN Build Version : 7500\\r\\ncuDNN Version : 7500\\r\\nNCCL Build Version : 2402\\r\\nNCCL Runtime Version : 2402\\r\\n```\\n\", \"before_files\": [{\"content\": \"import hashlib\\nimport math\\nimport os\\nimport re\\nimport shutil\\nimport sys\\nimport tempfile\\n\\nimport six\\n\\nfrom cupy.cuda import device\\nfrom cupy.cuda import function\\nfrom cupy.cuda import nvrtc\\n\\n_nvrtc_version = None\\n_nvrtc_max_compute_capability = None\\n\\n\\ndef _get_nvrtc_version():\\n global _nvrtc_version\\n if _nvrtc_version is None:\\n _nvrtc_version = 
nvrtc.getVersion()\\n\\n return _nvrtc_version\\n\\n\\ndef _get_arch():\\n global _nvrtc_max_compute_capability\\n if _nvrtc_max_compute_capability is None:\\n # See Supported Compile Options section of NVRTC User Guide for\\n # the maximum value allowed for `--gpu-architecture`.\\n major, minor = _get_nvrtc_version()\\n if major < 9:\\n # CUDA 7.0 / 7.5 / 8.0\\n _nvrtc_max_compute_capability = '50'\\n else:\\n # CUDA 9.0 / 9.1\\n _nvrtc_max_compute_capability = '70'\\n cc = min(device.Device().compute_capability, _nvrtc_max_compute_capability)\\n return 'compute_%s' % cc\\n\\n\\nclass TemporaryDirectory(object):\\n def __enter__(self):\\n self.path = tempfile.mkdtemp()\\n return self.path\\n\\n def __exit__(self, exc_type, exc_value, traceback):\\n if exc_value is not None:\\n return\\n\\n for name in os.listdir(self.path):\\n os.unlink(os.path.join(self.path, name))\\n os.rmdir(self.path)\\n\\n\\ndef _get_bool_env_variable(name, default):\\n val = os.environ.get(name)\\n if val is None or len(val) == 0:\\n return default\\n try:\\n return int(val) == 1\\n except ValueError:\\n return False\\n\\n\\ndef compile_using_nvrtc(source, options=(), arch=None, filename='kern.cu'):\\n if not arch:\\n arch = _get_arch()\\n\\n options += ('-arch={}'.format(arch),)\\n\\n with TemporaryDirectory() as root_dir:\\n cu_path = os.path.join(root_dir, filename)\\n\\n with open(cu_path, 'w') as cu_file:\\n cu_file.write(source)\\n\\n prog = _NVRTCProgram(source, cu_path)\\n try:\\n ptx = prog.compile(options)\\n except CompileException as e:\\n dump = _get_bool_env_variable(\\n 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\\n if dump:\\n e.dump(sys.stderr)\\n raise\\n\\n return ptx\\n\\n\\ndef _preprocess(source, options, arch):\\n options += ('-arch={}'.format(arch),)\\n\\n prog = _NVRTCProgram(source, '')\\n try:\\n result = prog.compile(options)\\n except CompileException as e:\\n dump = _get_bool_env_variable(\\n 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\\n if dump:\\n e.dump(sys.stderr)\\n raise\\n\\n assert isinstance(result, six.text_type)\\n return result\\n\\n\\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\\n\\n\\ndef get_cache_dir():\\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\\n\\n\\n_empty_file_preprocess_cache = {}\\n\\n\\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None,\\n extra_source=None):\\n # NVRTC does not use extra_source. 
extra_source is used for cache key.\\n global _empty_file_preprocess_cache\\n if cache_dir is None:\\n cache_dir = get_cache_dir()\\n if arch is None:\\n arch = _get_arch()\\n\\n options += ('-ftz=true',)\\n if _get_bool_env_variable('CUPY_CUDA_COMPILE_WITH_DEBUG', False):\\n options += ('--device-debug', '--generate-line-info')\\n\\n env = (arch, options, _get_nvrtc_version())\\n base = _empty_file_preprocess_cache.get(env, None)\\n if base is None:\\n # This is checking of NVRTC compiler internal version\\n base = _preprocess('', options, arch)\\n _empty_file_preprocess_cache[env] = base\\n key_src = '%s %s %s %s' % (env, base, source, extra_source)\\n\\n key_src = key_src.encode('utf-8')\\n name = '%s_2.cubin' % hashlib.md5(key_src).hexdigest()\\n\\n if not os.path.isdir(cache_dir):\\n try:\\n os.makedirs(cache_dir)\\n except OSError:\\n if not os.path.isdir(cache_dir):\\n raise\\n\\n mod = function.Module()\\n # To handle conflicts in concurrent situation, we adopt lock-free method\\n # to avoid performance degradation.\\n path = os.path.join(cache_dir, name)\\n if os.path.exists(path):\\n with open(path, 'rb') as file:\\n data = file.read()\\n if len(data) >= 32:\\n hash = data[:32]\\n cubin = data[32:]\\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\\n if hash == cubin_hash:\\n mod.load(cubin)\\n return mod\\n\\n ptx = compile_using_nvrtc(source, options, arch, name + '.cu')\\n ls = function.LinkState()\\n ls.add_ptr_data(ptx, u'cupy.ptx')\\n cubin = ls.complete()\\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\\n\\n # shutil.move is not atomic operation, so it could result in a corrupted\\n # file. We detect it by appending md5 hash at the beginning of each cache\\n # file. If the file is corrupted, it will be ignored next time it is read.\\n with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:\\n tf.write(cubin_hash)\\n tf.write(cubin)\\n temp_path = tf.name\\n shutil.move(temp_path, path)\\n\\n # Save .cu source file along with .cubin\\n if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):\\n with open(path + '.cu', 'w') as f:\\n f.write(source)\\n\\n mod.load(cubin)\\n return mod\\n\\n\\nclass CompileException(Exception):\\n\\n def __init__(self, msg, source, name, options):\\n self._msg = msg\\n self.source = source\\n self.name = name\\n self.options = options\\n\\n def __repr__(self):\\n return str(self)\\n\\n def __str__(self):\\n return self.get_message()\\n\\n def get_message(self):\\n return self._msg\\n\\n def dump(self, f):\\n lines = self.source.split('\\\\n')\\n digits = int(math.floor(math.log10(len(lines)))) + 1\\n linum_fmt = '{{:0{}d}} '.format(digits)\\n f.write('NVRTC compilation error: {}\\\\n'.format(self))\\n f.write('-----\\\\n')\\n f.write('Name: {}\\\\n'.format(self.name))\\n f.write('Options: {}\\\\n'.format(' '.join(self.options)))\\n f.write('CUDA source:\\\\n')\\n for i, line in enumerate(lines):\\n f.write(linum_fmt.format(i + 1) + line.rstrip() + '\\\\n')\\n f.write('-----\\\\n')\\n f.flush()\\n\\n\\nclass _NVRTCProgram(object):\\n\\n def __init__(self, src, name='default_program', headers=(),\\n include_names=()):\\n self.ptr = None\\n\\n if isinstance(src, six.binary_type):\\n src = src.decode('UTF-8')\\n if isinstance(name, six.binary_type):\\n name = name.decode('UTF-8')\\n\\n self.src = src\\n self.name = name\\n self.ptr = nvrtc.createProgram(src, name, headers, include_names)\\n\\n def __del__(self):\\n if self.ptr:\\n nvrtc.destroyProgram(self.ptr)\\n\\n def compile(self, options=()):\\n try:\\n 
nvrtc.compileProgram(self.ptr, options)\\n return nvrtc.getPTX(self.ptr)\\n except nvrtc.NVRTCError:\\n log = nvrtc.getProgramLog(self.ptr)\\n raise CompileException(log, self.src, self.name, options)\\n\\n\\ndef is_valid_kernel_name(name):\\n return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', name) is not None\\n\", \"path\": \"cupy/cuda/compiler.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3359,"string":"3,359"},"num_tokens_diff":{"kind":"number","value":123,"string":"123"}}},{"rowIdx":18140,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_6199"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"microsoft__botbuilder-python-886"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nSkillDialog not working for non-'expected replies' scenario\n## Version\r\n4.8.0\r\n\r\n## Describe the bug\r\nSkillDialog won't work out of the box for non expected-replies scenarios.\r\n\r\n## To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Set up a root bot using skill dialog and a skill bot\r\n2. Run both bots and initiate the SkillDialog\r\n3. When the skill first comes back to the parent an error like the following should arise:\r\n```\r\nFile \"..path-to-botbuilder/botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py\", line 213, in _send_to_skill\r\n if not 200 <= response.status <= 299:\r\nAttributeError: 'NoneType' object has no attribute 'status'\r\n```\r\n\r\n## Expected behavior\r\nThe response get back to the parent without any problems\r\n\r\n## Workaround\r\nIf the skill bot is modified to always send some content in every successful response at the route handler level, the scenario should work.\r\n\r\nExample on how to do this for an aiohttp skill bot:\r\n```python\r\n#This is how a typical message handler method could look like\r\nasync def messages(req: Request) -> Response:\r\n # Main bot message handler.\r\n if \"application/json\" in req.headers[\"Content-Type\"]:\r\n body = await req.json()\r\n else:\r\n return Response(status=415)\r\n\r\n activity = Activity().deserialize(body)\r\n auth_header = req.headers[\"Authorization\"] if \"Authorization\" in req.headers else \"\"\r\n\r\n response = await ADAPTER.process_activity(activity, auth_header, BOT.on_turn)\r\n if response:\r\n return json_response(data=response.body, status=response.status)\r\n # THE FIX IS IN THE LINE BELOW\r\n return Response(status=201, body='{\"foo\":\"bar\"}'.encode(\"utf-8\"))\r\n```\r\n\r\n**Alternative Workaround:** use expected replies as delivery method in the parent bot (SkillDialog).\r\n\r\n[bug]\r\n\n\n\n\n[start of libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py]\n1 # Copyright (c) Microsoft Corporation. 
All rights reserved.\n2 # Licensed under the MIT License.\n3 # pylint: disable=no-member\n4 \n5 import json\n6 from typing import Dict\n7 from logging import Logger\n8 \n9 import aiohttp\n10 from botbuilder.core import InvokeResponse\n11 from botbuilder.core.skills import BotFrameworkClient\n12 from botbuilder.schema import (\n13 Activity,\n14 ExpectedReplies,\n15 ConversationReference,\n16 ConversationAccount,\n17 )\n18 from botframework.connector.auth import (\n19 ChannelProvider,\n20 CredentialProvider,\n21 GovernmentConstants,\n22 MicrosoftAppCredentials,\n23 )\n24 \n25 \n26 class BotFrameworkHttpClient(BotFrameworkClient):\n27 \n28 \"\"\"\n29 A skill host adapter implements API to forward activity to a skill and\n30 implements routing ChannelAPI calls from the Skill up through the bot/adapter.\n31 \"\"\"\n32 \n33 INVOKE_ACTIVITY_NAME = \"SkillEvents.ChannelApiInvoke\"\n34 _BOT_IDENTITY_KEY = \"BotIdentity\"\n35 _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}\n36 \n37 def __init__(\n38 self,\n39 credential_provider: CredentialProvider,\n40 channel_provider: ChannelProvider = None,\n41 logger: Logger = None,\n42 ):\n43 if not credential_provider:\n44 raise TypeError(\"credential_provider can't be None\")\n45 \n46 self._credential_provider = credential_provider\n47 self._channel_provider = channel_provider\n48 self._logger = logger\n49 self._session = aiohttp.ClientSession()\n50 \n51 async def post_activity(\n52 self,\n53 from_bot_id: str,\n54 to_bot_id: str,\n55 to_url: str,\n56 service_url: str,\n57 conversation_id: str,\n58 activity: Activity,\n59 ) -> InvokeResponse:\n60 app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)\n61 \n62 if not app_credentials:\n63 raise KeyError(\"Unable to get appCredentials to connect to the skill\")\n64 \n65 # Get token for the skill call\n66 token = (\n67 app_credentials.get_access_token()\n68 if app_credentials.microsoft_app_id\n69 else None\n70 )\n71 \n72 # Capture current activity settings before changing them.\n73 # TODO: DO we need to set the activity ID? 
(events that are created manually don't have it).\n74 original_conversation_id = activity.conversation.id\n75 original_service_url = activity.service_url\n76 original_caller_id = activity.caller_id\n77 original_relates_to = activity.relates_to\n78 \n79 try:\n80 # TODO: The relato has to be ported to the adapter in the new integration library when\n81 # resolving conflicts in merge\n82 activity.relates_to = ConversationReference(\n83 service_url=activity.service_url,\n84 activity_id=activity.id,\n85 channel_id=activity.channel_id,\n86 conversation=ConversationAccount(\n87 id=activity.conversation.id,\n88 name=activity.conversation.name,\n89 conversation_type=activity.conversation.conversation_type,\n90 aad_object_id=activity.conversation.aad_object_id,\n91 is_group=activity.conversation.is_group,\n92 role=activity.conversation.role,\n93 tenant_id=activity.conversation.tenant_id,\n94 properties=activity.conversation.properties,\n95 ),\n96 bot=None,\n97 )\n98 activity.conversation.id = conversation_id\n99 activity.service_url = service_url\n100 activity.caller_id = f\"urn:botframework:aadappid:{from_bot_id}\"\n101 \n102 headers_dict = {\n103 \"Content-type\": \"application/json; charset=utf-8\",\n104 }\n105 if token:\n106 headers_dict.update(\n107 {\"Authorization\": f\"Bearer {token}\",}\n108 )\n109 \n110 json_content = json.dumps(activity.serialize())\n111 resp = await self._session.post(\n112 to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n113 )\n114 resp.raise_for_status()\n115 data = (await resp.read()).decode()\n116 content = json.loads(data) if data else None\n117 \n118 if content:\n119 return InvokeResponse(status=resp.status, body=content)\n120 \n121 finally:\n122 # Restore activity properties.\n123 activity.conversation.id = original_conversation_id\n124 activity.service_url = original_service_url\n125 activity.caller_id = original_caller_id\n126 activity.relates_to = original_relates_to\n127 \n128 async def post_buffered_activity(\n129 self,\n130 from_bot_id: str,\n131 to_bot_id: str,\n132 to_url: str,\n133 service_url: str,\n134 conversation_id: str,\n135 activity: Activity,\n136 ) -> [Activity]:\n137 \"\"\"\n138 Helper method to return a list of activities when an Activity is being\n139 sent with DeliveryMode == expectReplies.\n140 \"\"\"\n141 response = await self.post_activity(\n142 from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity\n143 )\n144 if not response or (response.status / 100) != 2:\n145 return []\n146 return ExpectedReplies().deserialize(response.body).activities\n147 \n148 async def _get_app_credentials(\n149 self, app_id: str, oauth_scope: str\n150 ) -> MicrosoftAppCredentials:\n151 if not app_id:\n152 return MicrosoftAppCredentials(None, None)\n153 \n154 cache_key = f\"{app_id}{oauth_scope}\"\n155 app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n156 \n157 if app_credentials:\n158 return app_credentials\n159 \n160 app_password = await self._credential_provider.get_app_password(app_id)\n161 app_credentials = MicrosoftAppCredentials(\n162 app_id, app_password, oauth_scope=oauth_scope\n163 )\n164 if self._channel_provider and self._channel_provider.is_government():\n165 app_credentials.oauth_endpoint = (\n166 GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\n167 )\n168 app_credentials.oauth_scope = (\n169 GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n170 )\n171 \n172 BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n173 return app_credentials\n174\n[end of 
libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n@@ -115,8 +115,7 @@\n data = (await resp.read()).decode()\n content = json.loads(data) if data else None\n \n- if content:\n- return InvokeResponse(status=resp.status, body=content)\n+ return InvokeResponse(status=resp.status, body=content)\n \n finally:\n # Restore activity properties.\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\\n--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\\n+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\\n@@ -115,8 +115,7 @@\\n data = (await resp.read()).decode()\\n content = json.loads(data) if data else None\\n \\n- if content:\\n- return InvokeResponse(status=resp.status, body=content)\\n+ return InvokeResponse(status=resp.status, body=content)\\n \\n finally:\\n # Restore activity properties.\\n\", \"issue\": \"SkillDialog not working for non-'expected replies' scenario\\n## Version\\r\\n4.8.0\\r\\n\\r\\n## Describe the bug\\r\\nSkillDialog won't work out of the box for non expected-replies scenarios.\\r\\n\\r\\n## To Reproduce\\r\\nSteps to reproduce the behavior:\\r\\n1. Set up a root bot using skill dialog and a skill bot\\r\\n2. Run both bots and initiate the SkillDialog\\r\\n3. 
When the skill first comes back to the parent an error like the following should arise:\\r\\n```\\r\\nFile \\\"..path-to-botbuilder/botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py\\\", line 213, in _send_to_skill\\r\\n if not 200 <= response.status <= 299:\\r\\nAttributeError: 'NoneType' object has no attribute 'status'\\r\\n```\\r\\n\\r\\n## Expected behavior\\r\\nThe response get back to the parent without any problems\\r\\n\\r\\n## Workaround\\r\\nIf the skill bot is modified to always send some content in every successful response at the route handler level, the scenario should work.\\r\\n\\r\\nExample on how to do this for an aiohttp skill bot:\\r\\n```python\\r\\n#This is how a typical message handler method could look like\\r\\nasync def messages(req: Request) -> Response:\\r\\n # Main bot message handler.\\r\\n if \\\"application/json\\\" in req.headers[\\\"Content-Type\\\"]:\\r\\n body = await req.json()\\r\\n else:\\r\\n return Response(status=415)\\r\\n\\r\\n activity = Activity().deserialize(body)\\r\\n auth_header = req.headers[\\\"Authorization\\\"] if \\\"Authorization\\\" in req.headers else \\\"\\\"\\r\\n\\r\\n response = await ADAPTER.process_activity(activity, auth_header, BOT.on_turn)\\r\\n if response:\\r\\n return json_response(data=response.body, status=response.status)\\r\\n # THE FIX IS IN THE LINE BELOW\\r\\n return Response(status=201, body='{\\\"foo\\\":\\\"bar\\\"}'.encode(\\\"utf-8\\\"))\\r\\n```\\r\\n\\r\\n**Alternative Workaround:** use expected replies as delivery method in the parent bot (SkillDialog).\\r\\n\\r\\n[bug]\\r\\n\\n\", \"before_files\": [{\"content\": \"# Copyright (c) Microsoft Corporation. All rights reserved.\\n# Licensed under the MIT License.\\n# pylint: disable=no-member\\n\\nimport json\\nfrom typing import Dict\\nfrom logging import Logger\\n\\nimport aiohttp\\nfrom botbuilder.core import InvokeResponse\\nfrom botbuilder.core.skills import BotFrameworkClient\\nfrom botbuilder.schema import (\\n Activity,\\n ExpectedReplies,\\n ConversationReference,\\n ConversationAccount,\\n)\\nfrom botframework.connector.auth import (\\n ChannelProvider,\\n CredentialProvider,\\n GovernmentConstants,\\n MicrosoftAppCredentials,\\n)\\n\\n\\nclass BotFrameworkHttpClient(BotFrameworkClient):\\n\\n \\\"\\\"\\\"\\n A skill host adapter implements API to forward activity to a skill and\\n implements routing ChannelAPI calls from the Skill up through the bot/adapter.\\n \\\"\\\"\\\"\\n\\n INVOKE_ACTIVITY_NAME = \\\"SkillEvents.ChannelApiInvoke\\\"\\n _BOT_IDENTITY_KEY = \\\"BotIdentity\\\"\\n _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}\\n\\n def __init__(\\n self,\\n credential_provider: CredentialProvider,\\n channel_provider: ChannelProvider = None,\\n logger: Logger = None,\\n ):\\n if not credential_provider:\\n raise TypeError(\\\"credential_provider can't be None\\\")\\n\\n self._credential_provider = credential_provider\\n self._channel_provider = channel_provider\\n self._logger = logger\\n self._session = aiohttp.ClientSession()\\n\\n async def post_activity(\\n self,\\n from_bot_id: str,\\n to_bot_id: str,\\n to_url: str,\\n service_url: str,\\n conversation_id: str,\\n activity: Activity,\\n ) -> InvokeResponse:\\n app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)\\n\\n if not app_credentials:\\n raise KeyError(\\\"Unable to get appCredentials to connect to the skill\\\")\\n\\n # Get token for the skill call\\n token = (\\n app_credentials.get_access_token()\\n if 
app_credentials.microsoft_app_id\\n else None\\n )\\n\\n # Capture current activity settings before changing them.\\n # TODO: DO we need to set the activity ID? (events that are created manually don't have it).\\n original_conversation_id = activity.conversation.id\\n original_service_url = activity.service_url\\n original_caller_id = activity.caller_id\\n original_relates_to = activity.relates_to\\n\\n try:\\n # TODO: The relato has to be ported to the adapter in the new integration library when\\n # resolving conflicts in merge\\n activity.relates_to = ConversationReference(\\n service_url=activity.service_url,\\n activity_id=activity.id,\\n channel_id=activity.channel_id,\\n conversation=ConversationAccount(\\n id=activity.conversation.id,\\n name=activity.conversation.name,\\n conversation_type=activity.conversation.conversation_type,\\n aad_object_id=activity.conversation.aad_object_id,\\n is_group=activity.conversation.is_group,\\n role=activity.conversation.role,\\n tenant_id=activity.conversation.tenant_id,\\n properties=activity.conversation.properties,\\n ),\\n bot=None,\\n )\\n activity.conversation.id = conversation_id\\n activity.service_url = service_url\\n activity.caller_id = f\\\"urn:botframework:aadappid:{from_bot_id}\\\"\\n\\n headers_dict = {\\n \\\"Content-type\\\": \\\"application/json; charset=utf-8\\\",\\n }\\n if token:\\n headers_dict.update(\\n {\\\"Authorization\\\": f\\\"Bearer {token}\\\",}\\n )\\n\\n json_content = json.dumps(activity.serialize())\\n resp = await self._session.post(\\n to_url, data=json_content.encode(\\\"utf-8\\\"), headers=headers_dict,\\n )\\n resp.raise_for_status()\\n data = (await resp.read()).decode()\\n content = json.loads(data) if data else None\\n\\n if content:\\n return InvokeResponse(status=resp.status, body=content)\\n\\n finally:\\n # Restore activity properties.\\n activity.conversation.id = original_conversation_id\\n activity.service_url = original_service_url\\n activity.caller_id = original_caller_id\\n activity.relates_to = original_relates_to\\n\\n async def post_buffered_activity(\\n self,\\n from_bot_id: str,\\n to_bot_id: str,\\n to_url: str,\\n service_url: str,\\n conversation_id: str,\\n activity: Activity,\\n ) -> [Activity]:\\n \\\"\\\"\\\"\\n Helper method to return a list of activities when an Activity is being\\n sent with DeliveryMode == expectReplies.\\n \\\"\\\"\\\"\\n response = await self.post_activity(\\n from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity\\n )\\n if not response or (response.status / 100) != 2:\\n return []\\n return ExpectedReplies().deserialize(response.body).activities\\n\\n async def _get_app_credentials(\\n self, app_id: str, oauth_scope: str\\n ) -> MicrosoftAppCredentials:\\n if not app_id:\\n return MicrosoftAppCredentials(None, None)\\n\\n cache_key = f\\\"{app_id}{oauth_scope}\\\"\\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\\n\\n if app_credentials:\\n return app_credentials\\n\\n app_password = await self._credential_provider.get_app_password(app_id)\\n app_credentials = MicrosoftAppCredentials(\\n app_id, app_password, oauth_scope=oauth_scope\\n )\\n if self._channel_provider and self._channel_provider.is_government():\\n app_credentials.oauth_endpoint = (\\n GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\\n )\\n app_credentials.oauth_scope = (\\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\\n )\\n\\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\\n return app_credentials\\n\", 
\"path\": \"libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2620,"string":"2,620"},"num_tokens_diff":{"kind":"number","value":187,"string":"187"}}},{"rowIdx":18141,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_8973"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"spesmilo__electrum-2164"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nHTTPS cert expired on LabelSync server\nThe cert on https://sync.bytesized-hosting.com:9090/ has expired and the LabelSync plugin stopped working \n\n\n\n[start of plugins/labels/labels.py]\n1 import hashlib\n2 import requests\n3 import threading\n4 import json\n5 import sys\n6 import traceback\n7 \n8 import aes\n9 import base64\n10 \n11 import electrum\n12 from electrum.plugins import BasePlugin, hook\n13 from electrum.i18n import _\n14 \n15 \n16 \n17 \n18 class LabelsPlugin(BasePlugin):\n19 \n20 def __init__(self, parent, config, name):\n21 BasePlugin.__init__(self, parent, config, name)\n22 self.target_host = 'sync.bytesized-hosting.com:9090'\n23 self.wallets = {}\n24 \n25 def encode(self, wallet, msg):\n26 password, iv, wallet_id = self.wallets[wallet]\n27 encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,\n28 msg.encode('utf8'))\n29 return base64.b64encode(encrypted)\n30 \n31 def decode(self, wallet, message):\n32 password, iv, wallet_id = self.wallets[wallet]\n33 decoded = base64.b64decode(message)\n34 decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)\n35 return decrypted.decode('utf8')\n36 \n37 def get_nonce(self, wallet):\n38 # nonce is the nonce to be used with the next change\n39 nonce = wallet.storage.get('wallet_nonce')\n40 if nonce is None:\n41 nonce = 1\n42 self.set_nonce(wallet, nonce)\n43 return nonce\n44 \n45 def set_nonce(self, wallet, nonce):\n46 self.print_error(\"set\", wallet.basename(), \"nonce to\", nonce)\n47 wallet.storage.put(\"wallet_nonce\", nonce)\n48 \n49 @hook\n50 def set_label(self, wallet, item, label):\n51 if not wallet in self.wallets:\n52 return\n53 nonce = self.get_nonce(wallet)\n54 wallet_id = self.wallets[wallet][2]\n55 bundle = {\"walletId\": wallet_id,\n56 \"walletNonce\": nonce,\n57 \"externalId\": self.encode(wallet, item),\n58 \"encryptedLabel\": self.encode(wallet, label)}\n59 t = threading.Thread(target=self.do_request,\n60 args=[\"POST\", \"/label\", False, bundle])\n61 t.setDaemon(True)\n62 t.start()\n63 # Caller will write the wallet\n64 self.set_nonce(wallet, nonce + 1)\n65 \n66 def do_request(self, method, url = \"/labels\", is_batch=False, data=None):\n67 url = 'https://' + self.target_host + url\n68 kwargs = {'headers': {}}\n69 if method == 'GET' and data:\n70 kwargs['params'] = data\n71 elif method == 'POST' and data:\n72 kwargs['data'] = json.dumps(data)\n73 kwargs['headers']['Content-Type'] = 'application/json'\n74 response = requests.request(method, url, **kwargs)\n75 if response.status_code != 200:\n76 raise BaseException(response.status_code, response.text)\n77 response = response.json()\n78 if \"error\" in response:\n79 raise BaseException(response[\"error\"])\n80 return response\n81 \n82 def push_thread(self, wallet):\n83 wallet_id = self.wallets[wallet][2]\n84 bundle = {\"labels\": [],\n85 \"walletId\": wallet_id,\n86 \"walletNonce\": 
self.get_nonce(wallet)}\n87 for key, value in wallet.labels.iteritems():\n88 try:\n89 encoded_key = self.encode(wallet, key)\n90 encoded_value = self.encode(wallet, value)\n91 except:\n92 self.print_error('cannot encode', repr(key), repr(value))\n93 continue\n94 bundle[\"labels\"].append({'encryptedLabel': encoded_value,\n95 'externalId': encoded_key})\n96 self.do_request(\"POST\", \"/labels\", True, bundle)\n97 \n98 def pull_thread(self, wallet, force):\n99 wallet_id = self.wallets[wallet][2]\n100 nonce = 1 if force else self.get_nonce(wallet) - 1\n101 self.print_error(\"asking for labels since nonce\", nonce)\n102 try:\n103 response = self.do_request(\"GET\", (\"/labels/since/%d/for/%s\" % (nonce, wallet_id) ))\n104 if response[\"labels\"] is None:\n105 self.print_error('no new labels')\n106 return\n107 result = {}\n108 for label in response[\"labels\"]:\n109 try:\n110 key = self.decode(wallet, label[\"externalId\"])\n111 value = self.decode(wallet, label[\"encryptedLabel\"])\n112 except:\n113 continue\n114 try:\n115 json.dumps(key)\n116 json.dumps(value)\n117 except:\n118 self.print_error('error: no json', key)\n119 continue\n120 result[key] = value\n121 \n122 for key, value in result.items():\n123 if force or not wallet.labels.get(key):\n124 wallet.labels[key] = value\n125 \n126 self.print_error(\"received %d labels\" % len(response))\n127 # do not write to disk because we're in a daemon thread\n128 wallet.storage.put('labels', wallet.labels)\n129 self.set_nonce(wallet, response[\"nonce\"] + 1)\n130 self.on_pulled(wallet)\n131 \n132 except Exception as e:\n133 traceback.print_exc(file=sys.stderr)\n134 self.print_error(\"could not retrieve labels\")\n135 \n136 def start_wallet(self, wallet):\n137 nonce = self.get_nonce(wallet)\n138 self.print_error(\"wallet\", wallet.basename(), \"nonce is\", nonce)\n139 mpk = wallet.get_fingerprint()\n140 if not mpk:\n141 return\n142 password = hashlib.sha1(mpk).digest().encode('hex')[:32]\n143 iv = hashlib.sha256(password).digest()[:16]\n144 wallet_id = hashlib.sha256(mpk).digest().encode('hex')\n145 self.wallets[wallet] = (password, iv, wallet_id)\n146 # If there is an auth token we can try to actually start syncing\n147 t = threading.Thread(target=self.pull_thread, args=(wallet, False))\n148 t.setDaemon(True)\n149 t.start()\n150 \n151 def stop_wallet(self, wallet):\n152 self.wallets.pop(wallet, None)\n153\n[end of plugins/labels/labels.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
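Aside from the host change, the listing above also shows how label sync derives its credentials: `start_wallet` hashes the wallet fingerprint into an AES password, a fixed IV, and a server-side wallet id. A Python 3 restatement of that derivation (the plugin itself is Python 2, hence `.encode('hex')`; the fingerprint below is a placeholder):

```python
import hashlib

def derive_sync_credentials(mpk: bytes):
    password = hashlib.sha1(mpk).hexdigest()[:32]            # AES key material
    iv = hashlib.sha256(password.encode()).digest()[:16]     # fixed per-wallet IV
    wallet_id = hashlib.sha256(mpk).hexdigest()              # opaque id sent to the sync server
    return password, iv, wallet_id

password, iv, wallet_id = derive_sync_credentials(b"placeholder-fingerprint")
print(wallet_id)
```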
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/plugins/labels/labels.py b/plugins/labels/labels.py\n--- a/plugins/labels/labels.py\n+++ b/plugins/labels/labels.py\n@@ -5,7 +5,6 @@\n import sys\n import traceback\n \n-import aes\n import base64\n \n import electrum\n@@ -19,7 +18,7 @@\n \n def __init__(self, parent, config, name):\n BasePlugin.__init__(self, parent, config, name)\n- self.target_host = 'sync.bytesized-hosting.com:9090'\n+ self.target_host = 'labels.bauerj.eu'\n self.wallets = {}\n \n def encode(self, wallet, msg):\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/plugins/labels/labels.py b/plugins/labels/labels.py\\n--- a/plugins/labels/labels.py\\n+++ b/plugins/labels/labels.py\\n@@ -5,7 +5,6 @@\\n import sys\\n import traceback\\n \\n-import aes\\n import base64\\n \\n import electrum\\n@@ -19,7 +18,7 @@\\n \\n def __init__(self, parent, config, name):\\n BasePlugin.__init__(self, parent, config, name)\\n- self.target_host = 'sync.bytesized-hosting.com:9090'\\n+ self.target_host = 'labels.bauerj.eu'\\n self.wallets = {}\\n \\n def encode(self, wallet, msg):\\n\", \"issue\": \"HTTPS cert expired on LabelSync server\\nThe cert on https://sync.bytesized-hosting.com:9090/ has expired and the LabelSync plugin stopped working \\n\", \"before_files\": [{\"content\": \"import hashlib\\nimport requests\\nimport threading\\nimport json\\nimport sys\\nimport traceback\\n\\nimport aes\\nimport base64\\n\\nimport electrum\\nfrom electrum.plugins import BasePlugin, hook\\nfrom electrum.i18n import _\\n\\n\\n\\n\\nclass LabelsPlugin(BasePlugin):\\n\\n def __init__(self, parent, config, name):\\n BasePlugin.__init__(self, parent, config, name)\\n self.target_host = 'sync.bytesized-hosting.com:9090'\\n self.wallets = {}\\n\\n def encode(self, wallet, msg):\\n password, iv, wallet_id = self.wallets[wallet]\\n encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,\\n msg.encode('utf8'))\\n return base64.b64encode(encrypted)\\n\\n def decode(self, wallet, message):\\n password, iv, wallet_id = self.wallets[wallet]\\n decoded = base64.b64decode(message)\\n decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)\\n return decrypted.decode('utf8')\\n\\n def get_nonce(self, wallet):\\n # nonce is the nonce to be used with the next change\\n nonce = wallet.storage.get('wallet_nonce')\\n if nonce is None:\\n nonce = 1\\n self.set_nonce(wallet, nonce)\\n return nonce\\n\\n def set_nonce(self, wallet, nonce):\\n self.print_error(\\\"set\\\", 
wallet.basename(), \\\"nonce to\\\", nonce)\\n wallet.storage.put(\\\"wallet_nonce\\\", nonce)\\n\\n @hook\\n def set_label(self, wallet, item, label):\\n if not wallet in self.wallets:\\n return\\n nonce = self.get_nonce(wallet)\\n wallet_id = self.wallets[wallet][2]\\n bundle = {\\\"walletId\\\": wallet_id,\\n \\\"walletNonce\\\": nonce,\\n \\\"externalId\\\": self.encode(wallet, item),\\n \\\"encryptedLabel\\\": self.encode(wallet, label)}\\n t = threading.Thread(target=self.do_request,\\n args=[\\\"POST\\\", \\\"/label\\\", False, bundle])\\n t.setDaemon(True)\\n t.start()\\n # Caller will write the wallet\\n self.set_nonce(wallet, nonce + 1)\\n\\n def do_request(self, method, url = \\\"/labels\\\", is_batch=False, data=None):\\n url = 'https://' + self.target_host + url\\n kwargs = {'headers': {}}\\n if method == 'GET' and data:\\n kwargs['params'] = data\\n elif method == 'POST' and data:\\n kwargs['data'] = json.dumps(data)\\n kwargs['headers']['Content-Type'] = 'application/json'\\n response = requests.request(method, url, **kwargs)\\n if response.status_code != 200:\\n raise BaseException(response.status_code, response.text)\\n response = response.json()\\n if \\\"error\\\" in response:\\n raise BaseException(response[\\\"error\\\"])\\n return response\\n\\n def push_thread(self, wallet):\\n wallet_id = self.wallets[wallet][2]\\n bundle = {\\\"labels\\\": [],\\n \\\"walletId\\\": wallet_id,\\n \\\"walletNonce\\\": self.get_nonce(wallet)}\\n for key, value in wallet.labels.iteritems():\\n try:\\n encoded_key = self.encode(wallet, key)\\n encoded_value = self.encode(wallet, value)\\n except:\\n self.print_error('cannot encode', repr(key), repr(value))\\n continue\\n bundle[\\\"labels\\\"].append({'encryptedLabel': encoded_value,\\n 'externalId': encoded_key})\\n self.do_request(\\\"POST\\\", \\\"/labels\\\", True, bundle)\\n\\n def pull_thread(self, wallet, force):\\n wallet_id = self.wallets[wallet][2]\\n nonce = 1 if force else self.get_nonce(wallet) - 1\\n self.print_error(\\\"asking for labels since nonce\\\", nonce)\\n try:\\n response = self.do_request(\\\"GET\\\", (\\\"/labels/since/%d/for/%s\\\" % (nonce, wallet_id) ))\\n if response[\\\"labels\\\"] is None:\\n self.print_error('no new labels')\\n return\\n result = {}\\n for label in response[\\\"labels\\\"]:\\n try:\\n key = self.decode(wallet, label[\\\"externalId\\\"])\\n value = self.decode(wallet, label[\\\"encryptedLabel\\\"])\\n except:\\n continue\\n try:\\n json.dumps(key)\\n json.dumps(value)\\n except:\\n self.print_error('error: no json', key)\\n continue\\n result[key] = value\\n\\n for key, value in result.items():\\n if force or not wallet.labels.get(key):\\n wallet.labels[key] = value\\n\\n self.print_error(\\\"received %d labels\\\" % len(response))\\n # do not write to disk because we're in a daemon thread\\n wallet.storage.put('labels', wallet.labels)\\n self.set_nonce(wallet, response[\\\"nonce\\\"] + 1)\\n self.on_pulled(wallet)\\n\\n except Exception as e:\\n traceback.print_exc(file=sys.stderr)\\n self.print_error(\\\"could not retrieve labels\\\")\\n\\n def start_wallet(self, wallet):\\n nonce = self.get_nonce(wallet)\\n self.print_error(\\\"wallet\\\", wallet.basename(), \\\"nonce is\\\", nonce)\\n mpk = wallet.get_fingerprint()\\n if not mpk:\\n return\\n password = hashlib.sha1(mpk).digest().encode('hex')[:32]\\n iv = hashlib.sha256(password).digest()[:16]\\n wallet_id = hashlib.sha256(mpk).digest().encode('hex')\\n self.wallets[wallet] = (password, iv, wallet_id)\\n # If there is an auth token we 
can try to actually start syncing\\n t = threading.Thread(target=self.pull_thread, args=(wallet, False))\\n t.setDaemon(True)\\n t.start()\\n\\n def stop_wallet(self, wallet):\\n self.wallets.pop(wallet, None)\\n\", \"path\": \"plugins/labels/labels.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2159,"string":"2,159"},"num_tokens_diff":{"kind":"number","value":152,"string":"152"}}},{"rowIdx":18142,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_29856"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"mesonbuild__meson-9174"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nGet the i18n targets inside the meson files\nI need to generate a file from the i18n *.gmo files. This `custom_target` needs to depend on the `-gmo` target documented [here](https://mesonbuild.com/i18n-module.html).\r\n\r\nThis target should be retrievable through a method like :\r\n```\r\ngettext_targets = i18n.gettext('mypackage')\r\ncustom_target(\r\n depends: gettext_targets.target_gmo()\r\n)\r\n```\r\n\r\nFor now, I think the only way to achieve that is something like that :\r\n```\r\nninja_gmo = custom_target(command: [ 'ninja', '-C' meson.build_root(), 'mypackage-gmo'])\r\n```\n\n\n\n[start of mesonbuild/modules/i18n.py]\n1 # Copyright 2016 The Meson development team\n2 \n3 # Licensed under the Apache License, Version 2.0 (the \"License\");\n4 # you may not use this file except in compliance with the License.\n5 # You may obtain a copy of the License at\n6 \n7 # http://www.apache.org/licenses/LICENSE-2.0\n8 \n9 # Unless required by applicable law or agreed to in writing, software\n10 # distributed under the License is distributed on an \"AS IS\" BASIS,\n11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n12 # See the License for the specific language governing permissions and\n13 # limitations under the License.\n14 \n15 import shutil\n16 \n17 from os import path\n18 from .. import coredata, mesonlib, build, mlog\n19 from ..mesonlib import MesonException\n20 from ..scripts.gettext import read_linguas\n21 from . import ModuleReturnValue\n22 from . 
import ExtensionModule\n23 from ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs\n24 \n25 PRESET_ARGS = {\n26 'glib': [\n27 '--from-code=UTF-8',\n28 '--add-comments',\n29 \n30 # https://developer.gnome.org/glib/stable/glib-I18N.html\n31 '--keyword=_',\n32 '--keyword=N_',\n33 '--keyword=C_:1c,2',\n34 '--keyword=NC_:1c,2',\n35 '--keyword=g_dcgettext:2',\n36 '--keyword=g_dngettext:2,3',\n37 '--keyword=g_dpgettext2:2c,3',\n38 \n39 '--flag=N_:1:pass-c-format',\n40 '--flag=C_:2:pass-c-format',\n41 '--flag=NC_:2:pass-c-format',\n42 '--flag=g_dngettext:2:pass-c-format',\n43 '--flag=g_strdup_printf:1:c-format',\n44 '--flag=g_string_printf:2:c-format',\n45 '--flag=g_string_append_printf:2:c-format',\n46 '--flag=g_error_new:3:c-format',\n47 '--flag=g_set_error:4:c-format',\n48 '--flag=g_markup_printf_escaped:1:c-format',\n49 '--flag=g_log:3:c-format',\n50 '--flag=g_print:1:c-format',\n51 '--flag=g_printerr:1:c-format',\n52 '--flag=g_printf:1:c-format',\n53 '--flag=g_fprintf:2:c-format',\n54 '--flag=g_sprintf:2:c-format',\n55 '--flag=g_snprintf:3:c-format',\n56 ]\n57 }\n58 \n59 \n60 class I18nModule(ExtensionModule):\n61 def __init__(self, interpreter):\n62 super().__init__(interpreter)\n63 self.methods.update({\n64 'merge_file': self.merge_file,\n65 'gettext': self.gettext,\n66 })\n67 \n68 @staticmethod\n69 def nogettext_warning():\n70 mlog.warning('Gettext not found, all translation targets will be ignored.', once=True)\n71 \n72 @staticmethod\n73 def _get_data_dirs(state, dirs):\n74 \"\"\"Returns source directories of relative paths\"\"\"\n75 src_dir = path.join(state.environment.get_source_dir(), state.subdir)\n76 return [path.join(src_dir, d) for d in dirs]\n77 \n78 @FeatureNew('i18n.merge_file', '0.37.0')\n79 @FeatureNewKwargs('i18n.merge_file', '0.51.0', ['args'])\n80 @permittedKwargs(build.CustomTarget.known_kwargs | {'data_dirs', 'po_dir', 'type', 'args'})\n81 def merge_file(self, state, args, kwargs):\n82 if not shutil.which('xgettext'):\n83 self.nogettext_warning()\n84 return\n85 podir = kwargs.pop('po_dir', None)\n86 if not podir:\n87 raise MesonException('i18n: po_dir is a required kwarg')\n88 podir = path.join(state.build_to_src, state.subdir, podir)\n89 \n90 file_type = kwargs.pop('type', 'xml')\n91 VALID_TYPES = ('xml', 'desktop')\n92 if file_type not in VALID_TYPES:\n93 raise MesonException(f'i18n: \"{file_type}\" is not a valid type {VALID_TYPES}')\n94 \n95 datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.pop('data_dirs', [])))\n96 datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n97 \n98 command = state.environment.get_build_command() + [\n99 '--internal', 'msgfmthelper',\n100 '@INPUT@', '@OUTPUT@', file_type, podir\n101 ]\n102 if datadirs:\n103 command.append(datadirs)\n104 \n105 if 'args' in kwargs:\n106 command.append('--')\n107 command.append(mesonlib.stringlistify(kwargs.pop('args', [])))\n108 \n109 kwargs['command'] = command\n110 \n111 # We only use this input file to create a name of the custom target.\n112 # Thus we can ignore the other entries.\n113 inputfile = mesonlib.extract_as_list(kwargs, 'input')[0]\n114 if isinstance(inputfile, str):\n115 inputfile = mesonlib.File.from_source_file(state.environment.source_dir,\n116 state.subdir, inputfile)\n117 if isinstance(inputfile, mesonlib.File):\n118 # output could be '@BASENAME@' in which case we need to do substitutions\n119 # to get a unique target name.\n120 output = kwargs['output']\n121 ifile_abs = inputfile.absolute_path(state.environment.source_dir,\n122 
state.environment.build_dir)\n123 values = mesonlib.get_filenames_templates_dict([ifile_abs], None)\n124 outputs = mesonlib.substitute_values([output], values)\n125 output = outputs[0]\n126 ct = build.CustomTarget(output + '_' + state.subdir.replace('/', '@').replace('\\\\', '@') + '_merge', state.subdir, state.subproject, kwargs)\n127 else:\n128 ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs)\n129 \n130 return ModuleReturnValue(ct, [ct])\n131 \n132 @FeatureNewKwargs('i18n.gettext', '0.37.0', ['preset'])\n133 @FeatureNewKwargs('i18n.gettext', '0.50.0', ['install_dir'])\n134 @permittedKwargs({'po_dir', 'data_dirs', 'type', 'languages', 'args', 'preset', 'install', 'install_dir'})\n135 def gettext(self, state, args, kwargs):\n136 if len(args) != 1:\n137 raise coredata.MesonException('Gettext requires one positional argument (package name).')\n138 if not shutil.which('xgettext'):\n139 self.nogettext_warning()\n140 return\n141 packagename = args[0]\n142 languages = mesonlib.stringlistify(kwargs.get('languages', []))\n143 datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))\n144 extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n145 targets = []\n146 \n147 preset = kwargs.pop('preset', None)\n148 if preset:\n149 preset_args = PRESET_ARGS.get(preset)\n150 if not preset_args:\n151 raise coredata.MesonException('i18n: Preset \"{}\" is not one of the valid options: {}'.format(\n152 preset, list(PRESET_ARGS.keys())))\n153 extra_args = set(preset_args + extra_args)\n154 \n155 pkg_arg = '--pkgname=' + packagename\n156 lang_arg = '--langs=' + '@@'.join(languages) if languages else None\n157 datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n158 extra_args = '--extra-args=' + '@@'.join(extra_args) if extra_args else None\n159 \n160 potargs = state.environment.get_build_command() + ['--internal', 'gettext', 'pot', pkg_arg]\n161 if datadirs:\n162 potargs.append(datadirs)\n163 if extra_args:\n164 potargs.append(extra_args)\n165 pottarget = build.RunTarget(packagename + '-pot', potargs, [], state.subdir, state.subproject)\n166 targets.append(pottarget)\n167 \n168 install = kwargs.get('install', True)\n169 install_dir = kwargs.get('install_dir', state.environment.coredata.get_option(mesonlib.OptionKey('localedir')))\n170 if not languages:\n171 languages = read_linguas(path.join(state.environment.source_dir, state.subdir))\n172 for l in languages:\n173 po_file = mesonlib.File.from_source_file(state.environment.source_dir,\n174 state.subdir, l+'.po')\n175 gmo_kwargs = {'command': ['msgfmt', '@INPUT@', '-o', '@OUTPUT@'],\n176 'input': po_file,\n177 'output': packagename+'.mo',\n178 'install': install,\n179 # We have multiple files all installed as packagename+'.mo' in different install subdirs.\n180 # What we really wanted to do, probably, is have a rename: kwarg, but that's not available\n181 # to custom_targets. 
Crude hack: set the build target's subdir manually.\n182 # Bonus: the build tree has something usable as an uninstalled bindtextdomain() target dir.\n183 'install_dir': path.join(install_dir, l, 'LC_MESSAGES'),\n184 'install_tag': 'i18n',\n185 }\n186 gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)\n187 targets.append(gmotarget)\n188 \n189 updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]\n190 if lang_arg:\n191 updatepoargs.append(lang_arg)\n192 if datadirs:\n193 updatepoargs.append(datadirs)\n194 if extra_args:\n195 updatepoargs.append(extra_args)\n196 updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)\n197 targets.append(updatepotarget)\n198 \n199 return ModuleReturnValue(None, targets)\n200 \n201 def initialize(*args, **kwargs):\n202 return I18nModule(*args, **kwargs)\n203\n[end of mesonbuild/modules/i18n.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/mesonbuild/modules/i18n.py b/mesonbuild/modules/i18n.py\n--- a/mesonbuild/modules/i18n.py\n+++ b/mesonbuild/modules/i18n.py\n@@ -143,6 +143,7 @@\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n targets = []\n+ gmotargets = []\n \n preset = kwargs.pop('preset', None)\n if preset:\n@@ -185,6 +186,10 @@\n }\n gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)\n targets.append(gmotarget)\n+ gmotargets.append(gmotarget)\n+\n+ allgmotarget = build.AliasTarget(packagename + '-gmo', gmotargets, state.subdir, state.subproject)\n+ targets.append(allgmotarget)\n \n updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]\n if lang_arg:\n@@ -196,7 +201,7 @@\n updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)\n targets.append(updatepotarget)\n \n- return ModuleReturnValue(None, targets)\n+ return ModuleReturnValue([gmotargets, pottarget, updatepotarget], targets)\n \n def initialize(*args, **kwargs):\n return I18nModule(*args, **kwargs)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/mesonbuild/modules/i18n.py 
b/mesonbuild/modules/i18n.py\\n--- a/mesonbuild/modules/i18n.py\\n+++ b/mesonbuild/modules/i18n.py\\n@@ -143,6 +143,7 @@\\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))\\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\\n targets = []\\n+ gmotargets = []\\n \\n preset = kwargs.pop('preset', None)\\n if preset:\\n@@ -185,6 +186,10 @@\\n }\\n gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)\\n targets.append(gmotarget)\\n+ gmotargets.append(gmotarget)\\n+\\n+ allgmotarget = build.AliasTarget(packagename + '-gmo', gmotargets, state.subdir, state.subproject)\\n+ targets.append(allgmotarget)\\n \\n updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]\\n if lang_arg:\\n@@ -196,7 +201,7 @@\\n updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)\\n targets.append(updatepotarget)\\n \\n- return ModuleReturnValue(None, targets)\\n+ return ModuleReturnValue([gmotargets, pottarget, updatepotarget], targets)\\n \\n def initialize(*args, **kwargs):\\n return I18nModule(*args, **kwargs)\\n\", \"issue\": \"Get the i18n targets inside the meson files\\nI need to generate a file from the i18n *.gmo files. This `custom_target` needs to depend on the `-gmo` target documented [here](https://mesonbuild.com/i18n-module.html).\\r\\n\\r\\nThis target should be retrievable through a method like :\\r\\n```\\r\\ngettext_targets = i18n.gettext('mypackage')\\r\\ncustom_target(\\r\\n depends: gettext_targets.target_gmo()\\r\\n)\\r\\n```\\r\\n\\r\\nFor now, I think the only way to achieve that is something like that :\\r\\n```\\r\\nninja_gmo = custom_target(command:\\u00a0[ 'ninja', '-C' meson.build_root(), 'mypackage-gmo'])\\r\\n```\\n\", \"before_files\": [{\"content\": \"# Copyright 2016 The Meson development team\\n\\n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n\\n# http://www.apache.org/licenses/LICENSE-2.0\\n\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\\n\\nimport shutil\\n\\nfrom os import path\\nfrom .. import coredata, mesonlib, build, mlog\\nfrom ..mesonlib import MesonException\\nfrom ..scripts.gettext import read_linguas\\nfrom . import ModuleReturnValue\\nfrom . 
import ExtensionModule\\nfrom ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs\\n\\nPRESET_ARGS = {\\n 'glib': [\\n '--from-code=UTF-8',\\n '--add-comments',\\n\\n # https://developer.gnome.org/glib/stable/glib-I18N.html\\n '--keyword=_',\\n '--keyword=N_',\\n '--keyword=C_:1c,2',\\n '--keyword=NC_:1c,2',\\n '--keyword=g_dcgettext:2',\\n '--keyword=g_dngettext:2,3',\\n '--keyword=g_dpgettext2:2c,3',\\n\\n '--flag=N_:1:pass-c-format',\\n '--flag=C_:2:pass-c-format',\\n '--flag=NC_:2:pass-c-format',\\n '--flag=g_dngettext:2:pass-c-format',\\n '--flag=g_strdup_printf:1:c-format',\\n '--flag=g_string_printf:2:c-format',\\n '--flag=g_string_append_printf:2:c-format',\\n '--flag=g_error_new:3:c-format',\\n '--flag=g_set_error:4:c-format',\\n '--flag=g_markup_printf_escaped:1:c-format',\\n '--flag=g_log:3:c-format',\\n '--flag=g_print:1:c-format',\\n '--flag=g_printerr:1:c-format',\\n '--flag=g_printf:1:c-format',\\n '--flag=g_fprintf:2:c-format',\\n '--flag=g_sprintf:2:c-format',\\n '--flag=g_snprintf:3:c-format',\\n ]\\n}\\n\\n\\nclass I18nModule(ExtensionModule):\\n def __init__(self, interpreter):\\n super().__init__(interpreter)\\n self.methods.update({\\n 'merge_file': self.merge_file,\\n 'gettext': self.gettext,\\n })\\n\\n @staticmethod\\n def nogettext_warning():\\n mlog.warning('Gettext not found, all translation targets will be ignored.', once=True)\\n\\n @staticmethod\\n def _get_data_dirs(state, dirs):\\n \\\"\\\"\\\"Returns source directories of relative paths\\\"\\\"\\\"\\n src_dir = path.join(state.environment.get_source_dir(), state.subdir)\\n return [path.join(src_dir, d) for d in dirs]\\n\\n @FeatureNew('i18n.merge_file', '0.37.0')\\n @FeatureNewKwargs('i18n.merge_file', '0.51.0', ['args'])\\n @permittedKwargs(build.CustomTarget.known_kwargs | {'data_dirs', 'po_dir', 'type', 'args'})\\n def merge_file(self, state, args, kwargs):\\n if not shutil.which('xgettext'):\\n self.nogettext_warning()\\n return\\n podir = kwargs.pop('po_dir', None)\\n if not podir:\\n raise MesonException('i18n: po_dir is a required kwarg')\\n podir = path.join(state.build_to_src, state.subdir, podir)\\n\\n file_type = kwargs.pop('type', 'xml')\\n VALID_TYPES = ('xml', 'desktop')\\n if file_type not in VALID_TYPES:\\n raise MesonException(f'i18n: \\\"{file_type}\\\" is not a valid type {VALID_TYPES}')\\n\\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.pop('data_dirs', [])))\\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\\n\\n command = state.environment.get_build_command() + [\\n '--internal', 'msgfmthelper',\\n '@INPUT@', '@OUTPUT@', file_type, podir\\n ]\\n if datadirs:\\n command.append(datadirs)\\n\\n if 'args' in kwargs:\\n command.append('--')\\n command.append(mesonlib.stringlistify(kwargs.pop('args', [])))\\n\\n kwargs['command'] = command\\n\\n # We only use this input file to create a name of the custom target.\\n # Thus we can ignore the other entries.\\n inputfile = mesonlib.extract_as_list(kwargs, 'input')[0]\\n if isinstance(inputfile, str):\\n inputfile = mesonlib.File.from_source_file(state.environment.source_dir,\\n state.subdir, inputfile)\\n if isinstance(inputfile, mesonlib.File):\\n # output could be '@BASENAME@' in which case we need to do substitutions\\n # to get a unique target name.\\n output = kwargs['output']\\n ifile_abs = inputfile.absolute_path(state.environment.source_dir,\\n state.environment.build_dir)\\n values = mesonlib.get_filenames_templates_dict([ifile_abs], None)\\n outputs = 
mesonlib.substitute_values([output], values)\\n output = outputs[0]\\n ct = build.CustomTarget(output + '_' + state.subdir.replace('/', '@').replace('\\\\\\\\', '@') + '_merge', state.subdir, state.subproject, kwargs)\\n else:\\n ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs)\\n\\n return ModuleReturnValue(ct, [ct])\\n\\n @FeatureNewKwargs('i18n.gettext', '0.37.0', ['preset'])\\n @FeatureNewKwargs('i18n.gettext', '0.50.0', ['install_dir'])\\n @permittedKwargs({'po_dir', 'data_dirs', 'type', 'languages', 'args', 'preset', 'install', 'install_dir'})\\n def gettext(self, state, args, kwargs):\\n if len(args) != 1:\\n raise coredata.MesonException('Gettext requires one positional argument (package name).')\\n if not shutil.which('xgettext'):\\n self.nogettext_warning()\\n return\\n packagename = args[0]\\n languages = mesonlib.stringlistify(kwargs.get('languages', []))\\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))\\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\\n targets = []\\n\\n preset = kwargs.pop('preset', None)\\n if preset:\\n preset_args = PRESET_ARGS.get(preset)\\n if not preset_args:\\n raise coredata.MesonException('i18n: Preset \\\"{}\\\" is not one of the valid options: {}'.format(\\n preset, list(PRESET_ARGS.keys())))\\n extra_args = set(preset_args + extra_args)\\n\\n pkg_arg = '--pkgname=' + packagename\\n lang_arg = '--langs=' + '@@'.join(languages) if languages else None\\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\\n extra_args = '--extra-args=' + '@@'.join(extra_args) if extra_args else None\\n\\n potargs = state.environment.get_build_command() + ['--internal', 'gettext', 'pot', pkg_arg]\\n if datadirs:\\n potargs.append(datadirs)\\n if extra_args:\\n potargs.append(extra_args)\\n pottarget = build.RunTarget(packagename + '-pot', potargs, [], state.subdir, state.subproject)\\n targets.append(pottarget)\\n\\n install = kwargs.get('install', True)\\n install_dir = kwargs.get('install_dir', state.environment.coredata.get_option(mesonlib.OptionKey('localedir')))\\n if not languages:\\n languages = read_linguas(path.join(state.environment.source_dir, state.subdir))\\n for l in languages:\\n po_file = mesonlib.File.from_source_file(state.environment.source_dir,\\n state.subdir, l+'.po')\\n gmo_kwargs = {'command': ['msgfmt', '@INPUT@', '-o', '@OUTPUT@'],\\n 'input': po_file,\\n 'output': packagename+'.mo',\\n 'install': install,\\n # We have multiple files all installed as packagename+'.mo' in different install subdirs.\\n # What we really wanted to do, probably, is have a rename: kwarg, but that's not available\\n # to custom_targets. 
Crude hack: set the build target's subdir manually.\\n # Bonus: the build tree has something usable as an uninstalled bindtextdomain() target dir.\\n 'install_dir': path.join(install_dir, l, 'LC_MESSAGES'),\\n 'install_tag': 'i18n',\\n }\\n gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)\\n targets.append(gmotarget)\\n\\n updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]\\n if lang_arg:\\n updatepoargs.append(lang_arg)\\n if datadirs:\\n updatepoargs.append(datadirs)\\n if extra_args:\\n updatepoargs.append(extra_args)\\n updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)\\n targets.append(updatepotarget)\\n\\n return ModuleReturnValue(None, targets)\\n\\ndef initialize(*args, **kwargs):\\n return I18nModule(*args, **kwargs)\\n\", \"path\": \"mesonbuild/modules/i18n.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3350,"string":"3,350"},"num_tokens_diff":{"kind":"number","value":376,"string":"376"}}},{"rowIdx":18143,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_9970"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"aws-cloudformation__cfn-lint-770"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nE2532 when Step Functions Pass state has Parameters key\n*cfn-lint version: 0.17.0*\r\n\r\nUsing latest version of cfn-lint and updated spec files.\r\n\r\n*Description of issue.*\r\n\r\nLinting a CF template fails when a `AWS::StepFunctions::StateMachine`'s `DefinitionString` has a `Pass` state with the `Parameters` key.\r\n\r\nExample template:\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nResources:\r\n ExampleStateMachine:\r\n Type: AWS::StepFunctions::StateMachine\r\n Properties:\r\n DefinitionString: |-\r\n {\r\n \"StartAt\": \"Pop Element from List\",\r\n \"States\": {\r\n \"Pop Element from List\": {\r\n \"Type\": \"Pass\",\r\n \"Parameters\": {\r\n \"List.$\": \"$.List[1:]\"\r\n },\r\n \"End\": true\r\n }\r\n }\r\n }\r\n RoleArn: redacted\r\n```\r\n\r\nError:\r\n\r\n```\r\nE2532 State Machine Definition key (Parameters) for State (Pop Element from List) of Type (Pass) is not valid\r\nexample.yaml:6:13\r\n```\r\n\r\nHowever, the above `Pass` state conforms to the ASL [spec](https://states-language.net/spec.html#state-type-table) and [docs](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-pass-state.html) and comes directly from the [Transfer Data Records sample project](https://docs.aws.amazon.com/step-functions/latest/dg/sample-project-transfer-data-sqs.html).\n\n\n\n[start of src/cfnlint/rules/resources/stepfunctions/StateMachine.py]\n1 \"\"\"\n2 Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n3 \n4 Permission is hereby granted, free of charge, to any person obtaining a copy of this\n5 software and associated documentation files (the \"Software\"), to deal in the Software\n6 without restriction, including without limitation the rights to use, copy, modify,\n7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n8 permit persons to whom the Software is furnished to do so.\n9 \n10 THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n16 \"\"\"\n17 import json\n18 import six\n19 from cfnlint import CloudFormationLintRule\n20 from cfnlint import RuleMatch\n21 \n22 \n23 class StateMachine(CloudFormationLintRule):\n24 \"\"\"Check State Machine Definition\"\"\"\n25 id = 'E2532'\n26 shortdesc = 'Check State Machine Definition for proper syntax'\n27 description = 'Check the State Machine String Definition to make sure its JSON. ' \\\n28 'Validate basic syntax of the file to determine validity.'\n29 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html'\n30 tags = ['resources', 'stepfunctions']\n31 \n32 def __init__(self):\n33 \"\"\"Init\"\"\"\n34 super(StateMachine, self).__init__()\n35 self.resource_property_types.append('AWS::StepFunctions::StateMachine')\n36 \n37 def _check_state_json(self, def_json, state_name, path):\n38 \"\"\"Check State JSON Definition\"\"\"\n39 matches = []\n40 \n41 # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html\n42 common_state_keys = [\n43 'Next',\n44 'End',\n45 'Type',\n46 'Comment',\n47 'InputPath',\n48 'OutputPath',\n49 ]\n50 common_state_required_keys = [\n51 'Type',\n52 ]\n53 state_key_types = {\n54 'Pass': ['Result', 'ResultPath'],\n55 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],\n56 'Choice': ['Choices', 'Default'],\n57 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],\n58 'Succeed': [],\n59 'Fail': ['Cause', 'Error'],\n60 'Parallel': ['Branches', 'ResultPath', 'Retry', 'Catch']\n61 }\n62 state_required_types = {\n63 'Pass': [],\n64 'Task': ['Resource'],\n65 'Choice': ['Choices'],\n66 'Wait': [],\n67 'Succeed': [],\n68 'Fail': [],\n69 'Parallel': ['Branches']\n70 }\n71 \n72 for req_key in common_state_required_keys:\n73 if req_key not in def_json:\n74 message = 'State Machine Definition required key (%s) for State (%s) is missing' % (req_key, state_name)\n75 matches.append(RuleMatch(path, message))\n76 return matches\n77 \n78 state_type = def_json.get('Type')\n79 \n80 if state_type in state_key_types:\n81 for state_key, _ in def_json.items():\n82 if state_key not in common_state_keys + state_key_types.get(state_type, []):\n83 message = 'State Machine Definition key (%s) for State (%s) of Type (%s) is not valid' % (state_key, state_name, state_type)\n84 matches.append(RuleMatch(path, message))\n85 for req_key in common_state_required_keys + state_required_types.get(state_type, []):\n86 if req_key not in def_json:\n87 message = 'State Machine Definition required key (%s) for State (%s) of 
Type (%s) is missing' % (req_key, state_name, state_type)\n88 matches.append(RuleMatch(path, message))\n89 return matches\n90 else:\n91 message = 'State Machine Definition Type (%s) is not valid' % (state_type)\n92 matches.append(RuleMatch(path, message))\n93 \n94 return matches\n95 \n96 def _check_definition_json(self, def_json, path):\n97 \"\"\"Check JSON Definition\"\"\"\n98 matches = []\n99 \n100 top_level_keys = [\n101 'Comment',\n102 'StartAt',\n103 'TimeoutSeconds',\n104 'Version',\n105 'States'\n106 ]\n107 top_level_required_keys = [\n108 'StartAt',\n109 'States'\n110 ]\n111 for top_key, _ in def_json.items():\n112 if top_key not in top_level_keys:\n113 message = 'State Machine Definition key (%s) is not valid' % top_key\n114 matches.append(RuleMatch(path, message))\n115 \n116 for req_key in top_level_required_keys:\n117 if req_key not in def_json:\n118 message = 'State Machine Definition required key (%s) is missing' % req_key\n119 matches.append(RuleMatch(path, message))\n120 \n121 for state_name, state_value in def_json.get('States', {}).items():\n122 matches.extend(self._check_state_json(state_value, state_name, path))\n123 return matches\n124 \n125 def check_value(self, value, path, fail_on_loads=True):\n126 \"\"\"Check Definition Value\"\"\"\n127 matches = []\n128 try:\n129 def_json = json.loads(value)\n130 # pylint: disable=W0703\n131 except Exception as err:\n132 if fail_on_loads:\n133 message = 'State Machine Definition needs to be formatted as JSON. Error %s' % err\n134 matches.append(RuleMatch(path, message))\n135 return matches\n136 \n137 self.logger.debug('State Machine definition could not be parsed. Skipping')\n138 return matches\n139 \n140 matches.extend(self._check_definition_json(def_json, path))\n141 return matches\n142 \n143 def check_sub(self, value, path):\n144 \"\"\"Check Sub Object\"\"\"\n145 matches = []\n146 if isinstance(value, list):\n147 matches.extend(self.check_value(value[0], path, False))\n148 elif isinstance(value, six.string_types):\n149 matches.extend(self.check_value(value, path, False))\n150 \n151 return matches\n152 \n153 def match_resource_properties(self, properties, _, path, cfn):\n154 \"\"\"Check CloudFormation Properties\"\"\"\n155 matches = []\n156 \n157 matches.extend(\n158 cfn.check_value(\n159 obj=properties, key='DefinitionString',\n160 path=path[:],\n161 check_value=self.check_value,\n162 check_sub=self.check_sub\n163 ))\n164 \n165 return matches\n166\n[end of src/cfnlint/rules/resources/stepfunctions/StateMachine.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n@@ -51,7 +51,7 @@\n 'Type',\n ]\n state_key_types = {\n- 'Pass': ['Result', 'ResultPath'],\n+ 'Pass': ['Result', 'ResultPath', 'Parameters'],\n 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],\n 'Choice': ['Choices', 'Default'],\n 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\\n--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\\n+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\\n@@ -51,7 +51,7 @@\\n 'Type',\\n ]\\n state_key_types = {\\n- 'Pass': ['Result', 'ResultPath'],\\n+ 'Pass': ['Result', 'ResultPath', 'Parameters'],\\n 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],\\n 'Choice': ['Choices', 'Default'],\\n 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],\\n\", \"issue\": \"E2532 when Step Functions Pass state has Parameters key\\n*cfn-lint version: 0.17.0*\\r\\n\\r\\nUsing latest version of cfn-lint and updated spec files.\\r\\n\\r\\n*Description of issue.*\\r\\n\\r\\nLinting a CF template fails when a `AWS::StepFunctions::StateMachine`'s `DefinitionString` has a `Pass` state with the `Parameters` key.\\r\\n\\r\\nExample template:\\r\\n\\r\\n```yaml\\r\\nAWSTemplateFormatVersion: 2010-09-09\\r\\nResources:\\r\\n ExampleStateMachine:\\r\\n Type: AWS::StepFunctions::StateMachine\\r\\n Properties:\\r\\n DefinitionString: |-\\r\\n {\\r\\n \\\"StartAt\\\": \\\"Pop Element from List\\\",\\r\\n \\\"States\\\": {\\r\\n \\\"Pop Element from List\\\": {\\r\\n \\\"Type\\\": \\\"Pass\\\",\\r\\n \\\"Parameters\\\": {\\r\\n \\\"List.$\\\": \\\"$.List[1:]\\\"\\r\\n },\\r\\n \\\"End\\\": true\\r\\n }\\r\\n }\\r\\n }\\r\\n RoleArn: redacted\\r\\n```\\r\\n\\r\\nError:\\r\\n\\r\\n```\\r\\nE2532 State Machine Definition key (Parameters) for State (Pop Element from List) of Type (Pass) is not valid\\r\\nexample.yaml:6:13\\r\\n```\\r\\n\\r\\nHowever, the above `Pass` state conforms to the ASL 
[spec](https://states-language.net/spec.html#state-type-table) and [docs](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-pass-state.html) and comes directly from the [Transfer Data Records sample project](https://docs.aws.amazon.com/step-functions/latest/dg/sample-project-transfer-data-sqs.html).\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"\\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\\n\\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\\n software and associated documentation files (the \\\"Software\\\"), to deal in the Software\\n without restriction, including without limitation the rights to use, copy, modify,\\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\\n permit persons to whom the Software is furnished to do so.\\n\\n THE SOFTWARE IS PROVIDED \\\"AS IS\\\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\\n\\\"\\\"\\\"\\nimport json\\nimport six\\nfrom cfnlint import CloudFormationLintRule\\nfrom cfnlint import RuleMatch\\n\\n\\nclass StateMachine(CloudFormationLintRule):\\n \\\"\\\"\\\"Check State Machine Definition\\\"\\\"\\\"\\n id = 'E2532'\\n shortdesc = 'Check State Machine Definition for proper syntax'\\n description = 'Check the State Machine String Definition to make sure its JSON. ' \\\\\\n 'Validate basic syntax of the file to determine validity.'\\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html'\\n tags = ['resources', 'stepfunctions']\\n\\n def __init__(self):\\n \\\"\\\"\\\"Init\\\"\\\"\\\"\\n super(StateMachine, self).__init__()\\n self.resource_property_types.append('AWS::StepFunctions::StateMachine')\\n\\n def _check_state_json(self, def_json, state_name, path):\\n \\\"\\\"\\\"Check State JSON Definition\\\"\\\"\\\"\\n matches = []\\n\\n # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html\\n common_state_keys = [\\n 'Next',\\n 'End',\\n 'Type',\\n 'Comment',\\n 'InputPath',\\n 'OutputPath',\\n ]\\n common_state_required_keys = [\\n 'Type',\\n ]\\n state_key_types = {\\n 'Pass': ['Result', 'ResultPath'],\\n 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],\\n 'Choice': ['Choices', 'Default'],\\n 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],\\n 'Succeed': [],\\n 'Fail': ['Cause', 'Error'],\\n 'Parallel': ['Branches', 'ResultPath', 'Retry', 'Catch']\\n }\\n state_required_types = {\\n 'Pass': [],\\n 'Task': ['Resource'],\\n 'Choice': ['Choices'],\\n 'Wait': [],\\n 'Succeed': [],\\n 'Fail': [],\\n 'Parallel': ['Branches']\\n }\\n\\n for req_key in common_state_required_keys:\\n if req_key not in def_json:\\n message = 'State Machine Definition required key (%s) for State (%s) is missing' % (req_key, state_name)\\n matches.append(RuleMatch(path, message))\\n return matches\\n\\n state_type = def_json.get('Type')\\n\\n if state_type in state_key_types:\\n for state_key, _ in def_json.items():\\n if state_key not in common_state_keys + 
state_key_types.get(state_type, []):\\n message = 'State Machine Definition key (%s) for State (%s) of Type (%s) is not valid' % (state_key, state_name, state_type)\\n matches.append(RuleMatch(path, message))\\n for req_key in common_state_required_keys + state_required_types.get(state_type, []):\\n if req_key not in def_json:\\n message = 'State Machine Definition required key (%s) for State (%s) of Type (%s) is missing' % (req_key, state_name, state_type)\\n matches.append(RuleMatch(path, message))\\n return matches\\n else:\\n message = 'State Machine Definition Type (%s) is not valid' % (state_type)\\n matches.append(RuleMatch(path, message))\\n\\n return matches\\n\\n def _check_definition_json(self, def_json, path):\\n \\\"\\\"\\\"Check JSON Definition\\\"\\\"\\\"\\n matches = []\\n\\n top_level_keys = [\\n 'Comment',\\n 'StartAt',\\n 'TimeoutSeconds',\\n 'Version',\\n 'States'\\n ]\\n top_level_required_keys = [\\n 'StartAt',\\n 'States'\\n ]\\n for top_key, _ in def_json.items():\\n if top_key not in top_level_keys:\\n message = 'State Machine Definition key (%s) is not valid' % top_key\\n matches.append(RuleMatch(path, message))\\n\\n for req_key in top_level_required_keys:\\n if req_key not in def_json:\\n message = 'State Machine Definition required key (%s) is missing' % req_key\\n matches.append(RuleMatch(path, message))\\n\\n for state_name, state_value in def_json.get('States', {}).items():\\n matches.extend(self._check_state_json(state_value, state_name, path))\\n return matches\\n\\n def check_value(self, value, path, fail_on_loads=True):\\n \\\"\\\"\\\"Check Definition Value\\\"\\\"\\\"\\n matches = []\\n try:\\n def_json = json.loads(value)\\n # pylint: disable=W0703\\n except Exception as err:\\n if fail_on_loads:\\n message = 'State Machine Definition needs to be formatted as JSON. Error %s' % err\\n matches.append(RuleMatch(path, message))\\n return matches\\n\\n self.logger.debug('State Machine definition could not be parsed. 
Skipping')\\n return matches\\n\\n matches.extend(self._check_definition_json(def_json, path))\\n return matches\\n\\n def check_sub(self, value, path):\\n \\\"\\\"\\\"Check Sub Object\\\"\\\"\\\"\\n matches = []\\n if isinstance(value, list):\\n matches.extend(self.check_value(value[0], path, False))\\n elif isinstance(value, six.string_types):\\n matches.extend(self.check_value(value, path, False))\\n\\n return matches\\n\\n def match_resource_properties(self, properties, _, path, cfn):\\n \\\"\\\"\\\"Check CloudFormation Properties\\\"\\\"\\\"\\n matches = []\\n\\n matches.extend(\\n cfn.check_value(\\n obj=properties, key='DefinitionString',\\n path=path[:],\\n check_value=self.check_value,\\n check_sub=self.check_sub\\n ))\\n\\n return matches\\n\", \"path\": \"src/cfnlint/rules/resources/stepfunctions/StateMachine.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2666,"string":"2,666"},"num_tokens_diff":{"kind":"number","value":168,"string":"168"}}},{"rowIdx":18144,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_29287"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"weni-ai__bothub-engine-77"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nIs possible translate example to same language\n\n\n\n\n[start of bothub/api/serializers/translate.py]\n1 from rest_framework import serializers\n2 \n3 from django.utils.translation import gettext as _\n4 \n5 from bothub.common.models import RepositoryTranslatedExampleEntity\n6 from bothub.common.models import RepositoryTranslatedExample\n7 from bothub.common.models import RepositoryExample\n8 \n9 from ..validators import CanContributeInRepositoryTranslatedExampleValidator\n10 from ..validators import CanContributeInRepositoryExampleValidator\n11 from ..validators import TranslatedExampleEntitiesValidator\n12 \n13 \n14 class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n15 class Meta:\n16 model = RepositoryTranslatedExampleEntity\n17 fields = [\n18 'id',\n19 'repository_translated_example',\n20 'start',\n21 'end',\n22 'entity',\n23 'created_at',\n24 'value',\n25 ]\n26 \n27 repository_translated_example = serializers.PrimaryKeyRelatedField(\n28 queryset=RepositoryTranslatedExample.objects,\n29 validators=[\n30 CanContributeInRepositoryTranslatedExampleValidator(),\n31 ],\n32 help_text='Example translation ID')\n33 value = serializers.SerializerMethodField()\n34 \n35 def get_value(self, obj):\n36 return obj.value\n37 \n38 \n39 class RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n40 class Meta:\n41 model = RepositoryTranslatedExample\n42 fields = [\n43 'id',\n44 'original_example',\n45 'from_language',\n46 'language',\n47 'text',\n48 'has_valid_entities',\n49 'entities',\n50 'created_at',\n51 ]\n52 \n53 original_example = serializers.PrimaryKeyRelatedField(\n54 queryset=RepositoryExample.objects,\n55 validators=[\n56 CanContributeInRepositoryExampleValidator(),\n57 ],\n58 help_text=_('Example\\'s ID'))\n59 from_language = serializers.SerializerMethodField()\n60 has_valid_entities = serializers.SerializerMethodField()\n61 entities = RepositoryTranslatedExampleEntitySeralizer(\n62 many=True,\n63 read_only=True)\n64 \n65 def get_from_language(self, obj):\n66 return obj.original_example.repository_update.language\n67 \n68 def get_has_valid_entities(self, obj):\n69 return 
obj.has_valid_entities\n70 \n71 \n72 class NewRepositoryTranslatedExampleEntitySeralizer(\n73 serializers.ModelSerializer):\n74 class Meta:\n75 model = RepositoryTranslatedExampleEntity\n76 fields = [\n77 'start',\n78 'end',\n79 'entity',\n80 ]\n81 \n82 \n83 class NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n84 class Meta:\n85 model = RepositoryTranslatedExample\n86 fields = [\n87 'id',\n88 'original_example',\n89 'language',\n90 'text',\n91 'has_valid_entities',\n92 'entities',\n93 ]\n94 \n95 def __init__(self, *args, **kwargs):\n96 super().__init__(*args, **kwargs)\n97 self.validators.append(TranslatedExampleEntitiesValidator())\n98 \n99 original_example = serializers.PrimaryKeyRelatedField(\n100 queryset=RepositoryExample.objects,\n101 validators=[\n102 CanContributeInRepositoryExampleValidator(),\n103 ],\n104 help_text=_('Example\\'s ID'))\n105 has_valid_entities = serializers.SerializerMethodField()\n106 entities = NewRepositoryTranslatedExampleEntitySeralizer(\n107 many=True,\n108 style={'text_field': 'text'})\n109 \n110 def get_has_valid_entities(self, obj):\n111 return obj.has_valid_entities\n112 \n113 def create(self, validated_data):\n114 entities_data = validated_data.pop('entities')\n115 \n116 translated = self.Meta.model.objects.create(**validated_data)\n117 for entity_data in entities_data:\n118 RepositoryTranslatedExampleEntity.objects.create(\n119 repository_translated_example=translated,\n120 **entity_data)\n121 return translated\n122\n[end of bothub/api/serializers/translate.py]\n[start of bothub/api/validators.py]\n1 from django.utils.translation import gettext as _\n2 from rest_framework.exceptions import PermissionDenied\n3 from rest_framework.exceptions import ValidationError\n4 \n5 from bothub.common.models import RepositoryTranslatedExample\n6 \n7 \n8 class CanContributeInRepositoryValidator(object):\n9 def __call__(self, value):\n10 user_authorization = value.get_user_authorization(\n11 self.request.user)\n12 if not user_authorization.can_contribute:\n13 raise PermissionDenied(\n14 _('You can\\'t contribute in this repository'))\n15 \n16 def set_context(self, serializer):\n17 self.request = serializer.context.get('request')\n18 \n19 \n20 class CanContributeInRepositoryExampleValidator(object):\n21 def __call__(self, value):\n22 repository = value.repository_update.repository\n23 user_authorization = repository.get_user_authorization(\n24 self.request.user)\n25 if not user_authorization.can_contribute:\n26 raise PermissionDenied(\n27 _('You can\\'t contribute in this repository'))\n28 \n29 def set_context(self, serializer):\n30 self.request = serializer.context.get('request')\n31 \n32 \n33 class CanContributeInRepositoryTranslatedExampleValidator(object):\n34 def __call__(self, value):\n35 repository = value.original_example.repository_update.repository\n36 user_authorization = repository.get_user_authorization(\n37 self.request.user)\n38 if not user_authorization.can_contribute:\n39 raise PermissionDenied(\n40 _('You can\\'t contribute in this repository'))\n41 \n42 def set_context(self, serializer):\n43 self.request = serializer.context.get('request')\n44 \n45 \n46 class TranslatedExampleEntitiesValidator(object):\n47 def __call__(self, attrs):\n48 original_example = attrs.get('original_example')\n49 entities_valid = RepositoryTranslatedExample.same_entities_validator(\n50 list(map(lambda x: dict(x), attrs.get('entities'))),\n51 list(map(lambda x: x.to_dict, original_example.entities.all())))\n52 if not entities_valid:\n53 raise 
ValidationError({'entities': _('Invalid entities')})\n54\n[end of bothub/api/validators.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/bothub/api/serializers/translate.py b/bothub/api/serializers/translate.py\n--- a/bothub/api/serializers/translate.py\n+++ b/bothub/api/serializers/translate.py\n@@ -9,6 +9,7 @@\n from ..validators import CanContributeInRepositoryTranslatedExampleValidator\n from ..validators import CanContributeInRepositoryExampleValidator\n from ..validators import TranslatedExampleEntitiesValidator\n+from ..validators import TranslatedExampleLanguageValidator\n \n \n class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n@@ -95,6 +96,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n+ self.validators.append(TranslatedExampleLanguageValidator())\n \n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\ndiff --git a/bothub/api/validators.py b/bothub/api/validators.py\n--- a/bothub/api/validators.py\n+++ b/bothub/api/validators.py\n@@ -51,3 +51,11 @@\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n+\n+\n+class TranslatedExampleLanguageValidator(object):\n+ def __call__(self, attrs):\n+ original_example = attrs.get('original_example')\n+ language = attrs.get('language')\n+ if original_example.repository_update.language == language:\n+ raise ValidationError({'language': _('Can\\'t translate to same language')})\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/bothub/api/serializers/translate.py b/bothub/api/serializers/translate.py\\n--- a/bothub/api/serializers/translate.py\\n+++ b/bothub/api/serializers/translate.py\\n@@ -9,6 +9,7 @@\\n from ..validators import CanContributeInRepositoryTranslatedExampleValidator\\n from ..validators import CanContributeInRepositoryExampleValidator\\n from ..validators import TranslatedExampleEntitiesValidator\\n+from ..validators import TranslatedExampleLanguageValidator\\n \\n \\n class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\\n@@ -95,6 +96,7 @@\\n def __init__(self, *args, **kwargs):\\n super().__init__(*args, **kwargs)\\n 
self.validators.append(TranslatedExampleEntitiesValidator())\\n+ self.validators.append(TranslatedExampleLanguageValidator())\\n \\n original_example = serializers.PrimaryKeyRelatedField(\\n queryset=RepositoryExample.objects,\\ndiff --git a/bothub/api/validators.py b/bothub/api/validators.py\\n--- a/bothub/api/validators.py\\n+++ b/bothub/api/validators.py\\n@@ -51,3 +51,11 @@\\n list(map(lambda x: x.to_dict, original_example.entities.all())))\\n if not entities_valid:\\n raise ValidationError({'entities': _('Invalid entities')})\\n+\\n+\\n+class TranslatedExampleLanguageValidator(object):\\n+ def __call__(self, attrs):\\n+ original_example = attrs.get('original_example')\\n+ language = attrs.get('language')\\n+ if original_example.repository_update.language == language:\\n+ raise ValidationError({'language': _('Can\\\\'t translate to same language')})\\n\", \"issue\": \"Is possible translate example to same language\\n\\n\", \"before_files\": [{\"content\": \"from rest_framework import serializers\\n\\nfrom django.utils.translation import gettext as _\\n\\nfrom bothub.common.models import RepositoryTranslatedExampleEntity\\nfrom bothub.common.models import RepositoryTranslatedExample\\nfrom bothub.common.models import RepositoryExample\\n\\nfrom ..validators import CanContributeInRepositoryTranslatedExampleValidator\\nfrom ..validators import CanContributeInRepositoryExampleValidator\\nfrom ..validators import TranslatedExampleEntitiesValidator\\n\\n\\nclass RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\\n class Meta:\\n model = RepositoryTranslatedExampleEntity\\n fields = [\\n 'id',\\n 'repository_translated_example',\\n 'start',\\n 'end',\\n 'entity',\\n 'created_at',\\n 'value',\\n ]\\n\\n repository_translated_example = serializers.PrimaryKeyRelatedField(\\n queryset=RepositoryTranslatedExample.objects,\\n validators=[\\n CanContributeInRepositoryTranslatedExampleValidator(),\\n ],\\n help_text='Example translation ID')\\n value = serializers.SerializerMethodField()\\n\\n def get_value(self, obj):\\n return obj.value\\n\\n\\nclass RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\\n class Meta:\\n model = RepositoryTranslatedExample\\n fields = [\\n 'id',\\n 'original_example',\\n 'from_language',\\n 'language',\\n 'text',\\n 'has_valid_entities',\\n 'entities',\\n 'created_at',\\n ]\\n\\n original_example = serializers.PrimaryKeyRelatedField(\\n queryset=RepositoryExample.objects,\\n validators=[\\n CanContributeInRepositoryExampleValidator(),\\n ],\\n help_text=_('Example\\\\'s ID'))\\n from_language = serializers.SerializerMethodField()\\n has_valid_entities = serializers.SerializerMethodField()\\n entities = RepositoryTranslatedExampleEntitySeralizer(\\n many=True,\\n read_only=True)\\n\\n def get_from_language(self, obj):\\n return obj.original_example.repository_update.language\\n\\n def get_has_valid_entities(self, obj):\\n return obj.has_valid_entities\\n\\n\\nclass NewRepositoryTranslatedExampleEntitySeralizer(\\n serializers.ModelSerializer):\\n class Meta:\\n model = RepositoryTranslatedExampleEntity\\n fields = [\\n 'start',\\n 'end',\\n 'entity',\\n ]\\n\\n\\nclass NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\\n class Meta:\\n model = RepositoryTranslatedExample\\n fields = [\\n 'id',\\n 'original_example',\\n 'language',\\n 'text',\\n 'has_valid_entities',\\n 'entities',\\n ]\\n\\n def __init__(self, *args, **kwargs):\\n super().__init__(*args, **kwargs)\\n 
self.validators.append(TranslatedExampleEntitiesValidator())\\n\\n original_example = serializers.PrimaryKeyRelatedField(\\n queryset=RepositoryExample.objects,\\n validators=[\\n CanContributeInRepositoryExampleValidator(),\\n ],\\n help_text=_('Example\\\\'s ID'))\\n has_valid_entities = serializers.SerializerMethodField()\\n entities = NewRepositoryTranslatedExampleEntitySeralizer(\\n many=True,\\n style={'text_field': 'text'})\\n\\n def get_has_valid_entities(self, obj):\\n return obj.has_valid_entities\\n\\n def create(self, validated_data):\\n entities_data = validated_data.pop('entities')\\n\\n translated = self.Meta.model.objects.create(**validated_data)\\n for entity_data in entities_data:\\n RepositoryTranslatedExampleEntity.objects.create(\\n repository_translated_example=translated,\\n **entity_data)\\n return translated\\n\", \"path\": \"bothub/api/serializers/translate.py\"}, {\"content\": \"from django.utils.translation import gettext as _\\nfrom rest_framework.exceptions import PermissionDenied\\nfrom rest_framework.exceptions import ValidationError\\n\\nfrom bothub.common.models import RepositoryTranslatedExample\\n\\n\\nclass CanContributeInRepositoryValidator(object):\\n def __call__(self, value):\\n user_authorization = value.get_user_authorization(\\n self.request.user)\\n if not user_authorization.can_contribute:\\n raise PermissionDenied(\\n _('You can\\\\'t contribute in this repository'))\\n\\n def set_context(self, serializer):\\n self.request = serializer.context.get('request')\\n\\n\\nclass CanContributeInRepositoryExampleValidator(object):\\n def __call__(self, value):\\n repository = value.repository_update.repository\\n user_authorization = repository.get_user_authorization(\\n self.request.user)\\n if not user_authorization.can_contribute:\\n raise PermissionDenied(\\n _('You can\\\\'t contribute in this repository'))\\n\\n def set_context(self, serializer):\\n self.request = serializer.context.get('request')\\n\\n\\nclass CanContributeInRepositoryTranslatedExampleValidator(object):\\n def __call__(self, value):\\n repository = value.original_example.repository_update.repository\\n user_authorization = repository.get_user_authorization(\\n self.request.user)\\n if not user_authorization.can_contribute:\\n raise PermissionDenied(\\n _('You can\\\\'t contribute in this repository'))\\n\\n def set_context(self, serializer):\\n self.request = serializer.context.get('request')\\n\\n\\nclass TranslatedExampleEntitiesValidator(object):\\n def __call__(self, attrs):\\n original_example = attrs.get('original_example')\\n entities_valid = RepositoryTranslatedExample.same_entities_validator(\\n list(map(lambda x: dict(x), attrs.get('entities'))),\\n list(map(lambda x: x.to_dict, original_example.entities.all())))\\n if not entities_valid:\\n raise ValidationError({'entities': _('Invalid entities')})\\n\", \"path\": \"bothub/api/validators.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1995,"string":"1,995"},"num_tokens_diff":{"kind":"number","value":347,"string":"347"}}},{"rowIdx":18145,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_37497"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"Parsl__parsl-2221"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nPBSPro scheduler options ignored\n**Describe the bug**\r\nThe PBSPro provider template adds 
`${scheduler_options}` to the bottom of the list of #PBS commands. However, PBSPro seems to only consider the first #PBS option and ignore any later competing ones. This means specifying a new select option with gpus is superseded by the default one and ignored. We can resolve this by moving the user defined scheduler options to the top of the #PBS list in the template.\r\n\r\n**To Reproduce**\r\nUse the PBSPro provider (e.g., at ALCF's edge testbed for Polaris) and try to specify a new select option:`-l select=1:ncpus=32:ngpus=1`. These options are ignored in favor of the default specified above in the template.\r\n\r\n**Expected behavior**\r\nI would like scheduler_options to be put first so they can be used to enter specific options.\r\n\r\n**Environment**\r\n - Parsl 1.2.0\r\n\r\n**Distributed Environment**\r\n- Where are you running the Parsl script from ? ALCF\r\n- Where do you need the workers to run ? Compute nodes\r\n\n\n\n\n[start of parsl/providers/pbspro/template.py]\n1 template_string = '''#!/bin/bash\n2 \n3 #PBS -S /bin/bash\n4 #PBS -N ${jobname}\n5 #PBS -m n\n6 #PBS -l walltime=$walltime\n7 #PBS -l select=${nodes_per_block}:ncpus=${ncpus}\n8 #PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n9 #PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n10 ${scheduler_options}\n11 \n12 ${worker_init}\n13 \n14 export JOBNAME=\"${jobname}\"\n15 \n16 ${user_script}\n17 \n18 '''\n19\n[end of parsl/providers/pbspro/template.py]\n[start of parsl/providers/pbspro/pbspro.py]\n1 import logging\n2 import os\n3 import time\n4 \n5 from parsl.channels import LocalChannel\n6 from parsl.launchers import SingleNodeLauncher\n7 from parsl.providers.pbspro.template import template_string\n8 from parsl.providers import TorqueProvider\n9 from parsl.providers.provider_base import JobState, JobStatus\n10 \n11 logger = logging.getLogger(__name__)\n12 \n13 \n14 class PBSProProvider(TorqueProvider):\n15 \"\"\"PBS Pro Execution Provider\n16 \n17 Parameters\n18 ----------\n19 channel : Channel\n20 Channel for accessing this provider. Possible channels include\n21 :class:`~parsl.channels.LocalChannel` (the default),\n22 :class:`~parsl.channels.SSHChannel`, or\n23 :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n24 account : str\n25 Account the job will be charged against.\n26 queue : str\n27 Queue to request blocks from.\n28 nodes_per_block : int\n29 Nodes to provision per block.\n30 cpus_per_node : int\n31 CPUs to provision per node.\n32 init_blocks : int\n33 Number of blocks to provision at the start of the run. Default is 1.\n34 min_blocks : int\n35 Minimum number of blocks to maintain. Default is 0.\n36 max_blocks : int\n37 Maximum number of blocks to maintain.\n38 parallelism : float\n39 Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive\n40 scaling where as many resources as possible are used; parallelism close to 0 represents\n41 the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n42 walltime : str\n43 Walltime requested per block in HH:MM:SS.\n44 scheduler_options : str\n45 String to prepend to the #PBS blocks in the submit script to the scheduler.\n46 worker_init : str\n47 Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n48 launcher : Launcher\n49 Launcher for this provider. 
The default is\n50 :class:`~parsl.launchers.SingleNodeLauncher`.\n51 \"\"\"\n52 def __init__(self,\n53 channel=LocalChannel(),\n54 account=None,\n55 queue=None,\n56 scheduler_options='',\n57 worker_init='',\n58 nodes_per_block=1,\n59 cpus_per_node=1,\n60 init_blocks=1,\n61 min_blocks=0,\n62 max_blocks=1,\n63 parallelism=1,\n64 launcher=SingleNodeLauncher(),\n65 walltime=\"00:20:00\",\n66 cmd_timeout=120):\n67 super().__init__(channel,\n68 account,\n69 queue,\n70 scheduler_options,\n71 worker_init,\n72 nodes_per_block,\n73 init_blocks,\n74 min_blocks,\n75 max_blocks,\n76 parallelism,\n77 launcher,\n78 walltime,\n79 cmd_timeout=cmd_timeout)\n80 \n81 self.template_string = template_string\n82 self._label = 'pbspro'\n83 self.cpus_per_node = cpus_per_node\n84 \n85 def submit(self, command, tasks_per_node, job_name=\"parsl\"):\n86 \"\"\"Submits the command job.\n87 \n88 Parameters\n89 ----------\n90 command : str\n91 Command to be executed on the remote side.\n92 tasks_per_node : int\n93 Command invocations to be launched per node.\n94 job_name : str\n95 Identifier for job.\n96 \n97 Returns\n98 -------\n99 None\n100 If at capacity and cannot provision more\n101 job_id : str\n102 Identifier for the job\n103 \"\"\"\n104 \n105 job_name = \"{0}.{1}\".format(job_name, time.time())\n106 \n107 script_path = os.path.abspath(\"{0}/{1}.submit\".format(self.script_dir, job_name))\n108 \n109 logger.debug(\"Requesting {} nodes_per_block, {} tasks_per_node\".format(\n110 self.nodes_per_block, tasks_per_node)\n111 )\n112 \n113 job_config = {}\n114 job_config[\"submit_script_dir\"] = self.channel.script_dir\n115 job_config[\"nodes_per_block\"] = self.nodes_per_block\n116 job_config[\"ncpus\"] = self.cpus_per_node\n117 job_config[\"walltime\"] = self.walltime\n118 job_config[\"scheduler_options\"] = self.scheduler_options\n119 job_config[\"worker_init\"] = self.worker_init\n120 job_config[\"user_script\"] = command\n121 \n122 # Wrap the command\n123 job_config[\"user_script\"] = self.launcher(command,\n124 tasks_per_node,\n125 self.nodes_per_block)\n126 \n127 logger.debug(\"Writing submit script\")\n128 self._write_submit_script(self.template_string, script_path, job_name, job_config)\n129 \n130 channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n131 \n132 submit_options = ''\n133 if self.queue is not None:\n134 submit_options = '{0} -q {1}'.format(submit_options, self.queue)\n135 if self.account is not None:\n136 submit_options = '{0} -A {1}'.format(submit_options, self.account)\n137 \n138 launch_cmd = \"qsub {0} {1}\".format(submit_options, channel_script_path)\n139 retcode, stdout, stderr = self.execute_wait(launch_cmd)\n140 \n141 job_id = None\n142 if retcode == 0:\n143 for line in stdout.split('\\n'):\n144 if line.strip():\n145 job_id = line.strip()\n146 self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}\n147 else:\n148 message = \"Command '{}' failed with return code {}\".format(launch_cmd, retcode)\n149 if (stdout is not None) and (stderr is not None):\n150 message += \"\\nstderr:{}\\nstdout{}\".format(stderr.strip(), stdout.strip())\n151 logger.error(message)\n152 \n153 return job_id\n154 \n155 @property\n156 def status_polling_interval(self):\n157 return 60\n158\n[end of parsl/providers/pbspro/pbspro.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/parsl/providers/pbspro/pbspro.py b/parsl/providers/pbspro/pbspro.py\n--- a/parsl/providers/pbspro/pbspro.py\n+++ b/parsl/providers/pbspro/pbspro.py\n@@ -43,6 +43,9 @@\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #PBS blocks in the submit script to the scheduler.\n+ select_options : str\n+ String to append to the #PBS -l select block in the submit script to the scheduler. This can be used to\n+ specify ngpus.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n launcher : Launcher\n@@ -54,6 +57,7 @@\n account=None,\n queue=None,\n scheduler_options='',\n+ select_options='',\n worker_init='',\n nodes_per_block=1,\n cpus_per_node=1,\n@@ -81,6 +85,7 @@\n self.template_string = template_string\n self._label = 'pbspro'\n self.cpus_per_node = cpus_per_node\n+ self.select_options = select_options\n \n def submit(self, command, tasks_per_node, job_name=\"parsl\"):\n \"\"\"Submits the command job.\n@@ -119,6 +124,12 @@\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n \n+ # Add a colon to select_options if one isn't included\n+ if self.select_options and not self.select_options.startswith(\":\"):\n+ self.select_options = \":\" + self.select_options\n+\n+ job_config[\"select_options\"] = self.select_options\n+\n # Wrap the command\n job_config[\"user_script\"] = self.launcher(command,\n tasks_per_node,\ndiff --git a/parsl/providers/pbspro/template.py b/parsl/providers/pbspro/template.py\n--- a/parsl/providers/pbspro/template.py\n+++ b/parsl/providers/pbspro/template.py\n@@ -4,7 +4,7 @@\n #PBS -N ${jobname}\n #PBS -m n\n #PBS -l walltime=$walltime\n-#PBS -l select=${nodes_per_block}:ncpus=${ncpus}\n+#PBS -l select=${nodes_per_block}:ncpus=${ncpus}${select_options}\n #PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n #PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n ${scheduler_options}\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/parsl/providers/pbspro/pbspro.py b/parsl/providers/pbspro/pbspro.py\\n--- a/parsl/providers/pbspro/pbspro.py\\n+++ b/parsl/providers/pbspro/pbspro.py\\n@@ -43,6 +43,9 @@\\n Walltime requested per block in HH:MM:SS.\\n scheduler_options : str\\n String to prepend to the #PBS blocks in the submit script to the scheduler.\\n+ select_options : str\\n+ String to append to the #PBS -l select block in the submit script to the 
scheduler. This can be used to\\n+ specify ngpus.\\n worker_init : str\\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\\n launcher : Launcher\\n@@ -54,6 +57,7 @@\\n account=None,\\n queue=None,\\n scheduler_options='',\\n+ select_options='',\\n worker_init='',\\n nodes_per_block=1,\\n cpus_per_node=1,\\n@@ -81,6 +85,7 @@\\n self.template_string = template_string\\n self._label = 'pbspro'\\n self.cpus_per_node = cpus_per_node\\n+ self.select_options = select_options\\n \\n def submit(self, command, tasks_per_node, job_name=\\\"parsl\\\"):\\n \\\"\\\"\\\"Submits the command job.\\n@@ -119,6 +124,12 @@\\n job_config[\\\"worker_init\\\"] = self.worker_init\\n job_config[\\\"user_script\\\"] = command\\n \\n+ # Add a colon to select_options if one isn't included\\n+ if self.select_options and not self.select_options.startswith(\\\":\\\"):\\n+ self.select_options = \\\":\\\" + self.select_options\\n+\\n+ job_config[\\\"select_options\\\"] = self.select_options\\n+\\n # Wrap the command\\n job_config[\\\"user_script\\\"] = self.launcher(command,\\n tasks_per_node,\\ndiff --git a/parsl/providers/pbspro/template.py b/parsl/providers/pbspro/template.py\\n--- a/parsl/providers/pbspro/template.py\\n+++ b/parsl/providers/pbspro/template.py\\n@@ -4,7 +4,7 @@\\n #PBS -N ${jobname}\\n #PBS -m n\\n #PBS -l walltime=$walltime\\n-#PBS -l select=${nodes_per_block}:ncpus=${ncpus}\\n+#PBS -l select=${nodes_per_block}:ncpus=${ncpus}${select_options}\\n #PBS -o ${submit_script_dir}/${jobname}.submit.stdout\\n #PBS -e ${submit_script_dir}/${jobname}.submit.stderr\\n ${scheduler_options}\\n\", \"issue\": \"PBSPro scheduler options ignored\\n**Describe the bug**\\r\\nThe PBSPro provider template adds `${scheduler_options}` to the bottom of the list of #PBS commands. However, PBSPro seems to only consider the first #PBS option and ignore any later competing ones. This means specifying a new select option with gpus is superseded by the default one and ignored. We can resolve this by moving the user defined scheduler options to the top of the #PBS list in the template.\\r\\n\\r\\n**To Reproduce**\\r\\nUse the PBSPro provider (e.g., at ALCF's edge testbed for Polaris) and try to specify a new select option:`-l select=1:ncpus=32:ngpus=1`. These options are ignored in favor of the default specified above in the template.\\r\\n\\r\\n**Expected behavior**\\r\\nI would like scheduler_options to be put first so they can be used to enter specific options.\\r\\n\\r\\n**Environment**\\r\\n - Parsl 1.2.0\\r\\n\\r\\n**Distributed Environment**\\r\\n- Where are you running the Parsl script from ? ALCF\\r\\n- Where do you need the workers to run ? 
Compute nodes\\r\\n\\n\", \"before_files\": [{\"content\": \"template_string = '''#!/bin/bash\\n\\n#PBS -S /bin/bash\\n#PBS -N ${jobname}\\n#PBS -m n\\n#PBS -l walltime=$walltime\\n#PBS -l select=${nodes_per_block}:ncpus=${ncpus}\\n#PBS -o ${submit_script_dir}/${jobname}.submit.stdout\\n#PBS -e ${submit_script_dir}/${jobname}.submit.stderr\\n${scheduler_options}\\n\\n${worker_init}\\n\\nexport JOBNAME=\\\"${jobname}\\\"\\n\\n${user_script}\\n\\n'''\\n\", \"path\": \"parsl/providers/pbspro/template.py\"}, {\"content\": \"import logging\\nimport os\\nimport time\\n\\nfrom parsl.channels import LocalChannel\\nfrom parsl.launchers import SingleNodeLauncher\\nfrom parsl.providers.pbspro.template import template_string\\nfrom parsl.providers import TorqueProvider\\nfrom parsl.providers.provider_base import JobState, JobStatus\\n\\nlogger = logging.getLogger(__name__)\\n\\n\\nclass PBSProProvider(TorqueProvider):\\n \\\"\\\"\\\"PBS Pro Execution Provider\\n\\n Parameters\\n ----------\\n channel : Channel\\n Channel for accessing this provider. Possible channels include\\n :class:`~parsl.channels.LocalChannel` (the default),\\n :class:`~parsl.channels.SSHChannel`, or\\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\\n account : str\\n Account the job will be charged against.\\n queue : str\\n Queue to request blocks from.\\n nodes_per_block : int\\n Nodes to provision per block.\\n cpus_per_node : int\\n CPUs to provision per node.\\n init_blocks : int\\n Number of blocks to provision at the start of the run. Default is 1.\\n min_blocks : int\\n Minimum number of blocks to maintain. Default is 0.\\n max_blocks : int\\n Maximum number of blocks to maintain.\\n parallelism : float\\n Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive\\n scaling where as many resources as possible are used; parallelism close to 0 represents\\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\\n walltime : str\\n Walltime requested per block in HH:MM:SS.\\n scheduler_options : str\\n String to prepend to the #PBS blocks in the submit script to the scheduler.\\n worker_init : str\\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\\n launcher : Launcher\\n Launcher for this provider. 
The default is\\n :class:`~parsl.launchers.SingleNodeLauncher`.\\n \\\"\\\"\\\"\\n def __init__(self,\\n channel=LocalChannel(),\\n account=None,\\n queue=None,\\n scheduler_options='',\\n worker_init='',\\n nodes_per_block=1,\\n cpus_per_node=1,\\n init_blocks=1,\\n min_blocks=0,\\n max_blocks=1,\\n parallelism=1,\\n launcher=SingleNodeLauncher(),\\n walltime=\\\"00:20:00\\\",\\n cmd_timeout=120):\\n super().__init__(channel,\\n account,\\n queue,\\n scheduler_options,\\n worker_init,\\n nodes_per_block,\\n init_blocks,\\n min_blocks,\\n max_blocks,\\n parallelism,\\n launcher,\\n walltime,\\n cmd_timeout=cmd_timeout)\\n\\n self.template_string = template_string\\n self._label = 'pbspro'\\n self.cpus_per_node = cpus_per_node\\n\\n def submit(self, command, tasks_per_node, job_name=\\\"parsl\\\"):\\n \\\"\\\"\\\"Submits the command job.\\n\\n Parameters\\n ----------\\n command : str\\n Command to be executed on the remote side.\\n tasks_per_node : int\\n Command invocations to be launched per node.\\n job_name : str\\n Identifier for job.\\n\\n Returns\\n -------\\n None\\n If at capacity and cannot provision more\\n job_id : str\\n Identifier for the job\\n \\\"\\\"\\\"\\n\\n job_name = \\\"{0}.{1}\\\".format(job_name, time.time())\\n\\n script_path = os.path.abspath(\\\"{0}/{1}.submit\\\".format(self.script_dir, job_name))\\n\\n logger.debug(\\\"Requesting {} nodes_per_block, {} tasks_per_node\\\".format(\\n self.nodes_per_block, tasks_per_node)\\n )\\n\\n job_config = {}\\n job_config[\\\"submit_script_dir\\\"] = self.channel.script_dir\\n job_config[\\\"nodes_per_block\\\"] = self.nodes_per_block\\n job_config[\\\"ncpus\\\"] = self.cpus_per_node\\n job_config[\\\"walltime\\\"] = self.walltime\\n job_config[\\\"scheduler_options\\\"] = self.scheduler_options\\n job_config[\\\"worker_init\\\"] = self.worker_init\\n job_config[\\\"user_script\\\"] = command\\n\\n # Wrap the command\\n job_config[\\\"user_script\\\"] = self.launcher(command,\\n tasks_per_node,\\n self.nodes_per_block)\\n\\n logger.debug(\\\"Writing submit script\\\")\\n self._write_submit_script(self.template_string, script_path, job_name, job_config)\\n\\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\\n\\n submit_options = ''\\n if self.queue is not None:\\n submit_options = '{0} -q {1}'.format(submit_options, self.queue)\\n if self.account is not None:\\n submit_options = '{0} -A {1}'.format(submit_options, self.account)\\n\\n launch_cmd = \\\"qsub {0} {1}\\\".format(submit_options, channel_script_path)\\n retcode, stdout, stderr = self.execute_wait(launch_cmd)\\n\\n job_id = None\\n if retcode == 0:\\n for line in stdout.split('\\\\n'):\\n if line.strip():\\n job_id = line.strip()\\n self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}\\n else:\\n message = \\\"Command '{}' failed with return code {}\\\".format(launch_cmd, retcode)\\n if (stdout is not None) and (stderr is not None):\\n message += \\\"\\\\nstderr:{}\\\\nstdout{}\\\".format(stderr.strip(), stdout.strip())\\n logger.error(message)\\n\\n return job_id\\n\\n @property\\n def status_polling_interval(self):\\n return 60\\n\", \"path\": 
\"parsl/providers/pbspro/pbspro.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2509,"string":"2,509"},"num_tokens_diff":{"kind":"number","value":569,"string":"569"}}},{"rowIdx":18146,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_18321"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"crytic__slither-2394"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nfilter `name-reused` to only run on Truffle projects\nThe detector should check which platform was used with https://crytic.github.io/crytic-compile/crytic_compile/crytic_compile.html#CryticCompile.platform and https://crytic.github.io/slither/slither/core/compilation_unit.html#SlitherCompilationUnit.crytic_compile \r\nhttps://github.com/crytic/slither/blob/13d7d9f66a6be4f798478fa3735fb63444b46c3d/slither/detectors/slither/name_reused.py#L51-L61\r\n\r\nhttps://github.com/crytic/crytic-compile/blob/b5c538aaa66be44b7a68d9723881a7eba2c20898/crytic_compile/platform/truffle.py#L83-L90\n\n\n\n[start of slither/detectors/slither/name_reused.py]\n1 from collections import defaultdict\n2 from typing import List\n3 \n4 from slither.core.compilation_unit import SlitherCompilationUnit\n5 from slither.core.declarations import Contract\n6 from slither.detectors.abstract_detector import (\n7 AbstractDetector,\n8 DetectorClassification,\n9 DETECTOR_INFO,\n10 )\n11 from slither.utils.output import Output\n12 \n13 \n14 def _find_missing_inheritance(compilation_unit: SlitherCompilationUnit) -> List[Contract]:\n15 \"\"\"\n16 Filter contracts with missing inheritance to return only the \"most base\" contracts\n17 in the inheritance tree.\n18 :param slither:\n19 :return:\n20 \"\"\"\n21 missings = compilation_unit.contracts_with_missing_inheritance\n22 \n23 ret = []\n24 for b in missings:\n25 is_most_base = True\n26 for inheritance in b.immediate_inheritance:\n27 if inheritance in missings:\n28 is_most_base = False\n29 if is_most_base:\n30 ret.append(b)\n31 \n32 return ret\n33 \n34 \n35 class NameReused(AbstractDetector):\n36 ARGUMENT = \"name-reused\"\n37 HELP = \"Contract's name reused\"\n38 IMPACT = DetectorClassification.HIGH\n39 CONFIDENCE = DetectorClassification.HIGH\n40 \n41 WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#name-reused\"\n42 \n43 WIKI_TITLE = \"Name reused\"\n44 \n45 # region wiki_description\n46 WIKI_DESCRIPTION = \"\"\"If a codebase has two contracts the similar names, the compilation artifacts\n47 will not contain one of the contracts with the duplicate name.\"\"\"\n48 # endregion wiki_description\n49 \n50 # region wiki_exploit_scenario\n51 WIKI_EXPLOIT_SCENARIO = \"\"\"\n52 Bob's `truffle` codebase has two contracts named `ERC20`.\n53 When `truffle compile` runs, only one of the two contracts will generate artifacts in `build/contracts`.\n54 As a result, the second contract cannot be analyzed.\n55 \"\"\"\n56 # endregion wiki_exploit_scenario\n57 \n58 WIKI_RECOMMENDATION = \"Rename the contract.\"\n59 \n60 # pylint: disable=too-many-locals,too-many-branches\n61 def _detect(self) -> List[Output]:\n62 results = []\n63 compilation_unit = self.compilation_unit\n64 \n65 all_contracts = compilation_unit.contracts\n66 all_contracts_name = [c.name for c in all_contracts]\n67 contracts_name_reused = {\n68 contract for contract in all_contracts_name if all_contracts_name.count(contract) > 1\n69 }\n70 \n71 
names_reused = {\n72 name: compilation_unit.get_contract_from_name(name) for name in contracts_name_reused\n73 }\n74 \n75 # First show the contracts that we know are missing\n76 incorrectly_constructed = [\n77 contract\n78 for contract in compilation_unit.contracts\n79 if contract.is_incorrectly_constructed\n80 ]\n81 \n82 inheritance_corrupted = defaultdict(list)\n83 for contract in incorrectly_constructed:\n84 for father in contract.inheritance:\n85 inheritance_corrupted[father.name].append(contract)\n86 \n87 for contract_name, files in names_reused.items():\n88 info: DETECTOR_INFO = [contract_name, \" is re-used:\\n\"]\n89 for file in files:\n90 if file is None:\n91 info += [\"\\t- In an file not found, most likely in\\n\"]\n92 else:\n93 info += [\"\\t- \", file, \"\\n\"]\n94 \n95 if contract_name in inheritance_corrupted:\n96 info += [\"\\tAs a result, the inherited contracts are not correctly analyzed:\\n\"]\n97 for corrupted in inheritance_corrupted[contract_name]:\n98 info += [\"\\t\\t- \", corrupted, \"\\n\"]\n99 res = self.generate_result(info)\n100 results.append(res)\n101 \n102 # Then show the contracts for which one of the father was not found\n103 # Here we are not able to know\n104 most_base_with_missing_inheritance = _find_missing_inheritance(compilation_unit)\n105 \n106 for b in most_base_with_missing_inheritance:\n107 info = [b, \" inherits from a contract for which the name is reused.\\n\"]\n108 if b.inheritance:\n109 info += [\"\\t- Slither could not determine which contract has a duplicate name:\\n\"]\n110 for inheritance in b.inheritance:\n111 info += [\"\\t\\t-\", inheritance, \"\\n\"]\n112 info += [\"\\t- Check if:\\n\"]\n113 info += [\"\\t\\t- A inherited contract is missing from this list,\\n\"]\n114 info += [\"\\t\\t- The contract are imported from the correct files.\\n\"]\n115 if b.derived_contracts:\n116 info += [f\"\\t- This issue impacts the contracts inheriting from {b.name}:\\n\"]\n117 for derived in b.derived_contracts:\n118 info += [\"\\t\\t-\", derived, \"\\n\"]\n119 res = self.generate_result(info)\n120 results.append(res)\n121 return results\n122\n[end of slither/detectors/slither/name_reused.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/slither/detectors/slither/name_reused.py b/slither/detectors/slither/name_reused.py\n--- a/slither/detectors/slither/name_reused.py\n+++ b/slither/detectors/slither/name_reused.py\n@@ -1,6 +1,8 @@\n from collections import defaultdict\n from typing import List\n \n+from crytic_compile.platform import Type as PlatformType\n+\n from slither.core.compilation_unit import SlitherCompilationUnit\n from slither.core.declarations import Contract\n from slither.detectors.abstract_detector import (\n@@ -61,6 +63,8 @@\n def _detect(self) -> List[Output]:\n results = []\n compilation_unit = self.compilation_unit\n+ if compilation_unit.core.crytic_compile.platform != PlatformType.TRUFFLE:\n+ return []\n \n all_contracts = compilation_unit.contracts\n all_contracts_name = [c.name for c in all_contracts]\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/slither/detectors/slither/name_reused.py b/slither/detectors/slither/name_reused.py\\n--- a/slither/detectors/slither/name_reused.py\\n+++ b/slither/detectors/slither/name_reused.py\\n@@ -1,6 +1,8 @@\\n from collections import defaultdict\\n from typing import List\\n \\n+from crytic_compile.platform import Type as PlatformType\\n+\\n from slither.core.compilation_unit import SlitherCompilationUnit\\n from slither.core.declarations import Contract\\n from slither.detectors.abstract_detector import (\\n@@ -61,6 +63,8 @@\\n def _detect(self) -> List[Output]:\\n results = []\\n compilation_unit = self.compilation_unit\\n+ if compilation_unit.core.crytic_compile.platform != PlatformType.TRUFFLE:\\n+ return []\\n \\n all_contracts = compilation_unit.contracts\\n all_contracts_name = [c.name for c in all_contracts]\\n\", \"issue\": \"filter `name-reused` to only run on Truffle projects\\nThe detector should check which platform was used with https://crytic.github.io/crytic-compile/crytic_compile/crytic_compile.html#CryticCompile.platform and https://crytic.github.io/slither/slither/core/compilation_unit.html#SlitherCompilationUnit.crytic_compile \\r\\nhttps://github.com/crytic/slither/blob/13d7d9f66a6be4f798478fa3735fb63444b46c3d/slither/detectors/slither/name_reused.py#L51-L61\\r\\n\\r\\nhttps://github.com/crytic/crytic-compile/blob/b5c538aaa66be44b7a68d9723881a7eba2c20898/crytic_compile/platform/truffle.py#L83-L90\\n\", \"before_files\": [{\"content\": \"from collections import defaultdict\\nfrom typing import List\\n\\nfrom slither.core.compilation_unit import 
SlitherCompilationUnit\\nfrom slither.core.declarations import Contract\\nfrom slither.detectors.abstract_detector import (\\n AbstractDetector,\\n DetectorClassification,\\n DETECTOR_INFO,\\n)\\nfrom slither.utils.output import Output\\n\\n\\ndef _find_missing_inheritance(compilation_unit: SlitherCompilationUnit) -> List[Contract]:\\n \\\"\\\"\\\"\\n Filter contracts with missing inheritance to return only the \\\"most base\\\" contracts\\n in the inheritance tree.\\n :param slither:\\n :return:\\n \\\"\\\"\\\"\\n missings = compilation_unit.contracts_with_missing_inheritance\\n\\n ret = []\\n for b in missings:\\n is_most_base = True\\n for inheritance in b.immediate_inheritance:\\n if inheritance in missings:\\n is_most_base = False\\n if is_most_base:\\n ret.append(b)\\n\\n return ret\\n\\n\\nclass NameReused(AbstractDetector):\\n ARGUMENT = \\\"name-reused\\\"\\n HELP = \\\"Contract's name reused\\\"\\n IMPACT = DetectorClassification.HIGH\\n CONFIDENCE = DetectorClassification.HIGH\\n\\n WIKI = \\\"https://github.com/crytic/slither/wiki/Detector-Documentation#name-reused\\\"\\n\\n WIKI_TITLE = \\\"Name reused\\\"\\n\\n # region wiki_description\\n WIKI_DESCRIPTION = \\\"\\\"\\\"If a codebase has two contracts the similar names, the compilation artifacts\\nwill not contain one of the contracts with the duplicate name.\\\"\\\"\\\"\\n # endregion wiki_description\\n\\n # region wiki_exploit_scenario\\n WIKI_EXPLOIT_SCENARIO = \\\"\\\"\\\"\\nBob's `truffle` codebase has two contracts named `ERC20`.\\nWhen `truffle compile` runs, only one of the two contracts will generate artifacts in `build/contracts`.\\nAs a result, the second contract cannot be analyzed.\\n\\\"\\\"\\\"\\n # endregion wiki_exploit_scenario\\n\\n WIKI_RECOMMENDATION = \\\"Rename the contract.\\\"\\n\\n # pylint: disable=too-many-locals,too-many-branches\\n def _detect(self) -> List[Output]:\\n results = []\\n compilation_unit = self.compilation_unit\\n\\n all_contracts = compilation_unit.contracts\\n all_contracts_name = [c.name for c in all_contracts]\\n contracts_name_reused = {\\n contract for contract in all_contracts_name if all_contracts_name.count(contract) > 1\\n }\\n\\n names_reused = {\\n name: compilation_unit.get_contract_from_name(name) for name in contracts_name_reused\\n }\\n\\n # First show the contracts that we know are missing\\n incorrectly_constructed = [\\n contract\\n for contract in compilation_unit.contracts\\n if contract.is_incorrectly_constructed\\n ]\\n\\n inheritance_corrupted = defaultdict(list)\\n for contract in incorrectly_constructed:\\n for father in contract.inheritance:\\n inheritance_corrupted[father.name].append(contract)\\n\\n for contract_name, files in names_reused.items():\\n info: DETECTOR_INFO = [contract_name, \\\" is re-used:\\\\n\\\"]\\n for file in files:\\n if file is None:\\n info += [\\\"\\\\t- In an file not found, most likely in\\\\n\\\"]\\n else:\\n info += [\\\"\\\\t- \\\", file, \\\"\\\\n\\\"]\\n\\n if contract_name in inheritance_corrupted:\\n info += [\\\"\\\\tAs a result, the inherited contracts are not correctly analyzed:\\\\n\\\"]\\n for corrupted in inheritance_corrupted[contract_name]:\\n info += [\\\"\\\\t\\\\t- \\\", corrupted, \\\"\\\\n\\\"]\\n res = self.generate_result(info)\\n results.append(res)\\n\\n # Then show the contracts for which one of the father was not found\\n # Here we are not able to know\\n most_base_with_missing_inheritance = _find_missing_inheritance(compilation_unit)\\n\\n for b in most_base_with_missing_inheritance:\\n info = [b, 
\\\" inherits from a contract for which the name is reused.\\\\n\\\"]\\n if b.inheritance:\\n info += [\\\"\\\\t- Slither could not determine which contract has a duplicate name:\\\\n\\\"]\\n for inheritance in b.inheritance:\\n info += [\\\"\\\\t\\\\t-\\\", inheritance, \\\"\\\\n\\\"]\\n info += [\\\"\\\\t- Check if:\\\\n\\\"]\\n info += [\\\"\\\\t\\\\t- A inherited contract is missing from this list,\\\\n\\\"]\\n info += [\\\"\\\\t\\\\t- The contract are imported from the correct files.\\\\n\\\"]\\n if b.derived_contracts:\\n info += [f\\\"\\\\t- This issue impacts the contracts inheriting from {b.name}:\\\\n\\\"]\\n for derived in b.derived_contracts:\\n info += [\\\"\\\\t\\\\t-\\\", derived, \\\"\\\\n\\\"]\\n res = self.generate_result(info)\\n results.append(res)\\n return results\\n\", \"path\": \"slither/detectors/slither/name_reused.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2027,"string":"2,027"},"num_tokens_diff":{"kind":"number","value":205,"string":"205"}}},{"rowIdx":18147,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_16712"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"hydroshare__hydroshare-2263"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nMetadata strings updated through hs_restclient are parsed incorrectly\nWhen using the hs_restclient to update metadata for Generic and Composite resources (I haven't tested other types), string values are updated as a unicode string inside an array (e.g. [u'some_string'] ). \r\n\r\nHere's an example code snippet:\r\n`\r\nput_data = {\"title\": \"New Title\", \"description\": \"New Description\"}\r\nclient.updateScienceMetadata('f44c00556cd847b98dd47f3a6279014d', put_data)\r\n`\r\n\r\nTwo resources that show this issue:\r\n![image](https://user-images.githubusercontent.com/8953221/28534607-84703f50-705e-11e7-9e73-5edd5058368f.png)\r\n![image](https://user-images.githubusercontent.com/8953221/28534643-984a8530-705e-11e7-84d7-df0ba05379f5.png)\r\n\r\n\n\n\n\n[start of hs_core/views/resource_metadata_rest_api.py]\n1 import logging\n2 \n3 from rest_framework.response import Response\n4 from rest_framework.exceptions import ValidationError\n5 from rest_framework import status\n6 from rest_framework import generics\n7 from rest_framework import serializers\n8 \n9 from hs_core import hydroshare\n10 from hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \\\n11 ExternalProfileLink, Format, FundingAgency, Identifier, Subject, Source, Relation\n12 from hs_core.views import utils as view_utils\n13 from hs_core.views.utils import ACTION_TO_AUTHORIZE\n14 \n15 logger = logging.getLogger(__name__)\n16 \n17 \n18 class ExternalProfileLinkSerializer(serializers.Serializer):\n19 type = serializers.CharField(required=False)\n20 url = serializers.URLField(required=False)\n21 object_id = serializers.IntegerField(required=False)\n22 # content_type = models.ForeignKey(ContentType)\n23 # content_object = GenericForeignKey('content_type', 'object_id')\n24 \n25 class Meta:\n26 model = ExternalProfileLink\n27 \n28 \n29 class PartySerializer(serializers.Serializer):\n30 name = serializers.CharField()\n31 description = serializers.URLField(required=False)\n32 organization = serializers.CharField(required=False)\n33 email = serializers.EmailField(required=False)\n34 address = serializers.CharField(required=False)\n35 
phone = serializers.CharField(required=False)\n36 homepage = serializers.URLField(required=False)\n37 external_links = serializers = ExternalProfileLinkSerializer(required=False, many=True)\n38 \n39 class Meta:\n40 model = Creator\n41 fields = {'name', 'description', 'organization', 'email',\n42 'address', 'phone', 'homepage', 'external_links'}\n43 \n44 \n45 class CreatorSerializer(PartySerializer):\n46 order = serializers.IntegerField(required=False)\n47 \n48 class Meta:\n49 model = Contributor\n50 \n51 \n52 class DateSerializer(serializers.Serializer):\n53 # term = 'Date'\n54 type = serializers.CharField(required=False)\n55 start_date = serializers.DateTimeField(required=False)\n56 end_date = serializers.DateTimeField(required=False)\n57 \n58 class Meta:\n59 model = Date\n60 \n61 \n62 class CoverageSerializer(serializers.Serializer):\n63 type = serializers.CharField(required=False)\n64 value = serializers.SerializerMethodField(required=False)\n65 \n66 class Meta:\n67 model = Coverage\n68 \n69 def get_value(self, obj):\n70 return obj.value\n71 \n72 \n73 class FormatSerializer(serializers.Serializer):\n74 value = serializers.CharField(required=False)\n75 \n76 class Meta:\n77 model = Format\n78 \n79 \n80 class FundingAgencySerializer(serializers.Serializer):\n81 agency_name = serializers.CharField()\n82 award_title = serializers.CharField(required=False)\n83 award_number = serializers.CharField(required=False)\n84 agency_url = serializers.URLField(required=False)\n85 \n86 class Meta:\n87 model = FundingAgency\n88 \n89 \n90 class IdentifierSerializer(serializers.Serializer):\n91 name = serializers.CharField(required=False)\n92 url = serializers.URLField(required=False)\n93 \n94 class Meta:\n95 model = Identifier\n96 \n97 \n98 class SubjectSerializer(serializers.Serializer):\n99 value = serializers.CharField(required=False)\n100 \n101 class Meta:\n102 model = Subject\n103 \n104 \n105 class SourceSerializer(serializers.Serializer):\n106 derived_from = serializers.CharField(required=False)\n107 \n108 class Meta:\n109 model = Source\n110 \n111 \n112 class RelationSerializer(serializers.Serializer):\n113 type = serializers.CharField(required=False)\n114 value = serializers.CharField(required=False)\n115 \n116 class Meta:\n117 model = Relation\n118 \n119 \n120 class CoreMetaDataSerializer(serializers.Serializer):\n121 title = serializers.CharField(required=False)\n122 creators = CreatorSerializer(required=False, many=True)\n123 contributors = PartySerializer(required=False, many=True)\n124 coverages = CoverageSerializer(required=False, many=True)\n125 dates = DateSerializer(required=False, many=True)\n126 description = serializers.CharField(required=False)\n127 formats = FormatSerializer(required=False, many=True)\n128 funding_agencies = FundingAgencySerializer(required=False, many=True)\n129 identifiers = IdentifierSerializer(required=False, many=True)\n130 language = serializers.CharField(required=False)\n131 rights = serializers.CharField(required=False)\n132 type = serializers.CharField(required=False)\n133 publisher = serializers.CharField(required=False)\n134 sources = SourceSerializer(required=False, many=True)\n135 subjects = SubjectSerializer(required=False, many=True)\n136 relations = RelationSerializer(required=False, many=True)\n137 \n138 class Meta:\n139 model = CoreMetaData\n140 \n141 \n142 class MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView):\n143 \"\"\"\n144 Retrieve resource science (Dublin Core) metadata\n145 \n146 REST URL: 
/hsapi/resource/{pk}/scimeta/elements/\n147 HTTP method: GET\n148 \n149 :type pk: str\n150 :param pk: id of the resource\n151 :return: resource science metadata as JSON document\n152 :rtype: str\n153 :raises:\n154 NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}\n155 PermissionDenied: return json format: {'detail': 'You do not have permission to perform\n156 this action.'}\n157 \n158 REST URL: /hsapi/resource/{pk}/scimeta/elements/\n159 HTTP method: PUT\n160 \n161 :type pk: str\n162 :param pk: id of the resource\n163 :type request: JSON formatted string\n164 :param request: resource metadata\n165 :return: updated resource science metadata as JSON document\n166 :rtype: str\n167 :raises:\n168 NotFound: return json format: {'detail': 'No resource was found for resource id':pk}\n169 PermissionDenied: return json format: {'detail': 'You do not have permission to perform\n170 this action.'}\n171 ValidationError: return json format: {parameter-1': ['error message-1'],\n172 'parameter-2': ['error message-2'], .. }\n173 \"\"\"\n174 ACCEPT_FORMATS = ('application/json',)\n175 \n176 allowed_methods = ('GET', 'PUT')\n177 \n178 serializer_class = CoreMetaDataSerializer\n179 \n180 def get(self, request, pk):\n181 view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)\n182 resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n183 serializer = resource.metadata.serializer\n184 return Response(data=serializer.data, status=status.HTTP_200_OK)\n185 \n186 def put(self, request, pk):\n187 # Update science metadata\n188 resource, _, _ = view_utils.authorize(\n189 request, pk,\n190 needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)\n191 \n192 metadata = []\n193 put_data = request.data.copy()\n194 \n195 try:\n196 resource.metadata.parse_for_bulk_update(put_data, metadata)\n197 hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)\n198 except Exception as ex:\n199 error_msg = {\n200 'resource': \"Resource metadata update failed: %s, %s\"\n201 % (ex.__class__, ex.message)\n202 }\n203 raise ValidationError(detail=error_msg)\n204 \n205 resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n206 serializer = resource.metadata.serializer\n207 return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)\n208\n[end of hs_core/views/resource_metadata_rest_api.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/hs_core/views/resource_metadata_rest_api.py b/hs_core/views/resource_metadata_rest_api.py\n--- a/hs_core/views/resource_metadata_rest_api.py\n+++ b/hs_core/views/resource_metadata_rest_api.py\n@@ -1,5 +1,7 @@\n import logging\n \n+from django.http import QueryDict\n+\n from rest_framework.response import Response\n from rest_framework.exceptions import ValidationError\n from rest_framework import status\n@@ -192,6 +194,10 @@\n metadata = []\n put_data = request.data.copy()\n \n+ # convert the QueryDict to dict\n+ if isinstance(put_data, QueryDict):\n+ put_data = put_data.dict()\n+\n try:\n resource.metadata.parse_for_bulk_update(put_data, metadata)\n hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/hs_core/views/resource_metadata_rest_api.py b/hs_core/views/resource_metadata_rest_api.py\\n--- a/hs_core/views/resource_metadata_rest_api.py\\n+++ b/hs_core/views/resource_metadata_rest_api.py\\n@@ -1,5 +1,7 @@\\n import logging\\n \\n+from django.http import QueryDict\\n+\\n from rest_framework.response import Response\\n from rest_framework.exceptions import ValidationError\\n from rest_framework import status\\n@@ -192,6 +194,10 @@\\n metadata = []\\n put_data = request.data.copy()\\n \\n+ # convert the QueryDict to dict\\n+ if isinstance(put_data, QueryDict):\\n+ put_data = put_data.dict()\\n+\\n try:\\n resource.metadata.parse_for_bulk_update(put_data, metadata)\\n hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)\\n\", \"issue\": \"Metadata strings updated through hs_restclient are parsed incorrectly\\nWhen using the hs_restclient to update metadata for Generic and Composite resources (I haven't tested other types), string values are updated as a unicode string inside an array (e.g. [u'some_string'] ). 
\\r\\n\\r\\nHere's an example code snippet:\\r\\n`\\r\\nput_data = {\\\"title\\\": \\\"New Title\\\", \\\"description\\\": \\\"New Description\\\"}\\r\\nclient.updateScienceMetadata('f44c00556cd847b98dd47f3a6279014d', put_data)\\r\\n`\\r\\n\\r\\nTwo resources that show this issue:\\r\\n![image](https://user-images.githubusercontent.com/8953221/28534607-84703f50-705e-11e7-9e73-5edd5058368f.png)\\r\\n![image](https://user-images.githubusercontent.com/8953221/28534643-984a8530-705e-11e7-84d7-df0ba05379f5.png)\\r\\n\\r\\n\\n\", \"before_files\": [{\"content\": \"import logging\\n\\nfrom rest_framework.response import Response\\nfrom rest_framework.exceptions import ValidationError\\nfrom rest_framework import status\\nfrom rest_framework import generics\\nfrom rest_framework import serializers\\n\\nfrom hs_core import hydroshare\\nfrom hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \\\\\\n ExternalProfileLink, Format, FundingAgency, Identifier, Subject, Source, Relation\\nfrom hs_core.views import utils as view_utils\\nfrom hs_core.views.utils import ACTION_TO_AUTHORIZE\\n\\nlogger = logging.getLogger(__name__)\\n\\n\\nclass ExternalProfileLinkSerializer(serializers.Serializer):\\n type = serializers.CharField(required=False)\\n url = serializers.URLField(required=False)\\n object_id = serializers.IntegerField(required=False)\\n # content_type = models.ForeignKey(ContentType)\\n # content_object = GenericForeignKey('content_type', 'object_id')\\n\\n class Meta:\\n model = ExternalProfileLink\\n\\n\\nclass PartySerializer(serializers.Serializer):\\n name = serializers.CharField()\\n description = serializers.URLField(required=False)\\n organization = serializers.CharField(required=False)\\n email = serializers.EmailField(required=False)\\n address = serializers.CharField(required=False)\\n phone = serializers.CharField(required=False)\\n homepage = serializers.URLField(required=False)\\n external_links = serializers = ExternalProfileLinkSerializer(required=False, many=True)\\n\\n class Meta:\\n model = Creator\\n fields = {'name', 'description', 'organization', 'email',\\n 'address', 'phone', 'homepage', 'external_links'}\\n\\n\\nclass CreatorSerializer(PartySerializer):\\n order = serializers.IntegerField(required=False)\\n\\n class Meta:\\n model = Contributor\\n\\n\\nclass DateSerializer(serializers.Serializer):\\n # term = 'Date'\\n type = serializers.CharField(required=False)\\n start_date = serializers.DateTimeField(required=False)\\n end_date = serializers.DateTimeField(required=False)\\n\\n class Meta:\\n model = Date\\n\\n\\nclass CoverageSerializer(serializers.Serializer):\\n type = serializers.CharField(required=False)\\n value = serializers.SerializerMethodField(required=False)\\n\\n class Meta:\\n model = Coverage\\n\\n def get_value(self, obj):\\n return obj.value\\n\\n\\nclass FormatSerializer(serializers.Serializer):\\n value = serializers.CharField(required=False)\\n\\n class Meta:\\n model = Format\\n\\n\\nclass FundingAgencySerializer(serializers.Serializer):\\n agency_name = serializers.CharField()\\n award_title = serializers.CharField(required=False)\\n award_number = serializers.CharField(required=False)\\n agency_url = serializers.URLField(required=False)\\n\\n class Meta:\\n model = FundingAgency\\n\\n\\nclass IdentifierSerializer(serializers.Serializer):\\n name = serializers.CharField(required=False)\\n url = serializers.URLField(required=False)\\n\\n class Meta:\\n model = Identifier\\n\\n\\nclass 
SubjectSerializer(serializers.Serializer):\\n value = serializers.CharField(required=False)\\n\\n class Meta:\\n model = Subject\\n\\n\\nclass SourceSerializer(serializers.Serializer):\\n derived_from = serializers.CharField(required=False)\\n\\n class Meta:\\n model = Source\\n\\n\\nclass RelationSerializer(serializers.Serializer):\\n type = serializers.CharField(required=False)\\n value = serializers.CharField(required=False)\\n\\n class Meta:\\n model = Relation\\n\\n\\nclass CoreMetaDataSerializer(serializers.Serializer):\\n title = serializers.CharField(required=False)\\n creators = CreatorSerializer(required=False, many=True)\\n contributors = PartySerializer(required=False, many=True)\\n coverages = CoverageSerializer(required=False, many=True)\\n dates = DateSerializer(required=False, many=True)\\n description = serializers.CharField(required=False)\\n formats = FormatSerializer(required=False, many=True)\\n funding_agencies = FundingAgencySerializer(required=False, many=True)\\n identifiers = IdentifierSerializer(required=False, many=True)\\n language = serializers.CharField(required=False)\\n rights = serializers.CharField(required=False)\\n type = serializers.CharField(required=False)\\n publisher = serializers.CharField(required=False)\\n sources = SourceSerializer(required=False, many=True)\\n subjects = SubjectSerializer(required=False, many=True)\\n relations = RelationSerializer(required=False, many=True)\\n\\n class Meta:\\n model = CoreMetaData\\n\\n\\nclass MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView):\\n \\\"\\\"\\\"\\n Retrieve resource science (Dublin Core) metadata\\n\\n REST URL: /hsapi/resource/{pk}/scimeta/elements/\\n HTTP method: GET\\n\\n :type pk: str\\n :param pk: id of the resource\\n :return: resource science metadata as JSON document\\n :rtype: str\\n :raises:\\n NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}\\n PermissionDenied: return json format: {'detail': 'You do not have permission to perform\\n this action.'}\\n\\n REST URL: /hsapi/resource/{pk}/scimeta/elements/\\n HTTP method: PUT\\n\\n :type pk: str\\n :param pk: id of the resource\\n :type request: JSON formatted string\\n :param request: resource metadata\\n :return: updated resource science metadata as JSON document\\n :rtype: str\\n :raises:\\n NotFound: return json format: {'detail': 'No resource was found for resource id':pk}\\n PermissionDenied: return json format: {'detail': 'You do not have permission to perform\\n this action.'}\\n ValidationError: return json format: {parameter-1': ['error message-1'],\\n 'parameter-2': ['error message-2'], .. 
}\\n \\\"\\\"\\\"\\n ACCEPT_FORMATS = ('application/json',)\\n\\n allowed_methods = ('GET', 'PUT')\\n\\n serializer_class = CoreMetaDataSerializer\\n\\n def get(self, request, pk):\\n view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)\\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\\n serializer = resource.metadata.serializer\\n return Response(data=serializer.data, status=status.HTTP_200_OK)\\n\\n def put(self, request, pk):\\n # Update science metadata\\n resource, _, _ = view_utils.authorize(\\n request, pk,\\n needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)\\n\\n metadata = []\\n put_data = request.data.copy()\\n\\n try:\\n resource.metadata.parse_for_bulk_update(put_data, metadata)\\n hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)\\n except Exception as ex:\\n error_msg = {\\n 'resource': \\\"Resource metadata update failed: %s, %s\\\"\\n % (ex.__class__, ex.message)\\n }\\n raise ValidationError(detail=error_msg)\\n\\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\\n serializer = resource.metadata.serializer\\n return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)\\n\", \"path\": \"hs_core/views/resource_metadata_rest_api.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2703,"string":"2,703"},"num_tokens_diff":{"kind":"number","value":183,"string":"183"}}},{"rowIdx":18148,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_4452"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"googleapis__google-cloud-python-3517"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nBroken link\nHello,\r\n\r\nI found that the link at [https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/core/google/cloud/client.py#L33][1] is broken.\r\n\r\nhttps://google-cloud-python.readthedocs.io/en/latest/google-cloud-auth.html\r\n\r\n[1]: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/661816540f1387bcc6e08b0fd722f4abae585b37/core/google/cloud/client.py#L33\n\n\n\n[start of core/google/cloud/client.py]\n1 # Copyright 2015 Google Inc.\n2 #\n3 # Licensed under the Apache License, Version 2.0 (the \"License\");\n4 # you may not use this file except in compliance with the License.\n5 # You may obtain a copy of the License at\n6 #\n7 # http://www.apache.org/licenses/LICENSE-2.0\n8 #\n9 # Unless required by applicable law or agreed to in writing, software\n10 # distributed under the License is distributed on an \"AS IS\" BASIS,\n11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n12 # See the License for the specific language governing permissions and\n13 # limitations under the License.\n14 \n15 \"\"\"Base classes for client used to interact with Google Cloud APIs.\"\"\"\n16 \n17 import io\n18 import json\n19 from pickle import PicklingError\n20 \n21 import google.auth.credentials\n22 from google.oauth2 import service_account\n23 import google_auth_httplib2\n24 import six\n25 \n26 from google.cloud._helpers import _determine_default_project\n27 from google.cloud.credentials import get_credentials\n28 \n29 \n30 _GOOGLE_AUTH_CREDENTIALS_HELP = (\n31 'This library only supports credentials from google-auth-library-python. 
'\n32 'See https://google-cloud-python.readthedocs.io/en/latest/'\n33 'google-cloud-auth.html for help on authentication with this library.'\n34 )\n35 \n36 \n37 class _ClientFactoryMixin(object):\n38 \"\"\"Mixin to allow factories that create credentials.\n39 \n40 .. note::\n41 \n42 This class is virtual.\n43 \"\"\"\n44 \n45 _SET_PROJECT = False\n46 \n47 @classmethod\n48 def from_service_account_json(cls, json_credentials_path, *args, **kwargs):\n49 \"\"\"Factory to retrieve JSON credentials while creating client.\n50 \n51 :type json_credentials_path: str\n52 :param json_credentials_path: The path to a private key file (this file\n53 was given to you when you created the\n54 service account). This file must contain\n55 a JSON object with a private key and\n56 other credentials information (downloaded\n57 from the Google APIs console).\n58 \n59 :type args: tuple\n60 :param args: Remaining positional arguments to pass to constructor.\n61 \n62 :type kwargs: dict\n63 :param kwargs: Remaining keyword arguments to pass to constructor.\n64 \n65 :rtype: :class:`_ClientFactoryMixin`\n66 :returns: The client created with the retrieved JSON credentials.\n67 :raises: :class:`TypeError` if there is a conflict with the kwargs\n68 and the credentials created by the factory.\n69 \"\"\"\n70 if 'credentials' in kwargs:\n71 raise TypeError('credentials must not be in keyword arguments')\n72 with io.open(json_credentials_path, 'r', encoding='utf-8') as json_fi:\n73 credentials_info = json.load(json_fi)\n74 credentials = service_account.Credentials.from_service_account_info(\n75 credentials_info)\n76 if cls._SET_PROJECT:\n77 if 'project' not in kwargs:\n78 kwargs['project'] = credentials_info.get('project_id')\n79 \n80 kwargs['credentials'] = credentials\n81 return cls(*args, **kwargs)\n82 \n83 \n84 class Client(_ClientFactoryMixin):\n85 \"\"\"Client to bundle configuration needed for API requests.\n86 \n87 Stores ``credentials`` and an HTTP object so that subclasses\n88 can pass them along to a connection class.\n89 \n90 If no value is passed in for ``_http``, a :class:`httplib2.Http` object\n91 will be created and authorized with the ``credentials``. If not, the\n92 ``credentials`` and ``_http`` need not be related.\n93 \n94 Callers and subclasses may seek to use the private key from\n95 ``credentials`` to sign data.\n96 \n97 A custom (non-``httplib2``) HTTP object must have a ``request`` method\n98 which accepts the following arguments:\n99 \n100 * ``uri``\n101 * ``method``\n102 * ``body``\n103 * ``headers``\n104 \n105 In addition, ``redirections`` and ``connection_type`` may be used.\n106 \n107 A custom ``_http`` object will also need to be able to add a bearer token\n108 to API requests and handle token refresh on 401 errors.\n109 \n110 :type credentials: :class:`~google.auth.credentials.Credentials`\n111 :param credentials: (Optional) The OAuth2 Credentials to use for this\n112 client. If not passed (and if no ``_http`` object is\n113 passed), falls back to the default inferred from the\n114 environment.\n115 \n116 :type _http: :class:`~httplib2.Http`\n117 :param _http: (Optional) HTTP object to make requests. Can be any object\n118 that defines ``request()`` with the same interface as\n119 :meth:`~httplib2.Http.request`. 
If not passed, an\n120 ``_http`` object is created that is bound to the\n121 ``credentials`` for the current object.\n122 This parameter should be considered private, and could\n123 change in the future.\n124 \"\"\"\n125 \n126 SCOPE = None\n127 \"\"\"The scopes required for authenticating with a service.\n128 \n129 Needs to be set by subclasses.\n130 \"\"\"\n131 \n132 def __init__(self, credentials=None, _http=None):\n133 if (credentials is not None and\n134 not isinstance(\n135 credentials, google.auth.credentials.Credentials)):\n136 raise ValueError(_GOOGLE_AUTH_CREDENTIALS_HELP)\n137 if credentials is None and _http is None:\n138 credentials = get_credentials()\n139 self._credentials = google.auth.credentials.with_scopes_if_required(\n140 credentials, self.SCOPE)\n141 self._http_internal = _http\n142 \n143 def __getstate__(self):\n144 \"\"\"Explicitly state that clients are not pickleable.\"\"\"\n145 raise PicklingError('\\n'.join([\n146 'Pickling client objects is explicitly not supported.',\n147 'Clients have non-trivial state that is local and unpickleable.',\n148 ]))\n149 \n150 @property\n151 def _http(self):\n152 \"\"\"Getter for object used for HTTP transport.\n153 \n154 :rtype: :class:`~httplib2.Http`\n155 :returns: An HTTP object.\n156 \"\"\"\n157 if self._http_internal is None:\n158 self._http_internal = google_auth_httplib2.AuthorizedHttp(\n159 self._credentials)\n160 return self._http_internal\n161 \n162 \n163 class _ClientProjectMixin(object):\n164 \"\"\"Mixin to allow setting the project on the client.\n165 \n166 :type project: str\n167 :param project: the project which the client acts on behalf of. If not\n168 passed falls back to the default inferred from the\n169 environment.\n170 \n171 :raises: :class:`EnvironmentError` if the project is neither passed in nor\n172 set in the environment. :class:`ValueError` if the project value\n173 is invalid.\n174 \"\"\"\n175 \n176 def __init__(self, project=None):\n177 project = self._determine_default(project)\n178 if project is None:\n179 raise EnvironmentError('Project was not passed and could not be '\n180 'determined from the environment.')\n181 if isinstance(project, six.binary_type):\n182 project = project.decode('utf-8')\n183 if not isinstance(project, six.string_types):\n184 raise ValueError('Project must be a string.')\n185 self.project = project\n186 \n187 @staticmethod\n188 def _determine_default(project):\n189 \"\"\"Helper: use default project detection.\"\"\"\n190 return _determine_default_project(project)\n191 \n192 \n193 class ClientWithProject(Client, _ClientProjectMixin):\n194 \"\"\"Client that also stores a project.\n195 \n196 :type project: str\n197 :param project: the project which the client acts on behalf of. If not\n198 passed falls back to the default inferred from the\n199 environment.\n200 \n201 :type credentials: :class:`~google.auth.credentials.Credentials`\n202 :param credentials: (Optional) The OAuth2 Credentials to use for this\n203 client. If not passed (and if no ``_http`` object is\n204 passed), falls back to the default inferred from the\n205 environment.\n206 \n207 :type _http: :class:`~httplib2.Http`\n208 :param _http: (Optional) HTTP object to make requests. Can be any object\n209 that defines ``request()`` with the same interface as\n210 :meth:`~httplib2.Http.request`. 
If not passed, an\n211 ``_http`` object is created that is bound to the\n212 ``credentials`` for the current object.\n213 This parameter should be considered private, and could\n214 change in the future.\n215 \n216 :raises: :class:`ValueError` if the project is neither passed in nor\n217 set in the environment.\n218 \"\"\"\n219 \n220 _SET_PROJECT = True # Used by from_service_account_json()\n221 \n222 def __init__(self, project=None, credentials=None, _http=None):\n223 _ClientProjectMixin.__init__(self, project=project)\n224 Client.__init__(self, credentials=credentials, _http=_http)\n225\n[end of core/google/cloud/client.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/core/google/cloud/client.py b/core/google/cloud/client.py\n--- a/core/google/cloud/client.py\n+++ b/core/google/cloud/client.py\n@@ -29,8 +29,8 @@\n \n _GOOGLE_AUTH_CREDENTIALS_HELP = (\n 'This library only supports credentials from google-auth-library-python. '\n- 'See https://google-cloud-python.readthedocs.io/en/latest/'\n- 'google-cloud-auth.html for help on authentication with this library.'\n+ 'See https://google-cloud-python.readthedocs.io/en/latest/core/auth.html '\n+ 'for help on authentication with this library.'\n )\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/core/google/cloud/client.py b/core/google/cloud/client.py\\n--- a/core/google/cloud/client.py\\n+++ b/core/google/cloud/client.py\\n@@ -29,8 +29,8 @@\\n \\n _GOOGLE_AUTH_CREDENTIALS_HELP = (\\n 'This library only supports credentials from google-auth-library-python. 
'\\n- 'See https://google-cloud-python.readthedocs.io/en/latest/'\\n- 'google-cloud-auth.html for help on authentication with this library.'\\n+ 'See https://google-cloud-python.readthedocs.io/en/latest/core/auth.html '\\n+ 'for help on authentication with this library.'\\n )\\n\", \"issue\": \"Broken link\\nHello,\\r\\n\\r\\nI found that the link at [https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/core/google/cloud/client.py#L33][1] is broken.\\r\\n\\r\\nhttps://google-cloud-python.readthedocs.io/en/latest/google-cloud-auth.html\\r\\n\\r\\n[1]: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/661816540f1387bcc6e08b0fd722f4abae585b37/core/google/cloud/client.py#L33\\n\", \"before_files\": [{\"content\": \"# Copyright 2015 Google Inc.\\n#\\n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n#\\n# http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\\n\\n\\\"\\\"\\\"Base classes for client used to interact with Google Cloud APIs.\\\"\\\"\\\"\\n\\nimport io\\nimport json\\nfrom pickle import PicklingError\\n\\nimport google.auth.credentials\\nfrom google.oauth2 import service_account\\nimport google_auth_httplib2\\nimport six\\n\\nfrom google.cloud._helpers import _determine_default_project\\nfrom google.cloud.credentials import get_credentials\\n\\n\\n_GOOGLE_AUTH_CREDENTIALS_HELP = (\\n 'This library only supports credentials from google-auth-library-python. '\\n 'See https://google-cloud-python.readthedocs.io/en/latest/'\\n 'google-cloud-auth.html for help on authentication with this library.'\\n)\\n\\n\\nclass _ClientFactoryMixin(object):\\n \\\"\\\"\\\"Mixin to allow factories that create credentials.\\n\\n .. note::\\n\\n This class is virtual.\\n \\\"\\\"\\\"\\n\\n _SET_PROJECT = False\\n\\n @classmethod\\n def from_service_account_json(cls, json_credentials_path, *args, **kwargs):\\n \\\"\\\"\\\"Factory to retrieve JSON credentials while creating client.\\n\\n :type json_credentials_path: str\\n :param json_credentials_path: The path to a private key file (this file\\n was given to you when you created the\\n service account). 
This file must contain\\n a JSON object with a private key and\\n other credentials information (downloaded\\n from the Google APIs console).\\n\\n :type args: tuple\\n :param args: Remaining positional arguments to pass to constructor.\\n\\n :type kwargs: dict\\n :param kwargs: Remaining keyword arguments to pass to constructor.\\n\\n :rtype: :class:`_ClientFactoryMixin`\\n :returns: The client created with the retrieved JSON credentials.\\n :raises: :class:`TypeError` if there is a conflict with the kwargs\\n and the credentials created by the factory.\\n \\\"\\\"\\\"\\n if 'credentials' in kwargs:\\n raise TypeError('credentials must not be in keyword arguments')\\n with io.open(json_credentials_path, 'r', encoding='utf-8') as json_fi:\\n credentials_info = json.load(json_fi)\\n credentials = service_account.Credentials.from_service_account_info(\\n credentials_info)\\n if cls._SET_PROJECT:\\n if 'project' not in kwargs:\\n kwargs['project'] = credentials_info.get('project_id')\\n\\n kwargs['credentials'] = credentials\\n return cls(*args, **kwargs)\\n\\n\\nclass Client(_ClientFactoryMixin):\\n \\\"\\\"\\\"Client to bundle configuration needed for API requests.\\n\\n Stores ``credentials`` and an HTTP object so that subclasses\\n can pass them along to a connection class.\\n\\n If no value is passed in for ``_http``, a :class:`httplib2.Http` object\\n will be created and authorized with the ``credentials``. If not, the\\n ``credentials`` and ``_http`` need not be related.\\n\\n Callers and subclasses may seek to use the private key from\\n ``credentials`` to sign data.\\n\\n A custom (non-``httplib2``) HTTP object must have a ``request`` method\\n which accepts the following arguments:\\n\\n * ``uri``\\n * ``method``\\n * ``body``\\n * ``headers``\\n\\n In addition, ``redirections`` and ``connection_type`` may be used.\\n\\n A custom ``_http`` object will also need to be able to add a bearer token\\n to API requests and handle token refresh on 401 errors.\\n\\n :type credentials: :class:`~google.auth.credentials.Credentials`\\n :param credentials: (Optional) The OAuth2 Credentials to use for this\\n client. If not passed (and if no ``_http`` object is\\n passed), falls back to the default inferred from the\\n environment.\\n\\n :type _http: :class:`~httplib2.Http`\\n :param _http: (Optional) HTTP object to make requests. Can be any object\\n that defines ``request()`` with the same interface as\\n :meth:`~httplib2.Http.request`. 
If not passed, an\\n ``_http`` object is created that is bound to the\\n ``credentials`` for the current object.\\n This parameter should be considered private, and could\\n change in the future.\\n \\\"\\\"\\\"\\n\\n SCOPE = None\\n \\\"\\\"\\\"The scopes required for authenticating with a service.\\n\\n Needs to be set by subclasses.\\n \\\"\\\"\\\"\\n\\n def __init__(self, credentials=None, _http=None):\\n if (credentials is not None and\\n not isinstance(\\n credentials, google.auth.credentials.Credentials)):\\n raise ValueError(_GOOGLE_AUTH_CREDENTIALS_HELP)\\n if credentials is None and _http is None:\\n credentials = get_credentials()\\n self._credentials = google.auth.credentials.with_scopes_if_required(\\n credentials, self.SCOPE)\\n self._http_internal = _http\\n\\n def __getstate__(self):\\n \\\"\\\"\\\"Explicitly state that clients are not pickleable.\\\"\\\"\\\"\\n raise PicklingError('\\\\n'.join([\\n 'Pickling client objects is explicitly not supported.',\\n 'Clients have non-trivial state that is local and unpickleable.',\\n ]))\\n\\n @property\\n def _http(self):\\n \\\"\\\"\\\"Getter for object used for HTTP transport.\\n\\n :rtype: :class:`~httplib2.Http`\\n :returns: An HTTP object.\\n \\\"\\\"\\\"\\n if self._http_internal is None:\\n self._http_internal = google_auth_httplib2.AuthorizedHttp(\\n self._credentials)\\n return self._http_internal\\n\\n\\nclass _ClientProjectMixin(object):\\n \\\"\\\"\\\"Mixin to allow setting the project on the client.\\n\\n :type project: str\\n :param project: the project which the client acts on behalf of. If not\\n passed falls back to the default inferred from the\\n environment.\\n\\n :raises: :class:`EnvironmentError` if the project is neither passed in nor\\n set in the environment. :class:`ValueError` if the project value\\n is invalid.\\n \\\"\\\"\\\"\\n\\n def __init__(self, project=None):\\n project = self._determine_default(project)\\n if project is None:\\n raise EnvironmentError('Project was not passed and could not be '\\n 'determined from the environment.')\\n if isinstance(project, six.binary_type):\\n project = project.decode('utf-8')\\n if not isinstance(project, six.string_types):\\n raise ValueError('Project must be a string.')\\n self.project = project\\n\\n @staticmethod\\n def _determine_default(project):\\n \\\"\\\"\\\"Helper: use default project detection.\\\"\\\"\\\"\\n return _determine_default_project(project)\\n\\n\\nclass ClientWithProject(Client, _ClientProjectMixin):\\n \\\"\\\"\\\"Client that also stores a project.\\n\\n :type project: str\\n :param project: the project which the client acts on behalf of. If not\\n passed falls back to the default inferred from the\\n environment.\\n\\n :type credentials: :class:`~google.auth.credentials.Credentials`\\n :param credentials: (Optional) The OAuth2 Credentials to use for this\\n client. If not passed (and if no ``_http`` object is\\n passed), falls back to the default inferred from the\\n environment.\\n\\n :type _http: :class:`~httplib2.Http`\\n :param _http: (Optional) HTTP object to make requests. Can be any object\\n that defines ``request()`` with the same interface as\\n :meth:`~httplib2.Http.request`. 
If not passed, an\\n ``_http`` object is created that is bound to the\\n ``credentials`` for the current object.\\n This parameter should be considered private, and could\\n change in the future.\\n\\n :raises: :class:`ValueError` if the project is neither passed in nor\\n set in the environment.\\n \\\"\\\"\\\"\\n\\n _SET_PROJECT = True # Used by from_service_account_json()\\n\\n def __init__(self, project=None, credentials=None, _http=None):\\n _ClientProjectMixin.__init__(self, project=project)\\n Client.__init__(self, credentials=credentials, _http=_http)\\n\", \"path\": \"core/google/cloud/client.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3054,"string":"3,054"},"num_tokens_diff":{"kind":"number","value":131,"string":"131"}}},{"rowIdx":18149,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_42452"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"freedomofpress__securedrop-6681"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nruntime `i18n` configuration manipulates global state\nI guess we already do this for LOCALES, but I think continuing the pattern should be accompanied by a TODO that manipulating/relying on global state is not desirable.\r\n\r\n_Originally posted by @legoktm in https://github.com/freedomofpress/securedrop/pull/6406#discussion_r863080227_\n\n\n\n[start of securedrop/i18n.py]\n1 #\n2 # SecureDrop whistleblower submission system\n3 # Copyright (C) 2017 Loic Dachary \n4 #\n5 # This program is free software: you can redistribute it and/or modify\n6 # it under the terms of the GNU Affero General Public License as published by\n7 # the Free Software Foundation, either version 3 of the License, or\n8 # (at your option) any later version.\n9 #\n10 # This program is distributed in the hope that it will be useful,\n11 # but WITHOUT ANY WARRANTY; without even the implied warranty of\n12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n13 # GNU Affero General Public License for more details.\n14 #\n15 # You should have received a copy of the GNU Affero General Public License\n16 # along with this program. 
If not, see .\n17 #\n18 import collections\n19 from typing import Dict, List, Set\n20 \n21 from babel.core import (\n22 Locale,\n23 UnknownLocaleError,\n24 get_locale_identifier,\n25 negotiate_locale,\n26 parse_locale,\n27 )\n28 from flask import Flask, g, request, session\n29 from flask_babel import Babel\n30 from sdconfig import FALLBACK_LOCALE, SDConfig\n31 \n32 \n33 class RequestLocaleInfo:\n34 \"\"\"\n35 Convenience wrapper around a babel.core.Locale.\n36 \"\"\"\n37 \n38 def __init__(self, locale: str):\n39 self.locale = Locale.parse(locale)\n40 \n41 # This attribute can be set to `True` to differentiate multiple\n42 # locales currently available (supported) for the same language.\n43 self.use_display_name = False\n44 \n45 def __str__(self) -> str:\n46 \"\"\"\n47 The Babel string representation of the locale.\n48 \"\"\"\n49 return str(self.locale)\n50 \n51 @property\n52 def display_name(self) -> str:\n53 \"\"\"\n54 Give callers (i.e., templates) the `Locale` object's display name when\n55 such resolution is warranted, otherwise the language name---as\n56 determined by `map_locale_display_names()`.\n57 \"\"\"\n58 if self.use_display_name:\n59 return self.locale.display_name\n60 return self.locale.language_name\n61 \n62 @property\n63 def text_direction(self) -> str:\n64 \"\"\"\n65 The Babel text direction: ltr or rtl.\n66 \n67 Used primarily to set text direction in HTML via the \"dir\"\n68 attribute.\n69 \"\"\"\n70 return self.locale.text_direction\n71 \n72 @property\n73 def language(self) -> str:\n74 \"\"\"\n75 The Babel language name.\n76 \n77 Just the language, without subtag info like region or script.\n78 \"\"\"\n79 return self.locale.language\n80 \n81 @property\n82 def id(self) -> str:\n83 \"\"\"\n84 The Babel string representation of the locale.\n85 \n86 This should match the name of the directory containing its\n87 translations.\n88 \"\"\"\n89 return str(self.locale)\n90 \n91 @property\n92 def language_tag(self) -> str:\n93 \"\"\"\n94 Returns a BCP47/RFC5646 language tag for the locale.\n95 \n96 Language tags are used in HTTP headers and the HTML lang\n97 attribute.\n98 \"\"\"\n99 return get_locale_identifier(parse_locale(str(self.locale)), sep=\"-\")\n100 \n101 \n102 def configure_babel(config: SDConfig, app: Flask) -> Babel:\n103 \"\"\"\n104 Set up Flask-Babel according to the SecureDrop configuration.\n105 \"\"\"\n106 # Tell Babel where to find our translations.\n107 translations_directory = str(config.TRANSLATION_DIRS.absolute())\n108 app.config[\"BABEL_TRANSLATION_DIRECTORIES\"] = translations_directory\n109 \n110 # Create the app's Babel instance. Passing the app to the\n111 # constructor causes the instance to attach itself to the app.\n112 babel = Babel(app)\n113 \n114 # verify that Babel is only using the translations we told it about\n115 if list(babel.translation_directories) != [translations_directory]:\n116 raise ValueError(\n117 \"Babel translation directories ({}) do not match SecureDrop configuration ({})\".format(\n118 babel.translation_directories, [translations_directory]\n119 )\n120 )\n121 \n122 # register the function used to determine the locale of a request\n123 babel.localeselector(lambda: get_locale(config))\n124 return babel\n125 \n126 \n127 def parse_locale_set(codes: List[str]) -> Set[Locale]:\n128 return {Locale.parse(code) for code in codes}\n129 \n130 \n131 def validate_locale_configuration(config: SDConfig, babel: Babel) -> None:\n132 \"\"\"\n133 Check that configured locales are available in the filesystem and therefore usable by\n134 Babel. 
Warn about configured locales that are not usable, unless we're left with\n135 no usable default or fallback locale, in which case raise an exception.\n136 \"\"\"\n137 # These locales are available and loadable from the filesystem.\n138 available = set(babel.list_translations())\n139 available.add(Locale.parse(FALLBACK_LOCALE))\n140 \n141 # These locales were configured via \"securedrop-admin sdconfig\", meaning\n142 # they were present on the Admin Workstation at \"securedrop-admin\" runtime.\n143 configured = parse_locale_set(config.SUPPORTED_LOCALES)\n144 \n145 # The intersection of these sets is the set of locales usable by Babel.\n146 usable = available & configured\n147 \n148 missing = configured - usable\n149 if missing:\n150 babel.app.logger.error(\n151 f\"Configured locales {missing} are not in the set of usable locales {usable}\"\n152 )\n153 \n154 defaults = parse_locale_set([config.DEFAULT_LOCALE, FALLBACK_LOCALE])\n155 if not defaults & usable:\n156 raise ValueError(\n157 f\"None of the default locales {defaults} are in the set of usable locales {usable}\"\n158 )\n159 \n160 global USABLE_LOCALES\n161 USABLE_LOCALES = usable\n162 \n163 \n164 # TODO(#6420): avoid relying on and manipulating on this global state\n165 LOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo]\n166 USABLE_LOCALES = set() # type: Set[Locale]\n167 \n168 \n169 def map_locale_display_names(config: SDConfig) -> None:\n170 \"\"\"\n171 Create a map of locale identifiers to names for display.\n172 \n173 For most of our supported languages, we only provide one\n174 translation, so including the full display name is not necessary\n175 to distinguish them. For languages with more than one translation,\n176 like Chinese, we do need the additional detail.\n177 \"\"\"\n178 \n179 language_locale_counts = collections.defaultdict(int) # type: Dict[str, int]\n180 for l in sorted(config.SUPPORTED_LOCALES):\n181 locale = RequestLocaleInfo(l)\n182 language_locale_counts[locale.language] += 1\n183 \n184 locale_map = collections.OrderedDict()\n185 for l in sorted(config.SUPPORTED_LOCALES):\n186 if Locale.parse(l) not in USABLE_LOCALES:\n187 continue\n188 \n189 locale = RequestLocaleInfo(l)\n190 if language_locale_counts[locale.language] > 1:\n191 # Disambiguate translations for this language.\n192 locale.use_display_name = True\n193 \n194 locale_map[str(locale)] = locale\n195 \n196 global LOCALES\n197 LOCALES = locale_map\n198 \n199 \n200 def configure(config: SDConfig, app: Flask) -> None:\n201 babel = configure_babel(config, app)\n202 validate_locale_configuration(config, babel)\n203 map_locale_display_names(config)\n204 \n205 \n206 def get_locale(config: SDConfig) -> str:\n207 \"\"\"\n208 Return the best supported locale for a request.\n209 \n210 Get the locale as follows, by order of precedence:\n211 - l request argument or session['locale']\n212 - browser suggested locale, from the Accept-Languages header\n213 - config.DEFAULT_LOCALE\n214 - config.FALLBACK_LOCALE\n215 \"\"\"\n216 preferences = []\n217 if session and session.get(\"locale\"):\n218 preferences.append(session.get(\"locale\"))\n219 if request.args.get(\"l\"):\n220 preferences.insert(0, request.args.get(\"l\"))\n221 if not preferences:\n222 preferences.extend(get_accepted_languages())\n223 preferences.append(config.DEFAULT_LOCALE)\n224 preferences.append(FALLBACK_LOCALE)\n225 \n226 negotiated = negotiate_locale(preferences, LOCALES.keys())\n227 \n228 if not negotiated:\n229 raise ValueError(\"No usable locale\")\n230 \n231 
return negotiated\n232 \n233 \n234 def get_accepted_languages() -> List[str]:\n235 \"\"\"\n236 Convert a request's list of accepted languages into locale identifiers.\n237 \"\"\"\n238 accept_languages = []\n239 for l in request.accept_languages.values():\n240 try:\n241 parsed = Locale.parse(l, \"-\")\n242 accept_languages.append(str(parsed))\n243 \n244 # We only have two Chinese translations, simplified\n245 # and traditional, based on script and not\n246 # region. Browsers tend to send identifiers with\n247 # region, e.g. zh-CN or zh-TW. Babel can generally\n248 # infer the script from those, so we can fabricate a\n249 # fallback entry without region, in the hope that it\n250 # will match one of our translations and the site will\n251 # at least be more legible at first contact than the\n252 # probable default locale of English.\n253 if parsed.language == \"zh\" and parsed.script:\n254 accept_languages.append(str(Locale(language=parsed.language, script=parsed.script)))\n255 except (ValueError, UnknownLocaleError):\n256 pass\n257 return accept_languages\n258 \n259 \n260 def set_locale(config: SDConfig) -> None:\n261 \"\"\"\n262 Update locale info in request and session.\n263 \"\"\"\n264 locale = get_locale(config)\n265 g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot\n266 session[\"locale\"] = locale\n267 g.locales = LOCALES # pylint: disable=assigning-non-slot\n268\n[end of securedrop/i18n.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/securedrop/i18n.py b/securedrop/i18n.py\n--- a/securedrop/i18n.py\n+++ b/securedrop/i18n.py\n@@ -16,7 +16,7 @@\n # along with this program. If not, see .\n #\n import collections\n-from typing import Dict, List, Set\n+from typing import Dict, List, OrderedDict, Set\n \n from babel.core import (\n Locale,\n@@ -25,7 +25,7 @@\n negotiate_locale,\n parse_locale,\n )\n-from flask import Flask, g, request, session\n+from flask import Flask, current_app, g, request, session\n from flask_babel import Babel\n from sdconfig import FALLBACK_LOCALE, SDConfig\n \n@@ -128,7 +128,7 @@\n return {Locale.parse(code) for code in codes}\n \n \n-def validate_locale_configuration(config: SDConfig, babel: Babel) -> None:\n+def validate_locale_configuration(config: SDConfig, babel: Babel) -> Set[Locale]:\n \"\"\"\n Check that configured locales are available in the filesystem and therefore usable by\n Babel. 
Warn about configured locales that are not usable, unless we're left with
@@ -157,16 +157,12 @@
             f"None of the default locales {defaults} are in the set of usable locales {usable}"
         )
 
-    global USABLE_LOCALES
-    USABLE_LOCALES = usable
+    return usable
 
 
-# TODO(#6420): avoid relying on and manipulating on this global state
-LOCALES = collections.OrderedDict()  # type: collections.OrderedDict[str, RequestLocaleInfo]
-USABLE_LOCALES = set()  # type: Set[Locale]
-
-
-def map_locale_display_names(config: SDConfig) -> None:
+def map_locale_display_names(
+    config: SDConfig, usable_locales: Set[Locale]
+) -> OrderedDict[str, RequestLocaleInfo]:
     """
     Create a map of locale identifiers to names for display.
 
@@ -183,7 +179,7 @@
 
     locale_map = collections.OrderedDict()
     for l in sorted(config.SUPPORTED_LOCALES):
-        if Locale.parse(l) not in USABLE_LOCALES:
+        if Locale.parse(l) not in usable_locales:
             continue
 
         locale = RequestLocaleInfo(l)
@@ -193,14 +189,13 @@
 
         locale_map[str(locale)] = locale
 
-    global LOCALES
-    LOCALES = locale_map
+    return locale_map
 
 
 def configure(config: SDConfig, app: Flask) -> None:
     babel = configure_babel(config, app)
-    validate_locale_configuration(config, babel)
-    map_locale_display_names(config)
+    usable_locales = validate_locale_configuration(config, babel)
+    app.config["LOCALES"] = map_locale_display_names(config, usable_locales)
 
 
 def get_locale(config: SDConfig) -> str:
@@ -223,7 +218,8 @@
     preferences.append(config.DEFAULT_LOCALE)
     preferences.append(FALLBACK_LOCALE)
 
-    negotiated = negotiate_locale(preferences, LOCALES.keys())
+    locales = current_app.config["LOCALES"]
+    negotiated = negotiate_locale(preferences, locales.keys())
 
     if not negotiated:
         raise ValueError("No usable locale")
@@ -264,4 +260,4 @@
     locale = get_locale(config)
     g.localeinfo = RequestLocaleInfo(locale)  # pylint: disable=assigning-non-slot
     session["locale"] = locale
-    g.locales = LOCALES  # pylint: disable=assigning-non-slot
+    g.locales = current_app.config["LOCALES"]  # pylint: disable=assigning-non-slot

verification_info: the golden diff, issue text, and original securedrop/i18n.py source, as shown above.
num_tokens_prompt: 3329
num_tokens_diff: 850

problem_id: gh_patches_debug_30297
source: rasdani/github-patches
task_type: git_diff
in_source_id: nltk__nltk-926

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.

BLEU score brevity penalty does not follow definition
Papineni et al. (2002), if I understand correctly, define the BLEU brevity penalty as follows:
- let _c_ be the length of the candidate
- let _r_ be the length of the reference which is closest in length to the candidate

Then, BP = 1 if c > r, and BP = exp(1 - r / c) otherwise.

(There are some details about doing this at the corpus level, but they're not relevant to the point at hand.)

But this not what `nltk.align.bleu_score._brevity_penalty` (called by `nltk.align.bleu_score`) computes. Rather, it computes _r_ as follows:

```
c = len(candidate)
r = min(abs(len(r) - c) for r in references)
```

So if _c_ is 12 and _r_ under Papineni et al.'s definition is 28, then this function will set `r` to 16, not 28.

However, it is possible I have misunderstood the original paper. And it would be ideal to test this against a canonical implementation.
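Read literally, the paper's definition can be expressed as a small self-contained sketch (illustrative only; this is not the nltk helper under discussion, and corpus-level aggregation is ignored):

```python
import math

def brevity_penalty_per_paper(candidate, references):
    # c: candidate length; r: length of the reference closest in length to the candidate.
    c = len(candidate)
    r = min((len(ref) for ref in references), key=lambda ref_len: abs(ref_len - c))
    return 1.0 if c > r else math.exp(1 - r / c)

# With a candidate of length 12 and one reference of length 28 this yields exp(1 - 28/12),
# whereas the expression quoted above first collapses 28 into abs(28 - 12) == 16.
```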
But if I have understood correctly, the fix would be something like:\n\n```\nc = len(candidate)\nref_lens = [len(ref) for ref in references]\nr = min(ref_lens, key=lambda ref_len: abs(ref_len - c))\n```\n\n\n\n\n[start of nltk/align/bleu_score.py]\n1 # -*- coding: utf-8 -*-\n2 # Natural Language Toolkit: BLEU Score\n3 #\n4 # Copyright (C) 2001-2015 NLTK Project\n5 # Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim\n6 # Contributors: Dmitrijs Milajevs\n7 # URL: \n8 # For license information, see LICENSE.TXT\n9 \"\"\"BLEU score implementation.\"\"\"\n10 \n11 from __future__ import division\n12 \n13 import math\n14 \n15 from nltk.tokenize import word_tokenize\n16 from nltk.compat import Counter\n17 from nltk.util import ngrams\n18 \n19 \n20 def bleu(candidate, references, weights):\n21 \"\"\"Calculate BLEU score (Bilingual Evaluation Understudy)\n22 \n23 :param candidate: a candidate sentence\n24 :type candidate: list(str)\n25 :param references: reference sentences\n26 :type references: list(list(str))\n27 :param weights: weights for unigrams, bigrams, trigrams and so on\n28 :type weights: list(float)\n29 \n30 >>> weights = [0.25, 0.25, 0.25, 0.25]\n31 >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n32 ... 'ensures', 'that', 'the', 'military', 'always',\n33 ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n34 \n35 >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n36 ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n37 ... 'that', 'party', 'direct']\n38 \n39 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n40 ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n41 ... 'heed', 'Party', 'commands']\n42 \n43 >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n44 ... 'guarantees', 'the', 'military', 'forces', 'always',\n45 ... 'being', 'under', 'the', 'command', 'of', 'the',\n46 ... 'Party']\n47 \n48 >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n49 ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n50 ... 'of', 'the', 'party']\n51 \n52 >>> bleu(candidate1, [reference1, reference2, reference3], weights)\n53 0.504...\n54 \n55 >>> bleu(candidate2, [reference1, reference2, reference3], weights)\n56 0\n57 \n58 Papineni, Kishore, et al. \"BLEU: A method for automatic evaluation of\n59 machine translation.\" Proceedings of the 40th annual meeting on association for\n60 computational linguistics. Association for Computational Linguistics, 2002.\n61 http://www.aclweb.org/anthology/P02-1040.pdf\n62 \n63 \"\"\"\n64 p_ns = (\n65 _modified_precision(candidate, references, i)\n66 for i, _ in enumerate(weights, start=1)\n67 )\n68 \n69 try:\n70 s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns))\n71 except ValueError:\n72 # some p_ns is 0\n73 return 0\n74 \n75 bp = _brevity_penalty(candidate, references)\n76 return bp * math.exp(s)\n77 \n78 \n79 def _modified_precision(candidate, references, n):\n80 \"\"\"Calculate modified ngram precision.\n81 \n82 The normal precision method may lead to some wrong translations with\n83 high-precision, e.g., the translation, in which a word of reference\n84 repeats several times, has very high precision. So in the modified\n85 n-gram precision, a reference word will be considered exhausted after\n86 a matching candidate word is identified.\n87 \n88 Paper examples:\n89 \n90 >>> _modified_precision(\n91 ... 'the the the the the the the'.split(),\n92 ... 
['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],\n93 ... n=1,\n94 ... )\n95 0.28...\n96 \n97 >>> _modified_precision(\n98 ... 'the the the the the the the'.split(),\n99 ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],\n100 ... n=2,\n101 ... )\n102 0.0\n103 \n104 >>> _modified_precision(\n105 ... 'of the'.split(),\n106 ... [\n107 ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),\n108 ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),\n109 ... 'It is the practical guide for the army always to heed the directions of the party'.split(),\n110 ... ],\n111 ... n=1,\n112 ... )\n113 1.0\n114 \n115 >>> _modified_precision(\n116 ... 'of the'.split(),\n117 ... [\n118 ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),\n119 ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),\n120 ... 'It is the practical guide for the army always to heed the directions of the party'.split(),\n121 ... ],\n122 ... n=2,\n123 ... )\n124 1.0\n125 \n126 More examples:\n127 \n128 >>> weights = [0.25, 0.25, 0.25, 0.25]\n129 >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n130 ... 'ensures', 'that', 'the', 'military', 'always',\n131 ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n132 \n133 >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n134 ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n135 ... 'that', 'party', 'direct']\n136 \n137 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n138 ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n139 ... 'heed', 'Party', 'commands']\n140 \n141 >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n142 ... 'guarantees', 'the', 'military', 'forces', 'always',\n143 ... 'being', 'under', 'the', 'command', 'of', 'the',\n144 ... 'Party']\n145 \n146 >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n147 ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n148 ... 'of', 'the', 'party']\n149 \n150 Unigrams:\n151 \n152 >>> _modified_precision(\n153 ... candidate1,\n154 ... [reference1, reference2, reference3],\n155 ... n=1,\n156 ... )\n157 0.94...\n158 \n159 >>> _modified_precision(\n160 ... candidate2,\n161 ... [reference1, reference2, reference3],\n162 ... n=1,\n163 ... )\n164 0.57...\n165 \n166 Bigrams:\n167 \n168 >>> _modified_precision(\n169 ... candidate1,\n170 ... [reference1, reference2, reference3],\n171 ... n=2,\n172 ... )\n173 0.58...\n174 \n175 >>> _modified_precision(\n176 ... candidate2,\n177 ... [reference1, reference2, reference3],\n178 ... n=2,\n179 ... 
)\n180 0.07...\n181 \n182 \"\"\"\n183 counts = Counter(ngrams(candidate, n))\n184 \n185 if not counts:\n186 return 0\n187 \n188 max_counts = {}\n189 for reference in references:\n190 reference_counts = Counter(ngrams(reference, n))\n191 for ngram in counts:\n192 max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])\n193 \n194 clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items())\n195 \n196 return sum(clipped_counts.values()) / sum(counts.values())\n197 \n198 \n199 def _brevity_penalty(candidate, references):\n200 \"\"\"Calculate brevity penalty.\n201 \n202 As the modified n-gram precision still has the problem from the short\n203 length sentence, brevity penalty is used to modify the overall BLEU\n204 score according to length.\n205 \n206 \"\"\"\n207 c = len(candidate)\n208 r = min(abs(len(r) - c) for r in references)\n209 \n210 if c > r:\n211 return 1\n212 else:\n213 return math.exp(1 - r / c)\n214 \n215 \n216 # run doctests\n217 if __name__ == \"__main__\":\n218 import doctest\n219 doctest.testmod(optionflags=doctest.ELLIPSIS)\n220\n[end of nltk/align/bleu_score.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/nltk/align/bleu_score.py b/nltk/align/bleu_score.py\n--- a/nltk/align/bleu_score.py\n+++ b/nltk/align/bleu_score.py\n@@ -203,9 +203,57 @@\n length sentence, brevity penalty is used to modify the overall BLEU\n score according to length.\n \n+ An example from the paper. There are three references with length 12, 15\n+ and 17. And a terse candidate of the length 12. The brevity penalty is 1.\n+\n+ >>> references = [['a'] * 12, ['a'] * 15, ['a'] * 17]\n+ >>> candidate = ['a'] * 12\n+ >>> _brevity_penalty(candidate, references)\n+ 1.0\n+\n+ In case a candidate translation is shorter than the references, penalty is\n+ applied.\n+\n+ >>> references = [['a'] * 28, ['a'] * 28]\n+ >>> candidate = ['a'] * 12\n+ >>> _brevity_penalty(candidate, references)\n+ 0.2635...\n+\n+ The length of the closest reference is used to compute the penalty. 
If the
+    length of a candidate is 12, and the reference lengths are 13 and 2, the
+    penalty is applied because the candidate length (12) is less then the
+    closest reference length (13).
+
+    >>> references = [['a'] * 13, ['a'] * 2]
+    >>> candidate = ['a'] * 12
+    >>> _brevity_penalty(candidate, references)
+    0.92...
+
+    The brevity penalty doesn't depend on reference order. More importantly,
+    when two reference sentences are at the same distance, the shortest
+    reference sentence length is used.
+
+    >>> references = [['a'] * 13, ['a'] * 11]
+    >>> candidate = ['a'] * 12
+    >>> _brevity_penalty(candidate, references) == _brevity_penalty(candidate, reversed(references)) == 1
+    True
+
+    A test example from mteval-v13a.pl (starting from the line 705):
+
+    >>> references = [['a'] * 11, ['a'] * 8]
+    >>> candidate = ['a'] * 7
+    >>> _brevity_penalty(candidate, references)
+    0.86...
+
+    >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
+    >>> candidate = ['a'] * 7
+    >>> _brevity_penalty(candidate, references)
+    1.0
+
     """
     c = len(candidate)
-    r = min(abs(len(r) - c) for r in references)
+    ref_lens = (len(reference) for reference in references)
+    r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))
 
     if c > r:
         return 1

verification_info: the golden diff, issue text, and original nltk/align/bleu_score.py source, as shown above.
num_tokens_prompt: 3458
num_tokens_diff: 728

problem_id: gh_patches_debug_64325
source: rasdani/github-patches
task_type: git_diff
in_source_id: pex-tool__pex-1725

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.

Release 2.1.80
On the docket:
+ [x] Support booting via `/bin/sh` with `--sh-boot`. (#1721)
+ [x] Fix more pathologic lock creation slowness. (#1723)

[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3 
4 __version__ = "2.1.79"
5
[end of pex/version.py]

I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply.
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.79\"\n+__version__ = \"2.1.80\"\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/pex/version.py b/pex/version.py\\n--- a/pex/version.py\\n+++ b/pex/version.py\\n@@ -1,4 +1,4 @@\\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\\n \\n-__version__ = \\\"2.1.79\\\"\\n+__version__ = \\\"2.1.80\\\"\\n\", \"issue\": \"Release 2.1.80\\nOn the docket:\\r\\n+ [x] Support booting via `/bin/sh` with `--sh-boot`. (#1721)\\r\\n+ [x] Fix more pathologic lock creation slowness. 
(#1723)\\n\", \"before_files\": [{\"content\": \"# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\\n\\n__version__ = \\\"2.1.79\\\"\\n\", \"path\": \"pex/version.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":640,"string":"640"},"num_tokens_diff":{"kind":"number","value":96,"string":"96"}}},{"rowIdx":18152,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_43869"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"aws__aws-cli-3331"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\naws configure get and aws configure set with multiword profile names are inconsistent\nIt seems that `aws configure set --profile \"two words\"` will add single quotes around the profile name, but `aws configure get --profile \"two words\"` will search for a profile name that does not have single quotes around the profile name.\r\n\r\nThese two methods should behave in a similar manner.\r\n\r\nTo reproduce:\r\n\r\n```\r\n$ aws --version\r\naws-cli/1.15.10 Python/3.6.5 Darwin/17.4.0 botocore/1.10.10\r\n$ aws configure set aws_access_key_id test --profile \"test profile\"\r\n$ aws configure get aws_access_key_id --profile \"test profile\"\r\nThe config profile (test profile) could not be found\r\n$ aws configure get aws_access_key_id --profile \"'test profile'\"\r\ntest\r\n```\n\n\n\n[start of awscli/customizations/configure/set.py]\n1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n2 #\n3 # Licensed under the Apache License, Version 2.0 (the \"License\"). You\n4 # may not use this file except in compliance with the License. A copy of\n5 # the License is located at\n6 #\n7 # http://aws.amazon.com/apache2.0/\n8 #\n9 # or in the \"license\" file accompanying this file. This file is\n10 # distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n11 # ANY KIND, either express or implied. See the License for the specific\n12 # language governing permissions and limitations under the License.\n13 import os\n14 \n15 from awscli.customizations.commands import BasicCommand\n16 from awscli.customizations.configure.writer import ConfigFileWriter\n17 \n18 from . 
import PREDEFINED_SECTION_NAMES, profile_to_section\n19 \n20 \n21 class ConfigureSetCommand(BasicCommand):\n22 NAME = 'set'\n23 DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',\n24 '_description.rst')\n25 SYNOPSIS = 'aws configure set varname value [--profile profile-name]'\n26 EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')\n27 ARG_TABLE = [\n28 {'name': 'varname',\n29 'help_text': 'The name of the config value to set.',\n30 'action': 'store',\n31 'cli_type_name': 'string', 'positional_arg': True},\n32 {'name': 'value',\n33 'help_text': 'The value to set.',\n34 'action': 'store',\n35 'no_paramfile': True, # To disable the default paramfile behavior\n36 'cli_type_name': 'string', 'positional_arg': True},\n37 ]\n38 # Any variables specified in this list will be written to\n39 # the ~/.aws/credentials file instead of ~/.aws/config.\n40 _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',\n41 'aws_session_token']\n42 \n43 def __init__(self, session, config_writer=None):\n44 super(ConfigureSetCommand, self).__init__(session)\n45 if config_writer is None:\n46 config_writer = ConfigFileWriter()\n47 self._config_writer = config_writer\n48 \n49 def _run_main(self, args, parsed_globals):\n50 varname = args.varname\n51 value = args.value\n52 section = 'default'\n53 # Before handing things off to the config writer,\n54 # we need to find out three things:\n55 # 1. What section we're writing to (section).\n56 # 2. The name of the config key (varname)\n57 # 3. The actual value (value).\n58 if '.' not in varname:\n59 # unqualified name, scope it to the current\n60 # profile (or leave it as the 'default' section if\n61 # no profile is set).\n62 if self._session.profile is not None:\n63 section = profile_to_section(self._session.profile)\n64 else:\n65 # First figure out if it's been scoped to a profile.\n66 parts = varname.split('.')\n67 if parts[0] in ('default', 'profile'):\n68 # Then we know we're scoped to a profile.\n69 if parts[0] == 'default':\n70 section = 'default'\n71 remaining = parts[1:]\n72 else:\n73 # [profile, profile_name, ...]\n74 section = profile_to_section(parts[1])\n75 remaining = parts[2:]\n76 varname = remaining[0]\n77 if len(remaining) == 2:\n78 value = {remaining[1]: value}\n79 elif parts[0] not in PREDEFINED_SECTION_NAMES:\n80 if self._session.profile is not None:\n81 section = profile_to_section(self._session.profile)\n82 else:\n83 profile_name = self._session.get_config_variable('profile')\n84 if profile_name is not None:\n85 section = profile_name\n86 varname = parts[0]\n87 if len(parts) == 2:\n88 value = {parts[1]: value}\n89 elif len(parts) == 2:\n90 # Otherwise it's something like \"set preview.service true\"\n91 # of something in the [plugin] section.\n92 section, varname = parts\n93 config_filename = os.path.expanduser(\n94 self._session.get_config_variable('config_file'))\n95 updated_config = {'__section__': section, varname: value}\n96 if varname in self._WRITE_TO_CREDS_FILE:\n97 config_filename = os.path.expanduser(\n98 self._session.get_config_variable('credentials_file'))\n99 section_name = updated_config['__section__']\n100 if section_name.startswith('profile '):\n101 updated_config['__section__'] = section_name[8:]\n102 self._config_writer.update_config(updated_config, config_filename)\n103\n[end of awscli/customizations/configure/set.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
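As an aside on the reproduction above: the reported failure is a plain exact-string section lookup. A minimal illustrative sketch using the standard library's configparser (an assumption for illustration only, not awscli's actual config reader/writer):

```python
import configparser

cp = configparser.ConfigParser()
# The issue reports that "aws configure set" writes the section name with literal quotes:
cp.read_string("['test profile']\naws_access_key_id = test\n")

print(cp.has_section("test profile"))    # False -> "could not be found"
print(cp.has_section("'test profile'"))  # True  -> only the quoted spelling matches
```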
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py\n--- a/awscli/customizations/configure/set.py\n+++ b/awscli/customizations/configure/set.py\n@@ -46,13 +46,17 @@\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n \n+ def _get_config_file(self, path):\n+ config_path = self._session.get_config_variable(path)\n+ return os.path.expanduser(config_path)\n+\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n- section = 'default'\n+ profile = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n- # 1. What section we're writing to (section).\n+ # 1. What section we're writing to (profile).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' 
not in varname:\n@@ -60,43 +64,44 @@\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n- section = profile_to_section(self._session.profile)\n+ profile = self._session.profile\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n- section = 'default'\n+ profile = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n- section = profile_to_section(parts[1])\n+ profile = parts[1]\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n- section = profile_to_section(self._session.profile)\n+ profile = self._session.profile\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n- section = profile_name\n+ profile = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n- section, varname = parts\n- config_filename = os.path.expanduser(\n- self._session.get_config_variable('config_file'))\n- updated_config = {'__section__': section, varname: value}\n+ profile, varname = parts\n+ config_filename = self._get_config_file('config_file')\n if varname in self._WRITE_TO_CREDS_FILE:\n- config_filename = os.path.expanduser(\n- self._session.get_config_variable('credentials_file'))\n- section_name = updated_config['__section__']\n- if section_name.startswith('profile '):\n- updated_config['__section__'] = section_name[8:]\n+ # When writing to the creds file, the section is just the profile\n+ section = profile\n+ config_filename = self._get_config_file('credentials_file')\n+ elif profile in PREDEFINED_SECTION_NAMES or profile == 'default':\n+ section = profile\n+ else:\n+ section = profile_to_section(profile)\n+ updated_config = {'__section__': section, varname: value}\n self._config_writer.update_config(updated_config, config_filename)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py\\n--- a/awscli/customizations/configure/set.py\\n+++ b/awscli/customizations/configure/set.py\\n@@ -46,13 +46,17 @@\\n config_writer = ConfigFileWriter()\\n self._config_writer = config_writer\\n \\n+ def _get_config_file(self, path):\\n+ config_path = self._session.get_config_variable(path)\\n+ return os.path.expanduser(config_path)\\n+\\n def _run_main(self, args, parsed_globals):\\n varname = args.varname\\n value = args.value\\n- section = 'default'\\n+ profile = 'default'\\n # Before handing things off to the config writer,\\n # we need to find out three things:\\n- # 1. What section we're writing to (section).\\n+ # 1. What section we're writing to (profile).\\n # 2. The name of the config key (varname)\\n # 3. The actual value (value).\\n if '.' 
not in varname:\\n@@ -60,43 +64,44 @@\\n # profile (or leave it as the 'default' section if\\n # no profile is set).\\n if self._session.profile is not None:\\n- section = profile_to_section(self._session.profile)\\n+ profile = self._session.profile\\n else:\\n # First figure out if it's been scoped to a profile.\\n parts = varname.split('.')\\n if parts[0] in ('default', 'profile'):\\n # Then we know we're scoped to a profile.\\n if parts[0] == 'default':\\n- section = 'default'\\n+ profile = 'default'\\n remaining = parts[1:]\\n else:\\n # [profile, profile_name, ...]\\n- section = profile_to_section(parts[1])\\n+ profile = parts[1]\\n remaining = parts[2:]\\n varname = remaining[0]\\n if len(remaining) == 2:\\n value = {remaining[1]: value}\\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\\n if self._session.profile is not None:\\n- section = profile_to_section(self._session.profile)\\n+ profile = self._session.profile\\n else:\\n profile_name = self._session.get_config_variable('profile')\\n if profile_name is not None:\\n- section = profile_name\\n+ profile = profile_name\\n varname = parts[0]\\n if len(parts) == 2:\\n value = {parts[1]: value}\\n elif len(parts) == 2:\\n # Otherwise it's something like \\\"set preview.service true\\\"\\n # of something in the [plugin] section.\\n- section, varname = parts\\n- config_filename = os.path.expanduser(\\n- self._session.get_config_variable('config_file'))\\n- updated_config = {'__section__': section, varname: value}\\n+ profile, varname = parts\\n+ config_filename = self._get_config_file('config_file')\\n if varname in self._WRITE_TO_CREDS_FILE:\\n- config_filename = os.path.expanduser(\\n- self._session.get_config_variable('credentials_file'))\\n- section_name = updated_config['__section__']\\n- if section_name.startswith('profile '):\\n- updated_config['__section__'] = section_name[8:]\\n+ # When writing to the creds file, the section is just the profile\\n+ section = profile\\n+ config_filename = self._get_config_file('credentials_file')\\n+ elif profile in PREDEFINED_SECTION_NAMES or profile == 'default':\\n+ section = profile\\n+ else:\\n+ section = profile_to_section(profile)\\n+ updated_config = {'__section__': section, varname: value}\\n self._config_writer.update_config(updated_config, config_filename)\\n\", \"issue\": \"aws configure get and aws configure set with multiword profile names are inconsistent\\nIt seems that `aws configure set --profile \\\"two words\\\"` will add single quotes around the profile name, but `aws configure get --profile \\\"two words\\\"` will search for a profile name that does not have single quotes around the profile name.\\r\\n\\r\\nThese two methods should behave in a similar manner.\\r\\n\\r\\nTo reproduce:\\r\\n\\r\\n```\\r\\n$ aws --version\\r\\naws-cli/1.15.10 Python/3.6.5 Darwin/17.4.0 botocore/1.10.10\\r\\n$ aws configure set aws_access_key_id test --profile \\\"test profile\\\"\\r\\n$ aws configure get aws_access_key_id --profile \\\"test profile\\\"\\r\\nThe config profile (test profile) could not be found\\r\\n$ aws configure get aws_access_key_id --profile \\\"'test profile'\\\"\\r\\ntest\\r\\n```\\n\", \"before_files\": [{\"content\": \"# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\\n#\\n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\"). You\\n# may not use this file except in compliance with the License. 
A copy of\\n# the License is located at\\n#\\n# http://aws.amazon.com/apache2.0/\\n#\\n# or in the \\\"license\\\" file accompanying this file. This file is\\n# distributed on an \\\"AS IS\\\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\\n# ANY KIND, either express or implied. See the License for the specific\\n# language governing permissions and limitations under the License.\\nimport os\\n\\nfrom awscli.customizations.commands import BasicCommand\\nfrom awscli.customizations.configure.writer import ConfigFileWriter\\n\\nfrom . import PREDEFINED_SECTION_NAMES, profile_to_section\\n\\n\\nclass ConfigureSetCommand(BasicCommand):\\n NAME = 'set'\\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',\\n '_description.rst')\\n SYNOPSIS = 'aws configure set varname value [--profile profile-name]'\\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')\\n ARG_TABLE = [\\n {'name': 'varname',\\n 'help_text': 'The name of the config value to set.',\\n 'action': 'store',\\n 'cli_type_name': 'string', 'positional_arg': True},\\n {'name': 'value',\\n 'help_text': 'The value to set.',\\n 'action': 'store',\\n 'no_paramfile': True, # To disable the default paramfile behavior\\n 'cli_type_name': 'string', 'positional_arg': True},\\n ]\\n # Any variables specified in this list will be written to\\n # the ~/.aws/credentials file instead of ~/.aws/config.\\n _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',\\n 'aws_session_token']\\n\\n def __init__(self, session, config_writer=None):\\n super(ConfigureSetCommand, self).__init__(session)\\n if config_writer is None:\\n config_writer = ConfigFileWriter()\\n self._config_writer = config_writer\\n\\n def _run_main(self, args, parsed_globals):\\n varname = args.varname\\n value = args.value\\n section = 'default'\\n # Before handing things off to the config writer,\\n # we need to find out three things:\\n # 1. What section we're writing to (section).\\n # 2. The name of the config key (varname)\\n # 3. The actual value (value).\\n if '.' 
not in varname:\\n # unqualified name, scope it to the current\\n # profile (or leave it as the 'default' section if\\n # no profile is set).\\n if self._session.profile is not None:\\n section = profile_to_section(self._session.profile)\\n else:\\n # First figure out if it's been scoped to a profile.\\n parts = varname.split('.')\\n if parts[0] in ('default', 'profile'):\\n # Then we know we're scoped to a profile.\\n if parts[0] == 'default':\\n section = 'default'\\n remaining = parts[1:]\\n else:\\n # [profile, profile_name, ...]\\n section = profile_to_section(parts[1])\\n remaining = parts[2:]\\n varname = remaining[0]\\n if len(remaining) == 2:\\n value = {remaining[1]: value}\\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\\n if self._session.profile is not None:\\n section = profile_to_section(self._session.profile)\\n else:\\n profile_name = self._session.get_config_variable('profile')\\n if profile_name is not None:\\n section = profile_name\\n varname = parts[0]\\n if len(parts) == 2:\\n value = {parts[1]: value}\\n elif len(parts) == 2:\\n # Otherwise it's something like \\\"set preview.service true\\\"\\n # of something in the [plugin] section.\\n section, varname = parts\\n config_filename = os.path.expanduser(\\n self._session.get_config_variable('config_file'))\\n updated_config = {'__section__': section, varname: value}\\n if varname in self._WRITE_TO_CREDS_FILE:\\n config_filename = os.path.expanduser(\\n self._session.get_config_variable('credentials_file'))\\n section_name = updated_config['__section__']\\n if section_name.startswith('profile '):\\n updated_config['__section__'] = section_name[8:]\\n self._config_writer.update_config(updated_config, config_filename)\\n\", \"path\": \"awscli/customizations/configure/set.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1920,"string":"1,920"},"num_tokens_diff":{"kind":"number","value":833,"string":"833"}}},{"rowIdx":18153,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_41410"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"qtile__qtile-1943"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nPopups and child-windows are drawn behind parent windows in certain situations\n# Issue description\r\nIn some situations I noticed that popups or child-windows are drawn (or moved) behind their respective parents; for child-windows this can be quite a problem in case the child is blocking, since then the software might be completely unresponsive.\r\n\r\nPlaces where I have noticed this and that I remember:\r\n1. Virtualbox (fullscreen) (the main window, not a client): Context menus on VM-list open in the background.\r\n2. 
Veracrypt (floating) \"mount\" child-window: The window either spawns in the back or is moved there once it loses focus.\r\n\r\nThere have been some other cases, but due to testing PR #1870, which I initially expected to be the cause for this, I haven't taken note of all of them.\r\nSome software definitely has working popup-windows, so Virtualbox might be doing something weird.\r\n\r\n# Qtile version\r\n\r\nhttps://github.com/qtile/qtile/commit/c6b80e9444e9b4e5dc2f52ca1954dc502c45f2eb\r\n(newer versions not tested yet)\r\n\r\n# Configuration\r\n\r\nI'll only include one snippet here, since verything else (setting up Keys and Widgets) most likely has nothing to do with the problem.\r\n\r\n```python\r\n@hook.subscribe.client_new\r\ndef modify_window(client):\r\n if (client.window.get_wm_transient_for() or client.window.get_wm_type() in floating_types):\r\n client.floating = True\r\n```\r\n\r\nOn top of this I am currently using fake screens instead of normal screens.\r\n\r\nIt would be nice if somebody could confirm this, since I am not entirely sure this isn't all caused by my own PR.\r\n\r\nRelated to #1870, which could fix at least the described issue with child-windows.\n\n\n\n[start of libqtile/layout/floating.py]\n1 # Copyright (c) 2010 matt\n2 # Copyright (c) 2010-2011 Paul Colomiets\n3 # Copyright (c) 2011 Mounier Florian\n4 # Copyright (c) 2012 Craig Barnes\n5 # Copyright (c) 2012, 2014-2015 Tycho Andersen\n6 # Copyright (c) 2013 Tao Sauvage\n7 # Copyright (c) 2013 Julien Iguchi-Cartigny\n8 # Copyright (c) 2014 ramnes\n9 # Copyright (c) 2014 Sean Vig\n10 # Copyright (c) 2014 dequis\n11 # Copyright (c) 2018 Nazar Mokrynskyi\n12 #\n13 # Permission is hereby granted, free of charge, to any person obtaining a copy\n14 # of this software and associated documentation files (the \"Software\"), to deal\n15 # in the Software without restriction, including without limitation the rights\n16 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n17 # copies of the Software, and to permit persons to whom the Software is\n18 # furnished to do so, subject to the following conditions:\n19 #\n20 # The above copyright notice and this permission notice shall be included in\n21 # all copies or substantial portions of the Software.\n22 #\n23 # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n24 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n25 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n26 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n27 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n28 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n29 # SOFTWARE.\n30 \n31 import warnings\n32 \n33 from libqtile.config import Match\n34 from libqtile.layout.base import Layout\n35 from libqtile.log_utils import logger\n36 \n37 \n38 class Floating(Layout):\n39 \"\"\"\n40 Floating layout, which does nothing with windows but handles focus order\n41 \"\"\"\n42 defaults = [\n43 (\"border_focus\", \"#0000ff\", \"Border colour for the focused window.\"),\n44 (\"border_normal\", \"#000000\", \"Border colour for un-focused windows.\"),\n45 (\"border_width\", 1, \"Border width.\"),\n46 (\"max_border_width\", 0, \"Border width for maximize.\"),\n47 (\"fullscreen_border_width\", 0, \"Border width for fullscreen.\"),\n48 (\"name\", \"floating\", \"Name of this layout.\"),\n49 ]\n50 \n51 def __init__(self, float_rules=None, no_reposition_rules=None, **config):\n52 \"\"\"\n53 If you have certain apps that you always want to float you can provide\n54 ``float_rules`` to do so. ``float_rules`` are a list of\n55 Match objects::\n56 \n57 from libqtile.config import Match\n58 Match(title=WM_NAME, wm_class=WM_CLASS, role=WM_WINDOW_ROLE)\n59 \n60 When a new window is opened its ``match`` method is called with each of\n61 these rules. If one matches, the window will float. The following\n62 will float GIMP and Skype::\n63 \n64 from libqtile.config import Match\n65 float_rules=[Match(wm_class=\"skype\"), Match(wm_class=\"gimp\")]\n66 \n67 Specify these in the ``floating_layout`` in your config.\n68 \n69 Floating layout will try to center most of floating windows by default,\n70 but if you don't want this to happen for certain windows that are\n71 centered by mistake, you can use ``no_reposition_rules`` option to\n72 specify them and layout will rely on windows to position themselves in\n73 correct location on the screen.\n74 \"\"\"\n75 Layout.__init__(self, **config)\n76 self.clients = []\n77 self.focused = None\n78 self.group = None\n79 self.float_rules = float_rules or []\n80 \n81 warned = False\n82 for index, rule in enumerate(self.float_rules):\n83 if isinstance(rule, Match):\n84 continue\n85 \n86 if not warned:\n87 message = \"Non-config.Match objects in float_rules are \" \\\n88 \"deprecated\"\n89 warnings.warn(message, DeprecationWarning)\n90 logger.warning(message)\n91 warned = True\n92 \n93 match = Match(\n94 title=rule.get(\"wname\"), wm_class=rule.get(\"wmclass\"),\n95 role=rule.get(\"role\"), wm_type=rule.get(\"wm_type\"),\n96 wm_instance_class=rule.get(\"wm_instance_class\"),\n97 net_wm_pid=rule.get(\"net_wm_pid\"))\n98 \n99 self.float_rules[index] = match\n100 \n101 self.no_reposition_rules = no_reposition_rules or []\n102 self.add_defaults(Floating.defaults)\n103 \n104 def match(self, win):\n105 \"\"\"Used to default float some windows\"\"\"\n106 return any(win.match(rule) for rule in self.float_rules)\n107 \n108 def find_clients(self, group):\n109 \"\"\"Find all clients belonging to a given group\"\"\"\n110 return [c for c in self.clients if c.group is group]\n111 \n112 def to_screen(self, group, new_screen):\n113 \"\"\"Adjust offsets of clients within current screen\"\"\"\n114 for win in self.find_clients(group):\n115 if win.maximized:\n116 win.maximized = True\n117 elif win.fullscreen:\n118 win.fullscreen = True\n119 else:\n120 # catch if the client hasn't been configured\n121 
try:\n122 # By default, place window at same offset from top corner\n123 new_x = new_screen.x + win.float_x\n124 new_y = new_screen.y + win.float_y\n125 except AttributeError:\n126 # this will be handled in .configure()\n127 pass\n128 else:\n129 # make sure window isn't off screen left/right...\n130 new_x = min(new_x, new_screen.x + new_screen.width - win.width)\n131 new_x = max(new_x, new_screen.x)\n132 # and up/down\n133 new_y = min(new_y, new_screen.y + new_screen.height - win.height)\n134 new_y = max(new_y, new_screen.y)\n135 \n136 win.x = new_x\n137 win.y = new_y\n138 win.group = new_screen.group\n139 \n140 def focus_first(self, group=None):\n141 if group is None:\n142 clients = self.clients\n143 else:\n144 clients = self.find_clients(group)\n145 \n146 if clients:\n147 return clients[0]\n148 \n149 def focus_next(self, win):\n150 if win not in self.clients or win.group is None:\n151 return\n152 \n153 clients = self.find_clients(win.group)\n154 idx = clients.index(win)\n155 if len(clients) > idx + 1:\n156 return clients[idx + 1]\n157 \n158 def focus_last(self, group=None):\n159 if group is None:\n160 clients = self.clients\n161 else:\n162 clients = self.find_clients(group)\n163 \n164 if clients:\n165 return clients[-1]\n166 \n167 def focus_previous(self, win):\n168 if win not in self.clients or win.group is None:\n169 return\n170 \n171 clients = self.find_clients(win.group)\n172 idx = clients.index(win)\n173 if idx > 0:\n174 return clients[idx - 1]\n175 \n176 def focus(self, client):\n177 self.focused = client\n178 \n179 def blur(self):\n180 self.focused = None\n181 \n182 def compute_client_position(self, client, screen_rect):\n183 \"\"\" recompute client.x and client.y, returning whether or not to place\n184 this client above other windows or not \"\"\"\n185 above = False\n186 transient_for = client.window.get_wm_transient_for()\n187 win = client.group.qtile.windows_map.get(transient_for)\n188 if win is not None:\n189 # if transient for a window, place in the center of the window\n190 center_x = win.x + win.width / 2\n191 center_y = win.y + win.height / 2\n192 else:\n193 center_x = screen_rect.x + screen_rect.width / 2\n194 center_y = screen_rect.y + screen_rect.height / 2\n195 above = True\n196 \n197 x = center_x - client.width / 2\n198 y = center_y - client.height / 2\n199 \n200 # don't go off the right...\n201 x = min(x, screen_rect.x + screen_rect.width)\n202 # or left...\n203 x = max(x, screen_rect.x)\n204 # or bottom...\n205 y = min(y, screen_rect.y + screen_rect.height)\n206 # or top\n207 y = max(y, screen_rect.y)\n208 \n209 client.x = int(round(x))\n210 client.y = int(round(y))\n211 return above\n212 \n213 def configure(self, client, screen_rect):\n214 if client.has_focus:\n215 bc = self.border_focus\n216 else:\n217 bc = self.border_normal\n218 \n219 if client.maximized:\n220 bw = self.max_border_width\n221 elif client.fullscreen:\n222 bw = self.fullscreen_border_width\n223 else:\n224 bw = self.border_width\n225 \n226 # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,\n227 # don't reposition it anywhere, let Java app to control it\n228 cls = client.window.get_wm_class() or ''\n229 is_java_dropdown = 'sun-awt-X11-XWindowPeer' in cls\n230 if is_java_dropdown:\n231 client.paint_borders(bc, bw)\n232 client.cmd_bring_to_front()\n233 \n234 # similar to above but the X11 version, the client may have already\n235 # placed itself. 
let's respect that\n236 elif client.has_user_set_position():\n237 client.paint_borders(bc, bw)\n238 client.cmd_bring_to_front()\n239 \n240 # ok, it's not java and the window itself didn't position it, but users\n241 # may still have asked us not to mess with it\n242 elif any(m.compare(client) for m in self.no_reposition_rules):\n243 client.paint_borders(bc, bw)\n244 client.cmd_bring_to_front()\n245 \n246 else:\n247 above = False\n248 \n249 # We definitely have a screen here, so let's be sure we'll float on screen\n250 try:\n251 client.float_x\n252 client.float_y\n253 except AttributeError:\n254 # this window hasn't been placed before, let's put it in a sensible spot\n255 above = self.compute_client_position(client, screen_rect)\n256 \n257 client.place(\n258 client.x,\n259 client.y,\n260 client.width,\n261 client.height,\n262 bw,\n263 bc,\n264 above,\n265 )\n266 client.unhide()\n267 \n268 def add(self, client):\n269 self.clients.append(client)\n270 self.focused = client\n271 \n272 def remove(self, client):\n273 if client not in self.clients:\n274 return\n275 \n276 next_focus = self.focus_next(client)\n277 if client is self.focused:\n278 self.blur()\n279 self.clients.remove(client)\n280 return next_focus\n281 \n282 def info(self):\n283 d = Layout.info(self)\n284 d[\"clients\"] = [c.name for c in self.clients]\n285 return d\n286 \n287 def cmd_next(self):\n288 # This can't ever be called, but implement the abstract method\n289 pass\n290 \n291 def cmd_previous(self):\n292 # This can't ever be called, but implement the abstract method\n293 pass\n294\n[end of libqtile/layout/floating.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/libqtile/layout/floating.py b/libqtile/layout/floating.py\n--- a/libqtile/layout/floating.py\n+++ b/libqtile/layout/floating.py\n@@ -179,35 +179,53 @@\n def blur(self):\n self.focused = None\n \n+ def on_screen(self, client, screen_rect):\n+ if client.x < screen_rect.x: # client's left edge\n+ return False\n+ if screen_rect.x + screen_rect.width < client.x + client.width: # right\n+ return False\n+ if client.y < screen_rect.y: # top\n+ return False\n+ if screen_rect.y + screen_rect.width < client.y + client.height: # bottom\n+ return False\n+ return True\n+\n def compute_client_position(self, client, screen_rect):\n \"\"\" recompute client.x and client.y, returning whether or not to place\n this client above other windows or not 
\"\"\"\n- above = False\n- transient_for = client.window.get_wm_transient_for()\n- win = client.group.qtile.windows_map.get(transient_for)\n- if win is not None:\n- # if transient for a window, place in the center of the window\n- center_x = win.x + win.width / 2\n- center_y = win.y + win.height / 2\n- else:\n- center_x = screen_rect.x + screen_rect.width / 2\n- center_y = screen_rect.y + screen_rect.height / 2\n- above = True\n-\n- x = center_x - client.width / 2\n- y = center_y - client.height / 2\n-\n- # don't go off the right...\n- x = min(x, screen_rect.x + screen_rect.width)\n- # or left...\n- x = max(x, screen_rect.x)\n- # or bottom...\n- y = min(y, screen_rect.y + screen_rect.height)\n- # or top\n- y = max(y, screen_rect.y)\n-\n- client.x = int(round(x))\n- client.y = int(round(y))\n+ above = True\n+\n+ if client.has_user_set_position() and not self.on_screen(client, screen_rect):\n+ # move to screen\n+ client.x = screen_rect.x + client.x\n+ client.y = screen_rect.y + client.y\n+ if not client.has_user_set_position() or not self.on_screen(client, screen_rect):\n+ # client has not been properly placed before or it is off screen\n+ transient_for = client.window.get_wm_transient_for()\n+ win = client.group.qtile.windows_map.get(transient_for)\n+ if win is not None:\n+ # if transient for a window, place in the center of the window\n+ center_x = win.x + win.width / 2\n+ center_y = win.y + win.height / 2\n+ above = False\n+ else:\n+ center_x = screen_rect.x + screen_rect.width / 2\n+ center_y = screen_rect.y + screen_rect.height / 2\n+\n+ x = center_x - client.width / 2\n+ y = center_y - client.height / 2\n+\n+ # don't go off the right...\n+ x = min(x, screen_rect.x + screen_rect.width - client.width)\n+ # or left...\n+ x = max(x, screen_rect.x)\n+ # or bottom...\n+ y = min(y, screen_rect.y + screen_rect.height - client.height)\n+ # or top\n+ y = max(y, screen_rect.y)\n+\n+ client.x = int(round(x))\n+ client.y = int(round(y))\n return above\n \n def configure(self, client, screen_rect):\n@@ -231,14 +249,7 @@\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n \n- # similar to above but the X11 version, the client may have already\n- # placed itself. 
let's respect that\n- elif client.has_user_set_position():\n- client.paint_borders(bc, bw)\n- client.cmd_bring_to_front()\n-\n- # ok, it's not java and the window itself didn't position it, but users\n- # may still have asked us not to mess with it\n+ # alternatively, users may have asked us explicitly to leave the client alone\n elif any(m.compare(client) for m in self.no_reposition_rules):\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/libqtile/layout/floating.py b/libqtile/layout/floating.py\\n--- a/libqtile/layout/floating.py\\n+++ b/libqtile/layout/floating.py\\n@@ -179,35 +179,53 @@\\n def blur(self):\\n self.focused = None\\n \\n+ def on_screen(self, client, screen_rect):\\n+ if client.x < screen_rect.x: # client's left edge\\n+ return False\\n+ if screen_rect.x + screen_rect.width < client.x + client.width: # right\\n+ return False\\n+ if client.y < screen_rect.y: # top\\n+ return False\\n+ if screen_rect.y + screen_rect.width < client.y + client.height: # bottom\\n+ return False\\n+ return True\\n+\\n def compute_client_position(self, client, screen_rect):\\n \\\"\\\"\\\" recompute client.x and client.y, returning whether or not to place\\n this client above other windows or not \\\"\\\"\\\"\\n- above = False\\n- transient_for = client.window.get_wm_transient_for()\\n- win = client.group.qtile.windows_map.get(transient_for)\\n- if win is not None:\\n- # if transient for a window, place in the center of the window\\n- center_x = win.x + win.width / 2\\n- center_y = win.y + win.height / 2\\n- else:\\n- center_x = screen_rect.x + screen_rect.width / 2\\n- center_y = screen_rect.y + screen_rect.height / 2\\n- above = True\\n-\\n- x = center_x - client.width / 2\\n- y = center_y - client.height / 2\\n-\\n- # don't go off the right...\\n- x = min(x, screen_rect.x + screen_rect.width)\\n- # or left...\\n- x = max(x, screen_rect.x)\\n- # or bottom...\\n- y = min(y, screen_rect.y + screen_rect.height)\\n- # or top\\n- y = max(y, screen_rect.y)\\n-\\n- client.x = int(round(x))\\n- client.y = int(round(y))\\n+ above = True\\n+\\n+ if client.has_user_set_position() and not self.on_screen(client, screen_rect):\\n+ # move to screen\\n+ client.x = screen_rect.x + client.x\\n+ client.y = screen_rect.y + client.y\\n+ if not client.has_user_set_position() or not self.on_screen(client, screen_rect):\\n+ # client has not been properly placed before or it is off screen\\n+ transient_for = client.window.get_wm_transient_for()\\n+ win = client.group.qtile.windows_map.get(transient_for)\\n+ if win is not None:\\n+ # if transient for a window, place in the center of the window\\n+ center_x = win.x + win.width / 2\\n+ center_y = win.y + win.height / 2\\n+ above = False\\n+ else:\\n+ center_x = screen_rect.x + screen_rect.width / 2\\n+ center_y = screen_rect.y + screen_rect.height / 2\\n+\\n+ x = center_x - client.width / 2\\n+ y = center_y - client.height / 2\\n+\\n+ # don't go off the right...\\n+ x = min(x, screen_rect.x + screen_rect.width - client.width)\\n+ # or left...\\n+ x = max(x, screen_rect.x)\\n+ # or bottom...\\n+ y = min(y, screen_rect.y + screen_rect.height - client.height)\\n+ # or top\\n+ y = max(y, screen_rect.y)\\n+\\n+ client.x = int(round(x))\\n+ client.y = int(round(y))\\n return above\\n \\n def configure(self, client, screen_rect):\\n@@ -231,14 +249,7 @@\\n client.paint_borders(bc, bw)\\n client.cmd_bring_to_front()\\n \\n- # similar to above but the X11 version, the client may have 
already\\n- # placed itself. let's respect that\\n- elif client.has_user_set_position():\\n- client.paint_borders(bc, bw)\\n- client.cmd_bring_to_front()\\n-\\n- # ok, it's not java and the window itself didn't position it, but users\\n- # may still have asked us not to mess with it\\n+ # alternatively, users may have asked us explicitly to leave the client alone\\n elif any(m.compare(client) for m in self.no_reposition_rules):\\n client.paint_borders(bc, bw)\\n client.cmd_bring_to_front()\\n\", \"issue\": \"Popups and child-windows are drawn behind parent windows in certain situations\\n# Issue description\\r\\nIn some situations I noticed that popups or child-windows are drawn (or moved) behind their respective parents; for child-windows this can be quite a problem in case the child is blocking, since then the software might be completely unresponsive.\\r\\n\\r\\nPlaces where I have noticed this and that I remember:\\r\\n1. Virtualbox (fullscreen) (the main window, not a client): Context menus on VM-list open in the background.\\r\\n2. Veracrypt (floating) \\\"mount\\\" child-window: The window either spawns in the back or is moved there once it loses focus.\\r\\n\\r\\nThere have been some other cases, but due to testing PR #1870, which I initially expected to be the cause for this, I haven't taken note of all of them.\\r\\nSome software definitely has working popup-windows, so Virtualbox might be doing something weird.\\r\\n\\r\\n# Qtile version\\r\\n\\r\\nhttps://github.com/qtile/qtile/commit/c6b80e9444e9b4e5dc2f52ca1954dc502c45f2eb\\r\\n(newer versions not tested yet)\\r\\n\\r\\n# Configuration\\r\\n\\r\\nI'll only include one snippet here, since verything else (setting up Keys and Widgets) most likely has nothing to do with the problem.\\r\\n\\r\\n```python\\r\\n@hook.subscribe.client_new\\r\\ndef modify_window(client):\\r\\n if (client.window.get_wm_transient_for() or client.window.get_wm_type() in floating_types):\\r\\n client.floating = True\\r\\n```\\r\\n\\r\\nOn top of this I am currently using fake screens instead of normal screens.\\r\\n\\r\\nIt would be nice if somebody could confirm this, since I am not entirely sure this isn't all caused by my own PR.\\r\\n\\r\\nRelated to #1870, which could fix at least the described issue with child-windows.\\n\", \"before_files\": [{\"content\": \"# Copyright (c) 2010 matt\\n# Copyright (c) 2010-2011 Paul Colomiets\\n# Copyright (c) 2011 Mounier Florian\\n# Copyright (c) 2012 Craig Barnes\\n# Copyright (c) 2012, 2014-2015 Tycho Andersen\\n# Copyright (c) 2013 Tao Sauvage\\n# Copyright (c) 2013 Julien Iguchi-Cartigny\\n# Copyright (c) 2014 ramnes\\n# Copyright (c) 2014 Sean Vig\\n# Copyright (c) 2014 dequis\\n# Copyright (c) 2018 Nazar Mokrynskyi\\n#\\n# Permission is hereby granted, free of charge, to any person obtaining a copy\\n# of this software and associated documentation files (the \\\"Software\\\"), to deal\\n# in the Software without restriction, including without limitation the rights\\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\\n# copies of the Software, and to permit persons to whom the Software is\\n# furnished to do so, subject to the following conditions:\\n#\\n# The above copyright notice and this permission notice shall be included in\\n# all copies or substantial portions of the Software.\\n#\\n# THE SOFTWARE IS PROVIDED \\\"AS IS\\\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\\n# FITNESS FOR A PARTICULAR 
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\\n# SOFTWARE.\\n\\nimport warnings\\n\\nfrom libqtile.config import Match\\nfrom libqtile.layout.base import Layout\\nfrom libqtile.log_utils import logger\\n\\n\\nclass Floating(Layout):\\n \\\"\\\"\\\"\\n Floating layout, which does nothing with windows but handles focus order\\n \\\"\\\"\\\"\\n defaults = [\\n (\\\"border_focus\\\", \\\"#0000ff\\\", \\\"Border colour for the focused window.\\\"),\\n (\\\"border_normal\\\", \\\"#000000\\\", \\\"Border colour for un-focused windows.\\\"),\\n (\\\"border_width\\\", 1, \\\"Border width.\\\"),\\n (\\\"max_border_width\\\", 0, \\\"Border width for maximize.\\\"),\\n (\\\"fullscreen_border_width\\\", 0, \\\"Border width for fullscreen.\\\"),\\n (\\\"name\\\", \\\"floating\\\", \\\"Name of this layout.\\\"),\\n ]\\n\\n def __init__(self, float_rules=None, no_reposition_rules=None, **config):\\n \\\"\\\"\\\"\\n If you have certain apps that you always want to float you can provide\\n ``float_rules`` to do so. ``float_rules`` are a list of\\n Match objects::\\n\\n from libqtile.config import Match\\n Match(title=WM_NAME, wm_class=WM_CLASS, role=WM_WINDOW_ROLE)\\n\\n When a new window is opened its ``match`` method is called with each of\\n these rules. If one matches, the window will float. The following\\n will float GIMP and Skype::\\n\\n from libqtile.config import Match\\n float_rules=[Match(wm_class=\\\"skype\\\"), Match(wm_class=\\\"gimp\\\")]\\n\\n Specify these in the ``floating_layout`` in your config.\\n\\n Floating layout will try to center most of floating windows by default,\\n but if you don't want this to happen for certain windows that are\\n centered by mistake, you can use ``no_reposition_rules`` option to\\n specify them and layout will rely on windows to position themselves in\\n correct location on the screen.\\n \\\"\\\"\\\"\\n Layout.__init__(self, **config)\\n self.clients = []\\n self.focused = None\\n self.group = None\\n self.float_rules = float_rules or []\\n\\n warned = False\\n for index, rule in enumerate(self.float_rules):\\n if isinstance(rule, Match):\\n continue\\n\\n if not warned:\\n message = \\\"Non-config.Match objects in float_rules are \\\" \\\\\\n \\\"deprecated\\\"\\n warnings.warn(message, DeprecationWarning)\\n logger.warning(message)\\n warned = True\\n\\n match = Match(\\n title=rule.get(\\\"wname\\\"), wm_class=rule.get(\\\"wmclass\\\"),\\n role=rule.get(\\\"role\\\"), wm_type=rule.get(\\\"wm_type\\\"),\\n wm_instance_class=rule.get(\\\"wm_instance_class\\\"),\\n net_wm_pid=rule.get(\\\"net_wm_pid\\\"))\\n\\n self.float_rules[index] = match\\n\\n self.no_reposition_rules = no_reposition_rules or []\\n self.add_defaults(Floating.defaults)\\n\\n def match(self, win):\\n \\\"\\\"\\\"Used to default float some windows\\\"\\\"\\\"\\n return any(win.match(rule) for rule in self.float_rules)\\n\\n def find_clients(self, group):\\n \\\"\\\"\\\"Find all clients belonging to a given group\\\"\\\"\\\"\\n return [c for c in self.clients if c.group is group]\\n\\n def to_screen(self, group, new_screen):\\n \\\"\\\"\\\"Adjust offsets of clients within current screen\\\"\\\"\\\"\\n for win in self.find_clients(group):\\n if win.maximized:\\n win.maximized = True\\n elif win.fullscreen:\\n win.fullscreen = True\\n else:\\n # 
catch if the client hasn't been configured\\n try:\\n # By default, place window at same offset from top corner\\n new_x = new_screen.x + win.float_x\\n new_y = new_screen.y + win.float_y\\n except AttributeError:\\n # this will be handled in .configure()\\n pass\\n else:\\n # make sure window isn't off screen left/right...\\n new_x = min(new_x, new_screen.x + new_screen.width - win.width)\\n new_x = max(new_x, new_screen.x)\\n # and up/down\\n new_y = min(new_y, new_screen.y + new_screen.height - win.height)\\n new_y = max(new_y, new_screen.y)\\n\\n win.x = new_x\\n win.y = new_y\\n win.group = new_screen.group\\n\\n def focus_first(self, group=None):\\n if group is None:\\n clients = self.clients\\n else:\\n clients = self.find_clients(group)\\n\\n if clients:\\n return clients[0]\\n\\n def focus_next(self, win):\\n if win not in self.clients or win.group is None:\\n return\\n\\n clients = self.find_clients(win.group)\\n idx = clients.index(win)\\n if len(clients) > idx + 1:\\n return clients[idx + 1]\\n\\n def focus_last(self, group=None):\\n if group is None:\\n clients = self.clients\\n else:\\n clients = self.find_clients(group)\\n\\n if clients:\\n return clients[-1]\\n\\n def focus_previous(self, win):\\n if win not in self.clients or win.group is None:\\n return\\n\\n clients = self.find_clients(win.group)\\n idx = clients.index(win)\\n if idx > 0:\\n return clients[idx - 1]\\n\\n def focus(self, client):\\n self.focused = client\\n\\n def blur(self):\\n self.focused = None\\n\\n def compute_client_position(self, client, screen_rect):\\n \\\"\\\"\\\" recompute client.x and client.y, returning whether or not to place\\n this client above other windows or not \\\"\\\"\\\"\\n above = False\\n transient_for = client.window.get_wm_transient_for()\\n win = client.group.qtile.windows_map.get(transient_for)\\n if win is not None:\\n # if transient for a window, place in the center of the window\\n center_x = win.x + win.width / 2\\n center_y = win.y + win.height / 2\\n else:\\n center_x = screen_rect.x + screen_rect.width / 2\\n center_y = screen_rect.y + screen_rect.height / 2\\n above = True\\n\\n x = center_x - client.width / 2\\n y = center_y - client.height / 2\\n\\n # don't go off the right...\\n x = min(x, screen_rect.x + screen_rect.width)\\n # or left...\\n x = max(x, screen_rect.x)\\n # or bottom...\\n y = min(y, screen_rect.y + screen_rect.height)\\n # or top\\n y = max(y, screen_rect.y)\\n\\n client.x = int(round(x))\\n client.y = int(round(y))\\n return above\\n\\n def configure(self, client, screen_rect):\\n if client.has_focus:\\n bc = self.border_focus\\n else:\\n bc = self.border_normal\\n\\n if client.maximized:\\n bw = self.max_border_width\\n elif client.fullscreen:\\n bw = self.fullscreen_border_width\\n else:\\n bw = self.border_width\\n\\n # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,\\n # don't reposition it anywhere, let Java app to control it\\n cls = client.window.get_wm_class() or ''\\n is_java_dropdown = 'sun-awt-X11-XWindowPeer' in cls\\n if is_java_dropdown:\\n client.paint_borders(bc, bw)\\n client.cmd_bring_to_front()\\n\\n # similar to above but the X11 version, the client may have already\\n # placed itself. 
let's respect that\\n elif client.has_user_set_position():\\n client.paint_borders(bc, bw)\\n client.cmd_bring_to_front()\\n\\n # ok, it's not java and the window itself didn't position it, but users\\n # may still have asked us not to mess with it\\n elif any(m.compare(client) for m in self.no_reposition_rules):\\n client.paint_borders(bc, bw)\\n client.cmd_bring_to_front()\\n\\n else:\\n above = False\\n\\n # We definitely have a screen here, so let's be sure we'll float on screen\\n try:\\n client.float_x\\n client.float_y\\n except AttributeError:\\n # this window hasn't been placed before, let's put it in a sensible spot\\n above = self.compute_client_position(client, screen_rect)\\n\\n client.place(\\n client.x,\\n client.y,\\n client.width,\\n client.height,\\n bw,\\n bc,\\n above,\\n )\\n client.unhide()\\n\\n def add(self, client):\\n self.clients.append(client)\\n self.focused = client\\n\\n def remove(self, client):\\n if client not in self.clients:\\n return\\n\\n next_focus = self.focus_next(client)\\n if client is self.focused:\\n self.blur()\\n self.clients.remove(client)\\n return next_focus\\n\\n def info(self):\\n d = Layout.info(self)\\n d[\\\"clients\\\"] = [c.name for c in self.clients]\\n return d\\n\\n def cmd_next(self):\\n # This can't ever be called, but implement the abstract method\\n pass\\n\\n def cmd_previous(self):\\n # This can't ever be called, but implement the abstract method\\n pass\\n\", \"path\": \"libqtile/layout/floating.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":4081,"string":"4,081"},"num_tokens_diff":{"kind":"number","value":1013,"string":"1,013"}}},{"rowIdx":18154,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_24628"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"scverse__scanpy-1554"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\ndeprecate scvi in external\nI was wondering if we could deprecate the scvi external wrapper as we now have `scvi-tools`. 
I could also update the wrapper to have minimal functionality, but I think it would be better for people to use our API now that it's tightly integrated with scanpy anyway.\n\n\n\n[start of scanpy/external/pp/_scvi.py]\n1 import numpy as np\n2 import pandas as pd\n3 import scipy as sp\n4 \n5 from typing import Optional, Sequence, Union\n6 from anndata import AnnData\n7 \n8 MIN_VERSION = \"0.6.5\"\n9 \n10 \n11 def scvi(\n12 adata: AnnData,\n13 n_hidden: int = 128,\n14 n_latent: int = 10,\n15 n_layers: int = 1,\n16 dispersion: str = \"gene\",\n17 n_epochs: int = 400,\n18 lr: int = 1e-3,\n19 train_size: int = 1.0,\n20 batch_key: Optional[str] = None,\n21 use_highly_variable_genes: bool = True,\n22 subset_genes: Optional[Sequence[Union[int, str]]] = None,\n23 linear_decoder: bool = False,\n24 copy: bool = False,\n25 use_cuda: bool = True,\n26 return_posterior: bool = True,\n27 trainer_kwargs: dict = {},\n28 model_kwargs: dict = {},\n29 ) -> Optional[AnnData]:\n30 \"\"\"\\\n31 SCVI [Lopez18]_.\n32 \n33 Fits scVI model onto raw count data given an anndata object\n34 \n35 scVI uses stochastic optimization and deep neural networks to aggregate information \n36 across similar cells and genes and to approximate the distributions that underlie\n37 observed expression values, while accounting for batch effects and limited sensitivity.\n38 \n39 To use a linear-decoded Variational AutoEncoder model (implementation of [Svensson20]_.),\n40 set linear_decoded = True. Compared to standard VAE, this model is less powerful, but can \n41 be used to inspect which genes contribute to variation in the dataset. It may also be used\n42 for all scVI tasks, like differential expression, batch correction, imputation, etc.\n43 However, batch correction may be less powerful as it assumes a linear model.\n44 \n45 .. note::\n46 More information and bug reports `here `__.\n47 \n48 Parameters\n49 ----------\n50 adata\n51 An anndata file with `X` attribute of unnormalized count data\n52 n_hidden\n53 Number of nodes per hidden layer\n54 n_latent\n55 Dimensionality of the latent space\n56 n_layers\n57 Number of hidden layers used for encoder and decoder NNs\n58 dispersion\n59 One of the following\n60 * `'gene'` - dispersion parameter of NB is constant per gene across cells\n61 * `'gene-batch'` - dispersion can differ between different batches\n62 * `'gene-label'` - dispersion can differ between different labels\n63 * `'gene-cell'` - dispersion can differ for every gene in every cell\n64 n_epochs\n65 Number of epochs to train\n66 lr\n67 Learning rate\n68 train_size\n69 The train size, either a float between 0 and 1 or an integer for the number of training samples to use\n70 batch_key\n71 Column name in anndata.obs for batches. \n72 If None, no batch correction is performed\n73 If not None, batch correction is performed per batch category\n74 use_highly_variable_genes\n75 If true, uses only the genes in anndata.var[\"highly_variable\"]\n76 subset_genes\n77 Optional list of indices or gene names to subset anndata. 
\n78 If not None, use_highly_variable_genes is ignored\n79 linear_decoder\n80 If true, uses LDVAE model, which is an implementation of [Svensson20]_.\n81 copy\n82 If true, a copy of anndata is returned\n83 return_posterior\n84 If true, posterior object is returned\n85 use_cuda\n86 If true, uses cuda\n87 trainer_kwargs\n88 Extra arguments for UnsupervisedTrainer\n89 model_kwargs\n90 Extra arguments for VAE or LDVAE model\n91 \n92 Returns\n93 -------\n94 If `copy` is true, anndata is returned.\n95 If `return_posterior` is true, the posterior object is returned\n96 If both `copy` and `return_posterior` are true, \n97 a tuple of anndata and the posterior are returned in that order. \n98 \n99 `adata.obsm['X_scvi']` stores the latent representations\n100 `adata.obsm['X_scvi_denoised']` stores the normalized mean of the negative binomial\n101 `adata.obsm['X_scvi_sample_rate']` stores the mean of the negative binomial\n102 \n103 If linear_decoder is true:\n104 `adata.uns['ldvae_loadings']` stores the per-gene weights in the linear decoder as a\n105 genes by n_latent matrix.\n106 \n107 \"\"\"\n108 \n109 try:\n110 from scvi.models import VAE, LDVAE\n111 from scvi.inference import UnsupervisedTrainer\n112 from scvi.dataset import AnnDatasetFromAnnData\n113 except ImportError:\n114 raise ImportError(\n115 \"Please install scvi package from https://github.com/YosefLab/scVI\"\n116 )\n117 \n118 # check if observations are unnormalized using first 10\n119 # code from: https://github.com/theislab/dca/blob/89eee4ed01dd969b3d46e0c815382806fbfc2526/dca/io.py#L63-L69\n120 if len(adata) > 10:\n121 X_subset = adata.X[:10]\n122 else:\n123 X_subset = adata.X\n124 norm_error = (\n125 'Make sure that the dataset (adata.X) contains unnormalized count data.'\n126 )\n127 if sp.sparse.issparse(X_subset):\n128 assert (X_subset.astype(int) != X_subset).nnz == 0, norm_error\n129 else:\n130 assert np.all(X_subset.astype(int) == X_subset), norm_error\n131 \n132 if subset_genes is not None:\n133 adata_subset = adata[:, subset_genes]\n134 elif use_highly_variable_genes and \"highly_variable\" in adata.var:\n135 adata_subset = adata[:, adata.var[\"highly_variable\"]]\n136 else:\n137 adata_subset = adata\n138 \n139 if batch_key is not None:\n140 codes, uniques = pd.factorize(adata_subset.obs[batch_key])\n141 adata_subset.obs['_tmp_scvi_batch'] = codes\n142 n_batches = len(uniques)\n143 else:\n144 n_batches = 0\n145 \n146 dataset = AnnDatasetFromAnnData(adata_subset.copy(), batch_label='_tmp_scvi_batch')\n147 \n148 if linear_decoder:\n149 vae = LDVAE(\n150 n_input=dataset.nb_genes,\n151 n_batch=n_batches,\n152 n_labels=dataset.n_labels,\n153 n_hidden=n_hidden,\n154 n_latent=n_latent,\n155 n_layers_encoder=n_layers,\n156 dispersion=dispersion,\n157 **model_kwargs,\n158 )\n159 \n160 else:\n161 vae = VAE(\n162 dataset.nb_genes,\n163 n_batch=n_batches,\n164 n_labels=dataset.n_labels,\n165 n_hidden=n_hidden,\n166 n_latent=n_latent,\n167 n_layers=n_layers,\n168 dispersion=dispersion,\n169 **model_kwargs,\n170 )\n171 \n172 trainer = UnsupervisedTrainer(\n173 model=vae,\n174 gene_dataset=dataset,\n175 use_cuda=use_cuda,\n176 train_size=train_size,\n177 **trainer_kwargs,\n178 )\n179 \n180 trainer.train(n_epochs=n_epochs, lr=lr)\n181 \n182 full = trainer.create_posterior(\n183 trainer.model, dataset, indices=np.arange(len(dataset))\n184 )\n185 latent, batch_indices, labels = full.sequential().get_latent()\n186 \n187 if copy:\n188 adata = adata.copy()\n189 \n190 adata.obsm['X_scvi'] = latent\n191 adata.obsm['X_scvi_denoised'] = 
full.sequential().get_sample_scale()\n192 adata.obsm['X_scvi_sample_rate'] = full.sequential().imputation()\n193 \n194 if linear_decoder:\n195 loadings = vae.get_loadings()\n196 df = pd.DataFrame(loadings, index=adata_subset.var_names)\n197 adata.uns['ldvae_loadings'] = df\n198 \n199 if copy and return_posterior:\n200 return adata, full\n201 elif copy:\n202 return adata\n203 elif return_posterior:\n204 return full\n205\n[end of scanpy/external/pp/_scvi.py]\n[start of setup.py]\n1 import sys\n2 \n3 if sys.version_info < (3, 6):\n4 sys.exit('scanpy requires Python >= 3.6')\n5 from pathlib import Path\n6 \n7 from setuptools import setup, find_packages\n8 \n9 try:\n10 import pytoml\n11 except ImportError:\n12 sys.exit('Please use `pip install .` or install pytoml first.')\n13 \n14 proj = pytoml.loads(Path('pyproject.toml').read_text())\n15 metadata = proj['tool']['scanpy']\n16 \n17 setup(\n18 name='scanpy',\n19 use_scm_version=True,\n20 setup_requires=['setuptools_scm'],\n21 description='Single-Cell Analysis in Python.',\n22 long_description=Path('README.rst').read_text('utf-8'),\n23 url='http://github.com/theislab/scanpy',\n24 author=metadata['author'],\n25 author_email=metadata['author-email'],\n26 license='BSD',\n27 python_requires='>=3.6',\n28 install_requires=[\n29 l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines()\n30 ],\n31 extras_require=dict(\n32 louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],\n33 leiden=['python-igraph', 'leidenalg'],\n34 bbknn=['bbknn'],\n35 scvi=['scvi>=0.6.5'],\n36 rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],\n37 magic=['magic-impute>=2.0'],\n38 skmisc=['scikit-misc>=0.1.3'],\n39 harmony=['harmonypy'],\n40 scrublet=['scrublet'],\n41 dev=['setuptools_scm', 'pytoml', 'black>=20.8b1'],\n42 doc=[\n43 'sphinx>=3.2',\n44 'sphinx_rtd_theme>=0.3.1',\n45 'sphinx_autodoc_typehints',\n46 'scanpydoc>=0.5',\n47 'typing_extensions; python_version < \"3.8\"', # for `Literal`\n48 ],\n49 test=[\n50 'pytest>=4.4',\n51 'dask[array]!=2.17.0',\n52 'fsspec',\n53 'zappy',\n54 'zarr',\n55 'profimp',\n56 ],\n57 ),\n58 packages=find_packages(),\n59 include_package_data=True,\n60 entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),\n61 zip_safe=False,\n62 classifiers=[\n63 'Development Status :: 5 - Production/Stable',\n64 'Environment :: Console',\n65 'Framework :: Jupyter',\n66 'Intended Audience :: Developers',\n67 'Intended Audience :: Science/Research',\n68 'Natural Language :: English',\n69 'Operating System :: MacOS :: MacOS X',\n70 'Operating System :: Microsoft :: Windows',\n71 'Operating System :: POSIX :: Linux',\n72 'Programming Language :: Python :: 3',\n73 'Programming Language :: Python :: 3.5',\n74 'Programming Language :: Python :: 3.6',\n75 'Programming Language :: Python :: 3.7',\n76 'Topic :: Scientific/Engineering :: Bio-Informatics',\n77 'Topic :: Scientific/Engineering :: Visualization',\n78 ],\n79 )\n80\n[end of setup.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/scanpy/external/pp/_scvi.py b/scanpy/external/pp/_scvi.py\n--- a/scanpy/external/pp/_scvi.py\n+++ b/scanpy/external/pp/_scvi.py\n@@ -1,3 +1,4 @@\n+import warnings\n import numpy as np\n import pandas as pd\n import scipy as sp\n@@ -5,7 +6,7 @@\n from typing import Optional, Sequence, Union\n from anndata import AnnData\n \n-MIN_VERSION = \"0.6.5\"\n+MIN_VERSION = \"0.6.7\"\n \n \n def scvi(\n@@ -105,6 +106,11 @@\n genes by n_latent matrix.\n \n \"\"\"\n+ warnings.warn(\n+ \"scvi via scanpy external API is no longer supported. \"\n+ + \"Please use the new scvi-tools package from `scvi-tools.org`\",\n+ FutureWarning,\n+ )\n \n try:\n from scvi.models import VAE, LDVAE\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,7 +32,7 @@\n louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],\n leiden=['python-igraph', 'leidenalg'],\n bbknn=['bbknn'],\n- scvi=['scvi>=0.6.5'],\n+ scvi=['scvi==0.6.7'],\n rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],\n magic=['magic-impute>=2.0'],\n skmisc=['scikit-misc>=0.1.3'],\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/scanpy/external/pp/_scvi.py b/scanpy/external/pp/_scvi.py\\n--- a/scanpy/external/pp/_scvi.py\\n+++ b/scanpy/external/pp/_scvi.py\\n@@ -1,3 +1,4 @@\\n+import warnings\\n import numpy as np\\n import pandas as pd\\n import scipy as sp\\n@@ -5,7 +6,7 @@\\n from typing import Optional, Sequence, Union\\n from anndata import AnnData\\n \\n-MIN_VERSION = \\\"0.6.5\\\"\\n+MIN_VERSION = \\\"0.6.7\\\"\\n \\n \\n def scvi(\\n@@ -105,6 +106,11 @@\\n genes by n_latent matrix.\\n \\n \\\"\\\"\\\"\\n+ warnings.warn(\\n+ \\\"scvi via scanpy external API is no longer supported. \\\"\\n+ + \\\"Please use the new scvi-tools package from `scvi-tools.org`\\\",\\n+ FutureWarning,\\n+ )\\n \\n try:\\n from scvi.models import VAE, LDVAE\\ndiff --git a/setup.py b/setup.py\\n--- a/setup.py\\n+++ b/setup.py\\n@@ -32,7 +32,7 @@\\n louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],\\n leiden=['python-igraph', 'leidenalg'],\\n bbknn=['bbknn'],\\n- scvi=['scvi>=0.6.5'],\\n+ scvi=['scvi==0.6.7'],\\n rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],\\n magic=['magic-impute>=2.0'],\\n skmisc=['scikit-misc>=0.1.3'],\\n\", \"issue\": \"deprecate scvi in external\\nI was wondering if we could deprecate the scvi external wrapper as we now have `scvi-tools`. 
I could also update the wrapper to have minimal functionality, but I think it would be better for people to use our API now that it's tightly integrated with scanpy anyway.\\n\", \"before_files\": [{\"content\": \"import numpy as np\\nimport pandas as pd\\nimport scipy as sp\\n\\nfrom typing import Optional, Sequence, Union\\nfrom anndata import AnnData\\n\\nMIN_VERSION = \\\"0.6.5\\\"\\n\\n\\ndef scvi(\\n adata: AnnData,\\n n_hidden: int = 128,\\n n_latent: int = 10,\\n n_layers: int = 1,\\n dispersion: str = \\\"gene\\\",\\n n_epochs: int = 400,\\n lr: int = 1e-3,\\n train_size: int = 1.0,\\n batch_key: Optional[str] = None,\\n use_highly_variable_genes: bool = True,\\n subset_genes: Optional[Sequence[Union[int, str]]] = None,\\n linear_decoder: bool = False,\\n copy: bool = False,\\n use_cuda: bool = True,\\n return_posterior: bool = True,\\n trainer_kwargs: dict = {},\\n model_kwargs: dict = {},\\n) -> Optional[AnnData]:\\n \\\"\\\"\\\"\\\\\\n SCVI [Lopez18]_.\\n\\n Fits scVI model onto raw count data given an anndata object\\n\\n scVI uses stochastic optimization and deep neural networks to aggregate information \\n across similar cells and genes and to approximate the distributions that underlie\\n observed expression values, while accounting for batch effects and limited sensitivity.\\n\\n To use a linear-decoded Variational AutoEncoder model (implementation of [Svensson20]_.),\\n set linear_decoded = True. Compared to standard VAE, this model is less powerful, but can \\n be used to inspect which genes contribute to variation in the dataset. It may also be used\\n for all scVI tasks, like differential expression, batch correction, imputation, etc.\\n However, batch correction may be less powerful as it assumes a linear model.\\n\\n .. note::\\n More information and bug reports `here `__.\\n\\n Parameters\\n ----------\\n adata\\n An anndata file with `X` attribute of unnormalized count data\\n n_hidden\\n Number of nodes per hidden layer\\n n_latent\\n Dimensionality of the latent space\\n n_layers\\n Number of hidden layers used for encoder and decoder NNs\\n dispersion\\n One of the following\\n * `'gene'` - dispersion parameter of NB is constant per gene across cells\\n * `'gene-batch'` - dispersion can differ between different batches\\n * `'gene-label'` - dispersion can differ between different labels\\n * `'gene-cell'` - dispersion can differ for every gene in every cell\\n n_epochs\\n Number of epochs to train\\n lr\\n Learning rate\\n train_size\\n The train size, either a float between 0 and 1 or an integer for the number of training samples to use\\n batch_key\\n Column name in anndata.obs for batches. \\n If None, no batch correction is performed\\n If not None, batch correction is performed per batch category\\n use_highly_variable_genes\\n If true, uses only the genes in anndata.var[\\\"highly_variable\\\"]\\n subset_genes\\n Optional list of indices or gene names to subset anndata. 
\\n If not None, use_highly_variable_genes is ignored\\n linear_decoder\\n If true, uses LDVAE model, which is an implementation of [Svensson20]_.\\n copy\\n If true, a copy of anndata is returned\\n return_posterior\\n If true, posterior object is returned\\n use_cuda\\n If true, uses cuda\\n trainer_kwargs\\n Extra arguments for UnsupervisedTrainer\\n model_kwargs\\n Extra arguments for VAE or LDVAE model\\n \\n Returns\\n -------\\n If `copy` is true, anndata is returned.\\n If `return_posterior` is true, the posterior object is returned\\n If both `copy` and `return_posterior` are true, \\n a tuple of anndata and the posterior are returned in that order. \\n\\n `adata.obsm['X_scvi']` stores the latent representations\\n `adata.obsm['X_scvi_denoised']` stores the normalized mean of the negative binomial\\n `adata.obsm['X_scvi_sample_rate']` stores the mean of the negative binomial\\n \\n If linear_decoder is true:\\n `adata.uns['ldvae_loadings']` stores the per-gene weights in the linear decoder as a\\n genes by n_latent matrix.\\n\\n \\\"\\\"\\\"\\n\\n try:\\n from scvi.models import VAE, LDVAE\\n from scvi.inference import UnsupervisedTrainer\\n from scvi.dataset import AnnDatasetFromAnnData\\n except ImportError:\\n raise ImportError(\\n \\\"Please install scvi package from https://github.com/YosefLab/scVI\\\"\\n )\\n\\n # check if observations are unnormalized using first 10\\n # code from: https://github.com/theislab/dca/blob/89eee4ed01dd969b3d46e0c815382806fbfc2526/dca/io.py#L63-L69\\n if len(adata) > 10:\\n X_subset = adata.X[:10]\\n else:\\n X_subset = adata.X\\n norm_error = (\\n 'Make sure that the dataset (adata.X) contains unnormalized count data.'\\n )\\n if sp.sparse.issparse(X_subset):\\n assert (X_subset.astype(int) != X_subset).nnz == 0, norm_error\\n else:\\n assert np.all(X_subset.astype(int) == X_subset), norm_error\\n\\n if subset_genes is not None:\\n adata_subset = adata[:, subset_genes]\\n elif use_highly_variable_genes and \\\"highly_variable\\\" in adata.var:\\n adata_subset = adata[:, adata.var[\\\"highly_variable\\\"]]\\n else:\\n adata_subset = adata\\n\\n if batch_key is not None:\\n codes, uniques = pd.factorize(adata_subset.obs[batch_key])\\n adata_subset.obs['_tmp_scvi_batch'] = codes\\n n_batches = len(uniques)\\n else:\\n n_batches = 0\\n\\n dataset = AnnDatasetFromAnnData(adata_subset.copy(), batch_label='_tmp_scvi_batch')\\n\\n if linear_decoder:\\n vae = LDVAE(\\n n_input=dataset.nb_genes,\\n n_batch=n_batches,\\n n_labels=dataset.n_labels,\\n n_hidden=n_hidden,\\n n_latent=n_latent,\\n n_layers_encoder=n_layers,\\n dispersion=dispersion,\\n **model_kwargs,\\n )\\n\\n else:\\n vae = VAE(\\n dataset.nb_genes,\\n n_batch=n_batches,\\n n_labels=dataset.n_labels,\\n n_hidden=n_hidden,\\n n_latent=n_latent,\\n n_layers=n_layers,\\n dispersion=dispersion,\\n **model_kwargs,\\n )\\n\\n trainer = UnsupervisedTrainer(\\n model=vae,\\n gene_dataset=dataset,\\n use_cuda=use_cuda,\\n train_size=train_size,\\n **trainer_kwargs,\\n )\\n\\n trainer.train(n_epochs=n_epochs, lr=lr)\\n\\n full = trainer.create_posterior(\\n trainer.model, dataset, indices=np.arange(len(dataset))\\n )\\n latent, batch_indices, labels = full.sequential().get_latent()\\n\\n if copy:\\n adata = adata.copy()\\n\\n adata.obsm['X_scvi'] = latent\\n adata.obsm['X_scvi_denoised'] = full.sequential().get_sample_scale()\\n adata.obsm['X_scvi_sample_rate'] = full.sequential().imputation()\\n\\n if linear_decoder:\\n loadings = vae.get_loadings()\\n df = pd.DataFrame(loadings, 
index=adata_subset.var_names)\\n adata.uns['ldvae_loadings'] = df\\n\\n if copy and return_posterior:\\n return adata, full\\n elif copy:\\n return adata\\n elif return_posterior:\\n return full\\n\", \"path\": \"scanpy/external/pp/_scvi.py\"}, {\"content\": \"import sys\\n\\nif sys.version_info < (3, 6):\\n sys.exit('scanpy requires Python >= 3.6')\\nfrom pathlib import Path\\n\\nfrom setuptools import setup, find_packages\\n\\ntry:\\n import pytoml\\nexcept ImportError:\\n sys.exit('Please use `pip install .` or install pytoml first.')\\n\\nproj = pytoml.loads(Path('pyproject.toml').read_text())\\nmetadata = proj['tool']['scanpy']\\n\\nsetup(\\n name='scanpy',\\n use_scm_version=True,\\n setup_requires=['setuptools_scm'],\\n description='Single-Cell Analysis in Python.',\\n long_description=Path('README.rst').read_text('utf-8'),\\n url='http://github.com/theislab/scanpy',\\n author=metadata['author'],\\n author_email=metadata['author-email'],\\n license='BSD',\\n python_requires='>=3.6',\\n install_requires=[\\n l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines()\\n ],\\n extras_require=dict(\\n louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],\\n leiden=['python-igraph', 'leidenalg'],\\n bbknn=['bbknn'],\\n scvi=['scvi>=0.6.5'],\\n rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],\\n magic=['magic-impute>=2.0'],\\n skmisc=['scikit-misc>=0.1.3'],\\n harmony=['harmonypy'],\\n scrublet=['scrublet'],\\n dev=['setuptools_scm', 'pytoml', 'black>=20.8b1'],\\n doc=[\\n 'sphinx>=3.2',\\n 'sphinx_rtd_theme>=0.3.1',\\n 'sphinx_autodoc_typehints',\\n 'scanpydoc>=0.5',\\n 'typing_extensions; python_version < \\\"3.8\\\"', # for `Literal`\\n ],\\n test=[\\n 'pytest>=4.4',\\n 'dask[array]!=2.17.0',\\n 'fsspec',\\n 'zappy',\\n 'zarr',\\n 'profimp',\\n ],\\n ),\\n packages=find_packages(),\\n include_package_data=True,\\n entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),\\n zip_safe=False,\\n classifiers=[\\n 'Development Status :: 5 - Production/Stable',\\n 'Environment :: Console',\\n 'Framework :: Jupyter',\\n 'Intended Audience :: Developers',\\n 'Intended Audience :: Science/Research',\\n 'Natural Language :: English',\\n 'Operating System :: MacOS :: MacOS X',\\n 'Operating System :: Microsoft :: Windows',\\n 'Operating System :: POSIX :: Linux',\\n 'Programming Language :: Python :: 3',\\n 'Programming Language :: Python :: 3.5',\\n 'Programming Language :: Python :: 3.6',\\n 'Programming Language :: Python :: 3.7',\\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\\n 'Topic :: Scientific/Engineering :: Visualization',\\n ],\\n)\\n\", \"path\": \"setup.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3716,"string":"3,716"},"num_tokens_diff":{"kind":"number","value":388,"string":"388"}}},{"rowIdx":18155,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_6234"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"liqd__a4-meinberlin-3564"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nmoderators for b-plans\nif an initiator starts a b-plan via meinBerlin (as e.g. SenWohn does, they don't have imperia) or an external project he/she is automatically added as moderator and gets mails as the one below. 
This is confusing because:\r\na) you don't see moderators in dashboard\r\nb) you can't follow a b-plan/external project\r\nc) the link does not go to the external page (in this case it goes here: https://mein.berlin.de/projects/bebauungsplan-8-66-buckower-felder/)\r\n\r\nShould we take out this rule for these two templates or stop sending mails?\r\n\r\n\r\n\"bildschirmfoto\r\n\n\n\n\n[start of meinberlin/apps/notifications/signals.py]\n1 from django.contrib.auth import get_user_model\n2 from django.db.models import signals\n3 from django.dispatch import receiver\n4 \n5 from adhocracy4.actions.models import Action\n6 from adhocracy4.actions.verbs import Verbs\n7 from adhocracy4.dashboard import signals as dashboard_signals\n8 from adhocracy4.follows.models import Follow\n9 from adhocracy4.projects.models import Project\n10 \n11 from . import emails\n12 \n13 User = get_user_model()\n14 \n15 \n16 @receiver(signals.post_save, sender=Action)\n17 def send_notifications(instance, created, **kwargs):\n18 action = instance\n19 verb = Verbs(action.verb)\n20 \n21 if action.type in ('item', 'comment') \\\n22 and verb in (Verbs.CREATE, Verbs.ADD):\n23 emails.NotifyCreatorEmail.send(action)\n24 \n25 if action.project:\n26 emails.NotifyModeratorsEmail.send(action)\n27 \n28 elif action.type == 'phase':\n29 if verb == Verbs.START:\n30 emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n31 elif verb == Verbs.SCHEDULE:\n32 emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n33 \n34 elif action.type == 'offlineevent' and verb == Verbs.START:\n35 emails.NotifyFollowersOnUpcommingEventEmail.send(action)\n36 \n37 \n38 @receiver(dashboard_signals.project_created)\n39 def send_project_created_notifications(**kwargs):\n40 project = kwargs.get('project')\n41 creator = kwargs.get('user')\n42 emails.NotifyInitiatorsOnProjectCreatedEmail.send(\n43 project, creator_pk=creator.pk)\n44 \n45 \n46 @receiver(signals.m2m_changed, sender=Project.moderators.through)\n47 def autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n48 if action == 'post_add':\n49 autofollow_project(instance, pk_set, reverse)\n50 \n51 \n52 def autofollow_project(instance, pk_set, reverse):\n53 if not reverse:\n54 project = instance\n55 users_pks = pk_set\n56 \n57 for user_pk in users_pks:\n58 Follow.objects.update_or_create(\n59 project=project,\n60 creator_id=user_pk,\n61 defaults={\n62 'enabled': True\n63 }\n64 )\n65 else:\n66 user = instance\n67 project_pks = pk_set\n68 \n69 for project_pk in project_pks:\n70 Follow.objects.update_or_create(\n71 project_id=project_pk,\n72 creator=user,\n73 defaults={\n74 'enabled': True\n75 }\n76 )\n77\n[end of meinberlin/apps/notifications/signals.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/meinberlin/apps/notifications/signals.py b/meinberlin/apps/notifications/signals.py\n--- a/meinberlin/apps/notifications/signals.py\n+++ b/meinberlin/apps/notifications/signals.py\n@@ -25,7 +25,8 @@\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n \n- elif action.type == 'phase':\n+ elif (action.type == 'phase' and\n+ action.project.project_type == 'a4projects.Project'):\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/meinberlin/apps/notifications/signals.py b/meinberlin/apps/notifications/signals.py\\n--- a/meinberlin/apps/notifications/signals.py\\n+++ b/meinberlin/apps/notifications/signals.py\\n@@ -25,7 +25,8 @@\\n if action.project:\\n emails.NotifyModeratorsEmail.send(action)\\n \\n- elif action.type == 'phase':\\n+ elif (action.type == 'phase' and\\n+ action.project.project_type == 'a4projects.Project'):\\n if verb == Verbs.START:\\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\\n elif verb == Verbs.SCHEDULE:\\n\", \"issue\": \"moderators for b-plans\\nif an initiator starts a b-plan via meinBerlin (as e.g. SenWohn does, they don't have imperia) or an external project he/she is automatically added as moderator and gets mails as the one below. This is confusing because:\\r\\na) you don't see moderators in dashboard\\r\\nb) you can't follow a b-plan/external project\\r\\nc) the link does not go to the external page (in this case it goes here: https://mein.berlin.de/projects/bebauungsplan-8-66-buckower-felder/)\\r\\n\\r\\nShould we take out this rule for these two templates or stop sending mails?\\r\\n\\r\\n\\r\\n\\\"bildschirmfoto\\r\\n\\n\", \"before_files\": [{\"content\": \"from django.contrib.auth import get_user_model\\nfrom django.db.models import signals\\nfrom django.dispatch import receiver\\n\\nfrom adhocracy4.actions.models import Action\\nfrom adhocracy4.actions.verbs import Verbs\\nfrom adhocracy4.dashboard import signals as dashboard_signals\\nfrom adhocracy4.follows.models import Follow\\nfrom adhocracy4.projects.models import Project\\n\\nfrom . 
import emails\\n\\nUser = get_user_model()\\n\\n\\n@receiver(signals.post_save, sender=Action)\\ndef send_notifications(instance, created, **kwargs):\\n action = instance\\n verb = Verbs(action.verb)\\n\\n if action.type in ('item', 'comment') \\\\\\n and verb in (Verbs.CREATE, Verbs.ADD):\\n emails.NotifyCreatorEmail.send(action)\\n\\n if action.project:\\n emails.NotifyModeratorsEmail.send(action)\\n\\n elif action.type == 'phase':\\n if verb == Verbs.START:\\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\\n elif verb == Verbs.SCHEDULE:\\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\\n\\n elif action.type == 'offlineevent' and verb == Verbs.START:\\n emails.NotifyFollowersOnUpcommingEventEmail.send(action)\\n\\n\\n@receiver(dashboard_signals.project_created)\\ndef send_project_created_notifications(**kwargs):\\n project = kwargs.get('project')\\n creator = kwargs.get('user')\\n emails.NotifyInitiatorsOnProjectCreatedEmail.send(\\n project, creator_pk=creator.pk)\\n\\n\\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\\n if action == 'post_add':\\n autofollow_project(instance, pk_set, reverse)\\n\\n\\ndef autofollow_project(instance, pk_set, reverse):\\n if not reverse:\\n project = instance\\n users_pks = pk_set\\n\\n for user_pk in users_pks:\\n Follow.objects.update_or_create(\\n project=project,\\n creator_id=user_pk,\\n defaults={\\n 'enabled': True\\n }\\n )\\n else:\\n user = instance\\n project_pks = pk_set\\n\\n for project_pk in project_pks:\\n Follow.objects.update_or_create(\\n project_id=project_pk,\\n creator=user,\\n defaults={\\n 'enabled': True\\n }\\n )\\n\", \"path\": \"meinberlin/apps/notifications/signals.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1431,"string":"1,431"},"num_tokens_diff":{"kind":"number","value":142,"string":"142"}}},{"rowIdx":18156,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_16491"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"aws-cloudformation__cfn-lint-734"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nE0000 found unknown escape character ‘/’\nversion:1:1\r\n\r\ncfn-lint --template vpc.cf.json\r\nE0000 found unknown escape character ‘/’\r\nvpc.cf.json:12:135\r\n\r\nthis is the string that it says container the escape character error. this however works fine when deployed to the CFN service. \r\n\r\n\"^([0-9]{1,3}\\\\.){3}[0-9]{1,3}(\\\\\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\r\n\r\n![image](https://user-images.githubusercontent.com/42137702/54364854-a11a3000-4665-11e9-8454-9ab8033fc1e7.png)\r\n\n\n\n\n[start of src/cfnlint/decode/__init__.py]\n1 \"\"\"\n2 Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n3 \n4 Permission is hereby granted, free of charge, to any person obtaining a copy of this\n5 software and associated documentation files (the \"Software\"), to deal in the Software\n6 without restriction, including without limitation the rights to use, copy, modify,\n7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n8 permit persons to whom the Software is furnished to do so.\n9 \n10 THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n16 \"\"\"\n17 import sys\n18 import logging\n19 import six\n20 try:\n21 from json.decoder import JSONDecodeError\n22 except ImportError:\n23 JSONDecodeError = ValueError\n24 from yaml.parser import ParserError, ScannerError\n25 from yaml import YAMLError\n26 import cfnlint.decode.cfn_yaml\n27 import cfnlint.decode.cfn_json\n28 \n29 \n30 LOGGER = logging.getLogger(__name__)\n31 \n32 \n33 def decode(filename, ignore_bad_template):\n34 \"\"\"\n35 Decode filename into an object\n36 \"\"\"\n37 template = None\n38 matches = []\n39 try:\n40 template = cfnlint.decode.cfn_yaml.load(filename)\n41 except IOError as e:\n42 if e.errno == 2:\n43 LOGGER.error('Template file not found: %s', filename)\n44 matches.append(create_match_file_error(filename, 'Template file not found: %s' % filename))\n45 elif e.errno == 21:\n46 LOGGER.error('Template references a directory, not a file: %s', filename)\n47 matches.append(create_match_file_error(filename, 'Template references a directory, not a file: %s' % filename))\n48 elif e.errno == 13:\n49 LOGGER.error('Permission denied when accessing template file: %s', filename)\n50 matches.append(create_match_file_error(filename, 'Permission denied when accessing template file: %s' % filename))\n51 \n52 if matches:\n53 return(None, matches)\n54 except UnicodeDecodeError as err:\n55 LOGGER.error('Cannot read file contents: %s', filename)\n56 matches.append(create_match_file_error(filename, 'Cannot read file contents: %s' % filename))\n57 except cfnlint.decode.cfn_yaml.CfnParseError as err:\n58 err.match.Filename = filename\n59 matches = [err.match]\n60 \n61 except ParserError as err:\n62 matches = [create_match_yaml_parser_error(err, filename)]\n63 except ScannerError as err:\n64 if err.problem == 'found character \\'\\\\t\\' that cannot start any token':\n65 try:\n66 template = cfnlint.decode.cfn_json.load(filename)\n67 except cfnlint.decode.cfn_json.JSONDecodeError as json_err:\n68 json_err.match.filename = filename\n69 matches = [json_err.match]\n70 except JSONDecodeError as json_err:\n71 matches = [create_match_json_parser_error(json_err, filename)]\n72 except Exception as json_err: # pylint: disable=W0703\n73 if ignore_bad_template:\n74 LOGGER.info('Template %s is malformed: %s', filename, err.problem)\n75 LOGGER.info('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n76 else:\n77 LOGGER.error('Template %s is malformed: %s', filename, err.problem)\n78 LOGGER.error('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n79 return(None, [create_match_file_error(filename, 'Tried to parse %s as JSON but got error: %s' % 
(filename, str(json_err)))])\n80 else:\n81 matches = [create_match_yaml_parser_error(err, filename)]\n82 except YAMLError as err:\n83 matches = [create_match_file_error(filename, err)]\n84 \n85 if not isinstance(template, dict) and not matches:\n86 # Template isn't a dict which means nearly nothing will work\n87 matches = [cfnlint.Match(1, 1, 1, 1, filename, cfnlint.ParseError(), message='Template needs to be an object.')]\n88 return (template, matches)\n89 \n90 \n91 def create_match_yaml_parser_error(parser_error, filename):\n92 \"\"\"Create a Match for a parser error\"\"\"\n93 lineno = parser_error.problem_mark.line + 1\n94 colno = parser_error.problem_mark.column + 1\n95 msg = parser_error.problem\n96 return cfnlint.Match(\n97 lineno, colno, lineno, colno + 1, filename,\n98 cfnlint.ParseError(), message=msg)\n99 \n100 \n101 def create_match_file_error(filename, msg):\n102 \"\"\"Create a Match for a parser error\"\"\"\n103 return cfnlint.Match(\n104 linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,\n105 filename=filename, rule=cfnlint.ParseError(), message=msg)\n106 \n107 \n108 def create_match_json_parser_error(parser_error, filename):\n109 \"\"\"Create a Match for a parser error\"\"\"\n110 if sys.version_info[0] == 3:\n111 lineno = parser_error.lineno\n112 colno = parser_error.colno\n113 msg = parser_error.msg\n114 elif sys.version_info[0] == 2:\n115 lineno = 1\n116 colno = 1\n117 msg = parser_error.message\n118 return cfnlint.Match(\n119 lineno, colno, lineno, colno + 1, filename, cfnlint.ParseError(), message=msg)\n120\n[end of src/cfnlint/decode/__init__.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/src/cfnlint/decode/__init__.py b/src/cfnlint/decode/__init__.py\n--- a/src/cfnlint/decode/__init__.py\n+++ b/src/cfnlint/decode/__init__.py\n@@ -57,11 +57,12 @@\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\n err.match.Filename = filename\n matches = [err.match]\n-\n except ParserError as err:\n matches = [create_match_yaml_parser_error(err, filename)]\n except ScannerError as err:\n- if err.problem == 'found character \\'\\\\t\\' that cannot start any token':\n+ if err.problem in [\n+ 'found character \\'\\\\t\\' that cannot start any token',\n+ 'found unknown escape character']:\n try:\n template = cfnlint.decode.cfn_json.load(filename)\n except cfnlint.decode.cfn_json.JSONDecodeError as 
json_err:\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/src/cfnlint/decode/__init__.py b/src/cfnlint/decode/__init__.py\\n--- a/src/cfnlint/decode/__init__.py\\n+++ b/src/cfnlint/decode/__init__.py\\n@@ -57,11 +57,12 @@\\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\\n err.match.Filename = filename\\n matches = [err.match]\\n-\\n except ParserError as err:\\n matches = [create_match_yaml_parser_error(err, filename)]\\n except ScannerError as err:\\n- if err.problem == 'found character \\\\'\\\\\\\\t\\\\' that cannot start any token':\\n+ if err.problem in [\\n+ 'found character \\\\'\\\\\\\\t\\\\' that cannot start any token',\\n+ 'found unknown escape character']:\\n try:\\n template = cfnlint.decode.cfn_json.load(filename)\\n except cfnlint.decode.cfn_json.JSONDecodeError as json_err:\\n\", \"issue\": \"E0000 found unknown escape character \\u2018/\\u2019\\nversion:1:1\\r\\n\\r\\ncfn-lint --template vpc.cf.json\\r\\nE0000 found unknown escape character \\u2018/\\u2019\\r\\nvpc.cf.json:12:135\\r\\n\\r\\nthis is the string that it says container the escape character error. this however works fine when deployed to the CFN service. \\r\\n\\r\\n\\\"^([0-9]{1,3}\\\\\\\\.){3}[0-9]{1,3}(\\\\\\\\\\\\/([0-9]|[1-2][0-9]|3[0-2]))?$\\\"\\r\\n\\r\\n![image](https://user-images.githubusercontent.com/42137702/54364854-a11a3000-4665-11e9-8454-9ab8033fc1e7.png)\\r\\n\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"\\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\\n\\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\\n software and associated documentation files (the \\\"Software\\\"), to deal in the Software\\n without restriction, including without limitation the rights to use, copy, modify,\\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\\n permit persons to whom the Software is furnished to do so.\\n\\n THE SOFTWARE IS PROVIDED \\\"AS IS\\\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\\n\\\"\\\"\\\"\\nimport sys\\nimport logging\\nimport six\\ntry:\\n from json.decoder import JSONDecodeError\\nexcept ImportError:\\n JSONDecodeError = ValueError\\nfrom yaml.parser import ParserError, ScannerError\\nfrom yaml import YAMLError\\nimport cfnlint.decode.cfn_yaml\\nimport cfnlint.decode.cfn_json\\n\\n\\nLOGGER = logging.getLogger(__name__)\\n\\n\\ndef decode(filename, ignore_bad_template):\\n \\\"\\\"\\\"\\n Decode filename into an object\\n \\\"\\\"\\\"\\n template = None\\n matches = []\\n try:\\n template = cfnlint.decode.cfn_yaml.load(filename)\\n except IOError as e:\\n if e.errno == 2:\\n LOGGER.error('Template file not found: %s', filename)\\n matches.append(create_match_file_error(filename, 'Template file not found: %s' % filename))\\n elif e.errno == 21:\\n LOGGER.error('Template references a directory, not a file: %s', filename)\\n matches.append(create_match_file_error(filename, 'Template references a directory, not a file: %s' % filename))\\n elif e.errno == 13:\\n LOGGER.error('Permission denied when accessing template file: %s', filename)\\n matches.append(create_match_file_error(filename, 'Permission denied when accessing template file: %s' % filename))\\n\\n if matches:\\n return(None, matches)\\n except UnicodeDecodeError as err:\\n LOGGER.error('Cannot read file contents: %s', filename)\\n matches.append(create_match_file_error(filename, 'Cannot read file contents: %s' % filename))\\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\\n err.match.Filename = filename\\n matches = [err.match]\\n\\n except ParserError as err:\\n matches = [create_match_yaml_parser_error(err, filename)]\\n except ScannerError as err:\\n if err.problem == 'found character \\\\'\\\\\\\\t\\\\' that cannot start any token':\\n try:\\n template = cfnlint.decode.cfn_json.load(filename)\\n except cfnlint.decode.cfn_json.JSONDecodeError as json_err:\\n json_err.match.filename = filename\\n matches = [json_err.match]\\n except JSONDecodeError as json_err:\\n matches = [create_match_json_parser_error(json_err, filename)]\\n except Exception as json_err: # pylint: disable=W0703\\n if ignore_bad_template:\\n LOGGER.info('Template %s is malformed: %s', filename, err.problem)\\n LOGGER.info('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\\n else:\\n LOGGER.error('Template %s is malformed: %s', filename, err.problem)\\n LOGGER.error('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\\n return(None, [create_match_file_error(filename, 'Tried to parse %s as JSON but got error: %s' % (filename, str(json_err)))])\\n else:\\n matches = [create_match_yaml_parser_error(err, filename)]\\n except YAMLError as err:\\n matches = [create_match_file_error(filename, err)]\\n\\n if not isinstance(template, dict) and not matches:\\n # Template isn't a dict which means nearly nothing will work\\n matches = [cfnlint.Match(1, 1, 1, 1, filename, cfnlint.ParseError(), message='Template needs to be an object.')]\\n return (template, matches)\\n\\n\\ndef create_match_yaml_parser_error(parser_error, filename):\\n \\\"\\\"\\\"Create a Match for a parser error\\\"\\\"\\\"\\n lineno = parser_error.problem_mark.line + 1\\n colno = parser_error.problem_mark.column + 1\\n msg = parser_error.problem\\n return 
cfnlint.Match(\\n lineno, colno, lineno, colno + 1, filename,\\n cfnlint.ParseError(), message=msg)\\n\\n\\ndef create_match_file_error(filename, msg):\\n \\\"\\\"\\\"Create a Match for a parser error\\\"\\\"\\\"\\n return cfnlint.Match(\\n linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,\\n filename=filename, rule=cfnlint.ParseError(), message=msg)\\n\\n\\ndef create_match_json_parser_error(parser_error, filename):\\n \\\"\\\"\\\"Create a Match for a parser error\\\"\\\"\\\"\\n if sys.version_info[0] == 3:\\n lineno = parser_error.lineno\\n colno = parser_error.colno\\n msg = parser_error.msg\\n elif sys.version_info[0] == 2:\\n lineno = 1\\n colno = 1\\n msg = parser_error.message\\n return cfnlint.Match(\\n lineno, colno, lineno, colno + 1, filename, cfnlint.ParseError(), message=msg)\\n\", \"path\": \"src/cfnlint/decode/__init__.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2203,"string":"2,203"},"num_tokens_diff":{"kind":"number","value":206,"string":"206"}}},{"rowIdx":18157,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_6009"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"google__osv.dev-1021"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nMaven 'zero' versions causes errors when comparing\nMaven versions that are 'zero' (e.g. `0.0.0` and `0.0.0-X.Y.Z`) cause assertion errors when comparing with non-zero versions, because the comparisons assume there's always an initial token without a `.` or `-` 'prefix'.\r\n\r\nThis seems to be causing some of the missing vulnerabilities in #1018\n\n\n\n[start of osv/ecosystems/maven.py]\n1 # Copyright 2021 Google LLC\n2 #\n3 # Licensed under the Apache License, Version 2.0 (the \"License\");\n4 # you may not use this file except in compliance with the License.\n5 # You may obtain a copy of the License at\n6 #\n7 # http://www.apache.org/licenses/LICENSE-2.0\n8 #\n9 # Unless required by applicable law or agreed to in writing, software\n10 # distributed under the License is distributed on an \"AS IS\" BASIS,\n11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n12 # See the License for the specific language governing permissions and\n13 # limitations under the License.\n14 \"\"\"Maven ecosystem helper.\"\"\"\n15 \n16 import collections\n17 import json\n18 import re\n19 \n20 import urllib.parse\n21 from . 
import config\n22 from .helper_base import DepsDevMixin, EnumerateError\n23 from ..cache import cached\n24 from ..request_helper import RequestHelper\n25 \n26 \n27 # pylint: disable=line-too-long\n28 # Maven's very complicated spec:\n29 # http://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification\n30 \n31 _TO_TRIM = ('0', '', 'final', 'ga')\n32 _KEYWORD_ORDER = ('alpha', 'beta', 'milestone', 'rc', 'snapshot', '', 'sp')\n33 \n34 \n35 def qualifier_order(token):\n36 \"\"\"Returns an integer representing a token's order.\"\"\"\n37 # \".qualifier\" < \"-qualifier\" < \"-number\" < \".number\"\n38 if token.value.isdigit():\n39 if token.prefix == '-':\n40 return 2\n41 \n42 assert token.prefix == '.'\n43 return 3\n44 \n45 if token.prefix == '-':\n46 return 1\n47 \n48 assert token.prefix == '.'\n49 return 0\n50 \n51 \n52 class VersionToken(\n53 collections.namedtuple(\n54 'VersionToken', 'prefix value is_null', defaults=(False,))):\n55 \"\"\"Version token.\"\"\"\n56 \n57 __slots__ = ()\n58 \n59 def __eq__(self, other):\n60 return self.prefix == other.prefix and self.value == other.value\n61 \n62 def __lt__(self, other):\n63 if self.prefix == other.prefix:\n64 # if the prefix is the same, then compare the token:\n65 if self.value.isdigit() and other.value.isdigit():\n66 # Numeric tokens have the natural order.\n67 return int(self.value) < int(other.value)\n68 # The spec is unclear, but according to Maven's implementation, numerics\n69 # sort after non-numerics, **unless it's a null value**.\n70 # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L443\n71 if self.value.isdigit() and not self.is_null:\n72 return False\n73 \n74 if other.value.isdigit() and not other.is_null:\n75 return True\n76 \n77 # Non-numeric tokens (\"qualifiers\") have the alphabetical order, except\n78 # for the following tokens which come first in _KEYWORD_ORDER.\n79 #\n80 # The spec is unclear, but according to Maven's implementation, unknown\n81 # qualifiers sort after known qualifiers:\n82 # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L423\n83 try:\n84 left_idx = _KEYWORD_ORDER.index(self.value)\n85 except ValueError:\n86 left_idx = len(_KEYWORD_ORDER)\n87 \n88 try:\n89 right_idx = _KEYWORD_ORDER.index(other.value)\n90 except ValueError:\n91 right_idx = len(_KEYWORD_ORDER)\n92 \n93 if left_idx == len(_KEYWORD_ORDER) and right_idx == len(_KEYWORD_ORDER):\n94 # Both are unknown qualifiers. Just do a lexical comparison.\n95 return self.value < other.value\n96 \n97 return left_idx < right_idx\n98 \n99 # else \".qualifier\" < \"-qualifier\" < \"-number\" < \".number\"\n100 return qualifier_order(self) < qualifier_order(other)\n101 \n102 \n103 class Version:\n104 \"\"\"Maven version.\"\"\"\n105 \n106 def __init__(self):\n107 self.tokens = []\n108 \n109 def __str__(self):\n110 result = ''\n111 for token in self.tokens:\n112 result += token.prefix + token.value\n113 \n114 return result\n115 \n116 def __eq__(self, other):\n117 return self.tokens == other.tokens\n118 \n119 def __lt__(self, other):\n120 for i in range(max(len(self.tokens), len(other.tokens))):\n121 # the shorter one padded with enough \"null\" values with matching prefix to\n122 # have the same length as the longer one. 
Padded \"null\" values depend on\n123 # the prefix of the other version: 0 for '.', \"\" for '-'\n124 if i >= len(self.tokens):\n125 if other.tokens[i].prefix == '.':\n126 left = VersionToken('.', '0', is_null=True)\n127 else:\n128 assert other.tokens[i].prefix == '-'\n129 left = VersionToken('-', '', is_null=True)\n130 else:\n131 left = self.tokens[i]\n132 \n133 if i >= len(other.tokens):\n134 if self.tokens[i].prefix == '.':\n135 right = VersionToken('.', '0', is_null=True)\n136 else:\n137 assert self.tokens[i].prefix == '-'\n138 right = VersionToken('-', '', is_null=True)\n139 else:\n140 right = other.tokens[i]\n141 \n142 if left == right:\n143 continue\n144 \n145 return left < right\n146 \n147 @classmethod\n148 def from_string(cls, str_version):\n149 \"\"\"Parse a version.\"\"\"\n150 version = Version()\n151 \n152 # The Maven coordinate is split in tokens between dots ('.'), hyphens ('-')\n153 # and transitions between digits and characters. The prefix is recorded\n154 # and will have effect on the order.\n155 \n156 # Split and keep the delimiter.\n157 tokens = re.split(r'([-.])', str_version)\n158 for i in range(0, len(tokens), 2):\n159 if i == 0:\n160 # First token has no preceding prefix.\n161 prefix = ''\n162 else:\n163 # Preceding prefix.\n164 prefix = tokens[i - 1]\n165 \n166 # A transition between digits and characters is equivalent to a hyphen.\n167 # According to Maven's implementation: any non-digit is a \"character\":\n168 # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L627\n169 \n170 # Find instances of or .\n171 # ?= makes the regex non-consuming (needed to catch overlapping\n172 # transitions such as ).\n173 # This gives an array of indices where each index is where a hyphen should be.\n174 transitions = [\n175 m.span()[0] + 1\n176 for m in re.finditer(r'(?=(\\d[^\\d]|[^\\d]\\d))', tokens[i])\n177 ]\n178 # Add the last index so that our algorithm to split up the current token works.\n179 transitions.append(len(tokens[i]))\n180 \n181 prev_index = 0\n182 for j, transition in enumerate(transitions):\n183 if j > 0:\n184 prefix = '-'\n185 \n186 # The spec doesn't say this, but all qualifiers are case insensitive.\n187 current = tokens[i][prev_index:transition].lower()\n188 if not current:\n189 # Empty tokens are replaced with \"0\".\n190 current = '0'\n191 \n192 # Normalize \"cr\" to \"rc\" for easier comparison since they are equal in\n193 # precedence.\n194 if current == 'cr':\n195 current = 'rc'\n196 \n197 # Also do this for 'ga', 'final' which are equivalent to empty string.\n198 # \"release\" is not part of the spec but is implemented by Maven.\n199 if current in ('ga', 'final', 'release'):\n200 current = ''\n201 \n202 # the \"alpha\", \"beta\" and \"milestone\" qualifiers can respectively be\n203 # shortened to \"a\", \"b\" and \"m\" when directly followed by a number.\n204 if transition != len(tokens[i]):\n205 if current == 'a':\n206 current = 'alpha'\n207 \n208 if current == 'b':\n209 current = 'beta'\n210 \n211 if current == 'm':\n212 current = 'milestone'\n213 \n214 if current.isdigit():\n215 # Remove any leading zeros.\n216 current = str(int(current))\n217 \n218 version.tokens.append(VersionToken(prefix, current))\n219 prev_index = transition\n220 \n221 # Then, starting from the end of the version, the trailing \"null\" values\n222 # (0, \"\", \"final\", \"ga\") are trimmed.\n223 i = len(version.tokens) - 1\n224 while i >= 0:\n225 if 
version.tokens[i].value in _TO_TRIM:\n226 version.tokens.pop(i)\n227 i -= 1\n228 continue\n229 \n230 # This process is repeated at each remaining hyphen from end to start.\n231 while i >= 0 and version.tokens[i].prefix != '-':\n232 i -= 1\n233 \n234 i -= 1\n235 \n236 return version\n237 \n238 \n239 class Maven(DepsDevMixin):\n240 \"\"\"Maven ecosystem.\"\"\"\n241 \n242 _API_PACKAGE_URL = 'https://search.maven.org/solrsearch/select'\n243 \n244 def sort_key(self, version):\n245 \"\"\"Sort key.\"\"\"\n246 return Version.from_string(version)\n247 \n248 @staticmethod\n249 def _get_versions(package):\n250 \"\"\"Get versions.\"\"\"\n251 versions = []\n252 request_helper = RequestHelper()\n253 \n254 group_id, artifact_id = package.split(':', 2)\n255 start = 0\n256 \n257 while True:\n258 query = {\n259 'q': f'g:\"{group_id}\" AND a:\"{artifact_id}\"',\n260 'core': 'gav',\n261 'rows': '20',\n262 'wt': 'json',\n263 'start': start\n264 }\n265 url = Maven._API_PACKAGE_URL + '?' + urllib.parse.urlencode(query)\n266 response = request_helper.get(url)\n267 response = json.loads(response)['response']\n268 if response['numFound'] == 0:\n269 raise EnumerateError(f'Package {package} not found')\n270 \n271 for result in response['docs']:\n272 versions.append(result['v'])\n273 \n274 if len(versions) >= response['numFound']:\n275 break\n276 \n277 start = len(versions)\n278 \n279 return versions\n280 \n281 def enumerate_versions(self,\n282 package,\n283 introduced,\n284 fixed=None,\n285 last_affected=None,\n286 limits=None):\n287 \"\"\"Enumerate versions.\"\"\"\n288 if config.use_deps_dev:\n289 return self._deps_dev_enumerate(\n290 package, introduced, fixed, last_affected, limits=limits)\n291 \n292 get_versions = self._get_versions\n293 if config.shared_cache:\n294 get_versions = cached(config.shared_cache)(get_versions)\n295 \n296 versions = get_versions(package)\n297 self.sort_versions(versions)\n298 return self._get_affected_versions(versions, introduced, fixed,\n299 last_affected, limits)\n300\n[end of osv/ecosystems/maven.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/osv/ecosystems/maven.py b/osv/ecosystems/maven.py\n--- a/osv/ecosystems/maven.py\n+++ b/osv/ecosystems/maven.py\n@@ -221,7 +221,7 @@\n # Then, starting from the end of the version, the trailing \"null\" values\n # (0, \"\", \"final\", \"ga\") are trimmed.\n i = len(version.tokens) - 1\n- while i >= 0:\n+ while i > 0: # We always want at least one token for comparison\n if version.tokens[i].value in _TO_TRIM:\n version.tokens.pop(i)\n i -= 1\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/osv/ecosystems/maven.py b/osv/ecosystems/maven.py\\n--- a/osv/ecosystems/maven.py\\n+++ b/osv/ecosystems/maven.py\\n@@ -221,7 +221,7 @@\\n # Then, starting from the end of the version, the trailing \\\"null\\\" values\\n # (0, \\\"\\\", \\\"final\\\", \\\"ga\\\") are trimmed.\\n i = len(version.tokens) - 1\\n- while i >= 0:\\n+ while i > 0: # We always want at least one token for comparison\\n if version.tokens[i].value in _TO_TRIM:\\n version.tokens.pop(i)\\n i -= 1\\n\", \"issue\": \"Maven 'zero' versions causes errors when comparing\\nMaven versions that are 'zero' (e.g. `0.0.0` and `0.0.0-X.Y.Z`) cause assertion errors when comparing with non-zero versions, because the comparisons assume there's always an initial token without a `.` or `-` 'prefix'.\\r\\n\\r\\nThis seems to be causing some of the missing vulnerabilities in #1018\\n\", \"before_files\": [{\"content\": \"# Copyright 2021 Google LLC\\n#\\n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n#\\n# http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\\n\\\"\\\"\\\"Maven ecosystem helper.\\\"\\\"\\\"\\n\\nimport collections\\nimport json\\nimport re\\n\\nimport urllib.parse\\nfrom . 
import config\\nfrom .helper_base import DepsDevMixin, EnumerateError\\nfrom ..cache import cached\\nfrom ..request_helper import RequestHelper\\n\\n\\n# pylint: disable=line-too-long\\n# Maven's very complicated spec:\\n# http://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification\\n\\n_TO_TRIM = ('0', '', 'final', 'ga')\\n_KEYWORD_ORDER = ('alpha', 'beta', 'milestone', 'rc', 'snapshot', '', 'sp')\\n\\n\\ndef qualifier_order(token):\\n \\\"\\\"\\\"Returns an integer representing a token's order.\\\"\\\"\\\"\\n # \\\".qualifier\\\" < \\\"-qualifier\\\" < \\\"-number\\\" < \\\".number\\\"\\n if token.value.isdigit():\\n if token.prefix == '-':\\n return 2\\n\\n assert token.prefix == '.'\\n return 3\\n\\n if token.prefix == '-':\\n return 1\\n\\n assert token.prefix == '.'\\n return 0\\n\\n\\nclass VersionToken(\\n collections.namedtuple(\\n 'VersionToken', 'prefix value is_null', defaults=(False,))):\\n \\\"\\\"\\\"Version token.\\\"\\\"\\\"\\n\\n __slots__ = ()\\n\\n def __eq__(self, other):\\n return self.prefix == other.prefix and self.value == other.value\\n\\n def __lt__(self, other):\\n if self.prefix == other.prefix:\\n # if the prefix is the same, then compare the token:\\n if self.value.isdigit() and other.value.isdigit():\\n # Numeric tokens have the natural order.\\n return int(self.value) < int(other.value)\\n # The spec is unclear, but according to Maven's implementation, numerics\\n # sort after non-numerics, **unless it's a null value**.\\n # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L443\\n if self.value.isdigit() and not self.is_null:\\n return False\\n\\n if other.value.isdigit() and not other.is_null:\\n return True\\n\\n # Non-numeric tokens (\\\"qualifiers\\\") have the alphabetical order, except\\n # for the following tokens which come first in _KEYWORD_ORDER.\\n #\\n # The spec is unclear, but according to Maven's implementation, unknown\\n # qualifiers sort after known qualifiers:\\n # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L423\\n try:\\n left_idx = _KEYWORD_ORDER.index(self.value)\\n except ValueError:\\n left_idx = len(_KEYWORD_ORDER)\\n\\n try:\\n right_idx = _KEYWORD_ORDER.index(other.value)\\n except ValueError:\\n right_idx = len(_KEYWORD_ORDER)\\n\\n if left_idx == len(_KEYWORD_ORDER) and right_idx == len(_KEYWORD_ORDER):\\n # Both are unknown qualifiers. Just do a lexical comparison.\\n return self.value < other.value\\n\\n return left_idx < right_idx\\n\\n # else \\\".qualifier\\\" < \\\"-qualifier\\\" < \\\"-number\\\" < \\\".number\\\"\\n return qualifier_order(self) < qualifier_order(other)\\n\\n\\nclass Version:\\n \\\"\\\"\\\"Maven version.\\\"\\\"\\\"\\n\\n def __init__(self):\\n self.tokens = []\\n\\n def __str__(self):\\n result = ''\\n for token in self.tokens:\\n result += token.prefix + token.value\\n\\n return result\\n\\n def __eq__(self, other):\\n return self.tokens == other.tokens\\n\\n def __lt__(self, other):\\n for i in range(max(len(self.tokens), len(other.tokens))):\\n # the shorter one padded with enough \\\"null\\\" values with matching prefix to\\n # have the same length as the longer one. 
Padded \\\"null\\\" values depend on\\n # the prefix of the other version: 0 for '.', \\\"\\\" for '-'\\n if i >= len(self.tokens):\\n if other.tokens[i].prefix == '.':\\n left = VersionToken('.', '0', is_null=True)\\n else:\\n assert other.tokens[i].prefix == '-'\\n left = VersionToken('-', '', is_null=True)\\n else:\\n left = self.tokens[i]\\n\\n if i >= len(other.tokens):\\n if self.tokens[i].prefix == '.':\\n right = VersionToken('.', '0', is_null=True)\\n else:\\n assert self.tokens[i].prefix == '-'\\n right = VersionToken('-', '', is_null=True)\\n else:\\n right = other.tokens[i]\\n\\n if left == right:\\n continue\\n\\n return left < right\\n\\n @classmethod\\n def from_string(cls, str_version):\\n \\\"\\\"\\\"Parse a version.\\\"\\\"\\\"\\n version = Version()\\n\\n # The Maven coordinate is split in tokens between dots ('.'), hyphens ('-')\\n # and transitions between digits and characters. The prefix is recorded\\n # and will have effect on the order.\\n\\n # Split and keep the delimiter.\\n tokens = re.split(r'([-.])', str_version)\\n for i in range(0, len(tokens), 2):\\n if i == 0:\\n # First token has no preceding prefix.\\n prefix = ''\\n else:\\n # Preceding prefix.\\n prefix = tokens[i - 1]\\n\\n # A transition between digits and characters is equivalent to a hyphen.\\n # According to Maven's implementation: any non-digit is a \\\"character\\\":\\n # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L627\\n\\n # Find instances of or .\\n # ?= makes the regex non-consuming (needed to catch overlapping\\n # transitions such as ).\\n # This gives an array of indices where each index is where a hyphen should be.\\n transitions = [\\n m.span()[0] + 1\\n for m in re.finditer(r'(?=(\\\\d[^\\\\d]|[^\\\\d]\\\\d))', tokens[i])\\n ]\\n # Add the last index so that our algorithm to split up the current token works.\\n transitions.append(len(tokens[i]))\\n\\n prev_index = 0\\n for j, transition in enumerate(transitions):\\n if j > 0:\\n prefix = '-'\\n\\n # The spec doesn't say this, but all qualifiers are case insensitive.\\n current = tokens[i][prev_index:transition].lower()\\n if not current:\\n # Empty tokens are replaced with \\\"0\\\".\\n current = '0'\\n\\n # Normalize \\\"cr\\\" to \\\"rc\\\" for easier comparison since they are equal in\\n # precedence.\\n if current == 'cr':\\n current = 'rc'\\n\\n # Also do this for 'ga', 'final' which are equivalent to empty string.\\n # \\\"release\\\" is not part of the spec but is implemented by Maven.\\n if current in ('ga', 'final', 'release'):\\n current = ''\\n\\n # the \\\"alpha\\\", \\\"beta\\\" and \\\"milestone\\\" qualifiers can respectively be\\n # shortened to \\\"a\\\", \\\"b\\\" and \\\"m\\\" when directly followed by a number.\\n if transition != len(tokens[i]):\\n if current == 'a':\\n current = 'alpha'\\n\\n if current == 'b':\\n current = 'beta'\\n\\n if current == 'm':\\n current = 'milestone'\\n\\n if current.isdigit():\\n # Remove any leading zeros.\\n current = str(int(current))\\n\\n version.tokens.append(VersionToken(prefix, current))\\n prev_index = transition\\n\\n # Then, starting from the end of the version, the trailing \\\"null\\\" values\\n # (0, \\\"\\\", \\\"final\\\", \\\"ga\\\") are trimmed.\\n i = len(version.tokens) - 1\\n while i >= 0:\\n if version.tokens[i].value in _TO_TRIM:\\n version.tokens.pop(i)\\n i -= 1\\n continue\\n\\n # This process is repeated at each remaining hyphen from 
end to start.\\n while i >= 0 and version.tokens[i].prefix != '-':\\n i -= 1\\n\\n i -= 1\\n\\n return version\\n\\n\\nclass Maven(DepsDevMixin):\\n \\\"\\\"\\\"Maven ecosystem.\\\"\\\"\\\"\\n\\n _API_PACKAGE_URL = 'https://search.maven.org/solrsearch/select'\\n\\n def sort_key(self, version):\\n \\\"\\\"\\\"Sort key.\\\"\\\"\\\"\\n return Version.from_string(version)\\n\\n @staticmethod\\n def _get_versions(package):\\n \\\"\\\"\\\"Get versions.\\\"\\\"\\\"\\n versions = []\\n request_helper = RequestHelper()\\n\\n group_id, artifact_id = package.split(':', 2)\\n start = 0\\n\\n while True:\\n query = {\\n 'q': f'g:\\\"{group_id}\\\" AND a:\\\"{artifact_id}\\\"',\\n 'core': 'gav',\\n 'rows': '20',\\n 'wt': 'json',\\n 'start': start\\n }\\n url = Maven._API_PACKAGE_URL + '?' + urllib.parse.urlencode(query)\\n response = request_helper.get(url)\\n response = json.loads(response)['response']\\n if response['numFound'] == 0:\\n raise EnumerateError(f'Package {package} not found')\\n\\n for result in response['docs']:\\n versions.append(result['v'])\\n\\n if len(versions) >= response['numFound']:\\n break\\n\\n start = len(versions)\\n\\n return versions\\n\\n def enumerate_versions(self,\\n package,\\n introduced,\\n fixed=None,\\n last_affected=None,\\n limits=None):\\n \\\"\\\"\\\"Enumerate versions.\\\"\\\"\\\"\\n if config.use_deps_dev:\\n return self._deps_dev_enumerate(\\n package, introduced, fixed, last_affected, limits=limits)\\n\\n get_versions = self._get_versions\\n if config.shared_cache:\\n get_versions = cached(config.shared_cache)(get_versions)\\n\\n versions = get_versions(package)\\n self.sort_versions(versions)\\n return self._get_affected_versions(versions, introduced, fixed,\\n last_affected, limits)\\n\", \"path\": \"osv/ecosystems/maven.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3834,"string":"3,834"},"num_tokens_diff":{"kind":"number","value":156,"string":"156"}}},{"rowIdx":18158,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_2520"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"cal-itp__benefits-1215"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nRefactor Agency dynamic headline into model prop\nRight now we are hardcoding the [Agency index headline PO key](https://github.com/cal-itp/benefits/blob/dev/benefits/core/views.py#L62):\r\n\r\n```python\r\npage = viewmodels.Page(\r\n title=_(\"core.pages.agency_index.title\"),\r\n headline=_(\"core.pages.agency_index.mst_cc.headline\"),\r\n button=button,\r\n classes=\"home\",\r\n )\r\n```\r\n\r\nThis is fine for MST. We need to make this a dynamic key coming from an `agency` prop for the future.\r\n\r\n## Acceptance Criteria\r\n\r\n\r\n\r\n- [ ] `agency_index` gets its headline from the selected `agency`\r\n\n\n\n\n[start of benefits/core/views.py]\n1 \"\"\"\n2 The core application: view definition for the root of the webapp.\n3 \"\"\"\n4 from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\n5 from django.template import loader\n6 from django.template.response import TemplateResponse\n7 from django.urls import reverse\n8 from django.utils.translation import pgettext, gettext as _\n9 \n10 from . 
import models, session, viewmodels\n11 from .middleware import pageview_decorator\n12 \n13 ROUTE_INDEX = \"core:index\"\n14 ROUTE_ELIGIBILITY = \"eligibility:index\"\n15 ROUTE_HELP = \"core:help\"\n16 ROUTE_LOGGED_OUT = \"core:logged_out\"\n17 \n18 TEMPLATE_INDEX = \"core/index.html\"\n19 TEMPLATE_AGENCY = \"core/agency_index.html\"\n20 TEMPLATE_HELP = \"core/help.html\"\n21 TEMPLATE_LOGGED_OUT = \"core/logged_out.html\"\n22 \n23 \n24 @pageview_decorator\n25 def index(request):\n26 \"\"\"View handler for the main entry page.\"\"\"\n27 session.reset(request)\n28 \n29 page = viewmodels.Page(\n30 title=_(\"core.pages.index.title\"),\n31 headline=_(\"core.pages.index.headline\"),\n32 modal=viewmodels.AgencySelector(\n33 id=\"agency-selector\",\n34 aria_labelledby_id=\"agency-selector-modal-label\",\n35 button_text=_(\"core.pages.index.button\"),\n36 ),\n37 )\n38 \n39 return TemplateResponse(request, TEMPLATE_INDEX, page.context_dict())\n40 \n41 \n42 @pageview_decorator\n43 def agency_index(request, agency):\n44 \"\"\"View handler for an agency entry page.\"\"\"\n45 session.reset(request)\n46 session.update(request, agency=agency, origin=agency.index_url)\n47 \n48 button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n49 \n50 page = viewmodels.Page(\n51 title=_(\"core.pages.agency_index.title\"),\n52 headline=_(\"core.pages.agency_index.mst_cc.headline\"),\n53 button=button,\n54 )\n55 \n56 return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n57 \n58 \n59 @pageview_decorator\n60 def agency_public_key(request, agency):\n61 \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n62 return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n63 \n64 \n65 @pageview_decorator\n66 def help(request):\n67 \"\"\"View handler for the help page.\"\"\"\n68 if session.active_agency(request):\n69 agency = session.agency(request)\n70 buttons = viewmodels.Button.agency_contact_links(agency)\n71 else:\n72 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n73 \n74 buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n75 \n76 page = viewmodels.Page(\n77 title=_(\"core.buttons.help\"),\n78 headline=_(\"core.buttons.help\"),\n79 buttons=buttons,\n80 )\n81 \n82 return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n83 \n84 \n85 @pageview_decorator\n86 def bad_request(request, exception, template_name=\"400.html\"):\n87 \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n88 if session.active_agency(request):\n89 session.update(request, origin=session.agency(request).index_url)\n90 else:\n91 session.update(request, origin=reverse(ROUTE_INDEX))\n92 \n93 home = viewmodels.Button.home(request)\n94 page = viewmodels.ErrorPage.server_error(button=home)\n95 t = loader.get_template(template_name)\n96 \n97 return HttpResponseBadRequest(t.render(page.context_dict()))\n98 \n99 \n100 @pageview_decorator\n101 def csrf_failure(request, reason):\n102 \"\"\"\n103 View handler for CSRF_FAILURE_VIEW with custom data.\n104 \"\"\"\n105 if session.active_agency(request):\n106 session.update(request, origin=session.agency(request).index_url)\n107 else:\n108 session.update(request, origin=reverse(ROUTE_INDEX))\n109 \n110 home = viewmodels.Button.home(request)\n111 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n112 t = loader.get_template(\"400.html\")\n113 \n114 return 
HttpResponseNotFound(t.render(page.context_dict()))\n115 \n116 \n117 @pageview_decorator\n118 def page_not_found(request, exception, template_name=\"404.html\"):\n119 \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n120 if session.active_agency(request):\n121 session.update(request, origin=session.agency(request).index_url)\n122 else:\n123 session.update(request, origin=reverse(ROUTE_INDEX))\n124 \n125 home = viewmodels.Button.home(request)\n126 # show a more user-friendly message instead of not_found\n127 page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n128 t = loader.get_template(template_name)\n129 \n130 return HttpResponseNotFound(t.render(page.context_dict()))\n131 \n132 \n133 @pageview_decorator\n134 def server_error(request, template_name=\"500.html\"):\n135 \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n136 if session.active_agency(request):\n137 session.update(request, origin=session.agency(request).index_url)\n138 else:\n139 session.update(request, origin=reverse(ROUTE_INDEX))\n140 \n141 home = viewmodels.Button.home(request)\n142 page = viewmodels.ErrorPage.server_error(button=home)\n143 t = loader.get_template(template_name)\n144 \n145 return HttpResponseServerError(t.render(page.context_dict()))\n146 \n147 \n148 def logged_out(request):\n149 \"\"\"View handler for the final log out confirmation message.\"\"\"\n150 page = viewmodels.Page(\n151 title=_(\"core.pages.logged_out.title\"),\n152 icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n153 )\n154 \n155 return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())\n156\n[end of benefits/core/views.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
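For context: the fix this record converges on keeps `agency_index` generic and derives the headline translation key from the selected agency. A minimal sketch of that idea, assuming the `TransitAgency` model exposes a `short_name` field (the accepted diff further down in this record uses exactly this interpolation):

```python
# Sketch of the agency_index body only -- `agency.short_name` comes from the
# accepted patch below; everything else matches the view shown above.
def agency_index(request, agency):
    session.reset(request)
    session.update(request, agency=agency, origin=agency.index_url)

    button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse(ROUTE_ELIGIBILITY))

    page = viewmodels.Page(
        title=_("core.pages.agency_index.title"),
        # Build the PO key from the agency instead of hardcoding the MST variant.
        headline=_("core.pages.agency_index.headline%(transit_agency_short_name)s")
        % {"transit_agency_short_name": agency.short_name},
        button=button,
    )

    return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())
```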
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -49,7 +49,8 @@\n \n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n- headline=_(\"core.pages.agency_index.mst_cc.headline\"),\n+ headline=_(\"core.pages.agency_index.headline%(transit_agency_short_name)s\")\n+ % {\"transit_agency_short_name\": agency.short_name},\n button=button,\n )\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/benefits/core/views.py b/benefits/core/views.py\\n--- a/benefits/core/views.py\\n+++ b/benefits/core/views.py\\n@@ -49,7 +49,8 @@\\n \\n page = viewmodels.Page(\\n title=_(\\\"core.pages.agency_index.title\\\"),\\n- headline=_(\\\"core.pages.agency_index.mst_cc.headline\\\"),\\n+ headline=_(\\\"core.pages.agency_index.headline%(transit_agency_short_name)s\\\")\\n+ % {\\\"transit_agency_short_name\\\": agency.short_name},\\n button=button,\\n )\\n\", \"issue\": \"Refactor Agency dynamic headline into model prop\\nRight now we are hardcoding the [Agency index headline PO key](https://github.com/cal-itp/benefits/blob/dev/benefits/core/views.py#L62):\\r\\n\\r\\n```python\\r\\npage = viewmodels.Page(\\r\\n title=_(\\\"core.pages.agency_index.title\\\"),\\r\\n headline=_(\\\"core.pages.agency_index.mst_cc.headline\\\"),\\r\\n button=button,\\r\\n classes=\\\"home\\\",\\r\\n )\\r\\n```\\r\\n\\r\\nThis is fine for MST. We need to make this a dynamic key coming from an `agency` prop for the future.\\r\\n\\r\\n## Acceptance Criteria\\r\\n\\r\\n\\r\\n\\r\\n- [ ] `agency_index` gets its headline from the selected `agency`\\r\\n\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"\\nThe core application: view definition for the root of the webapp.\\n\\\"\\\"\\\"\\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\\nfrom django.template import loader\\nfrom django.template.response import TemplateResponse\\nfrom django.urls import reverse\\nfrom django.utils.translation import pgettext, gettext as _\\n\\nfrom . 
import models, session, viewmodels\\nfrom .middleware import pageview_decorator\\n\\nROUTE_INDEX = \\\"core:index\\\"\\nROUTE_ELIGIBILITY = \\\"eligibility:index\\\"\\nROUTE_HELP = \\\"core:help\\\"\\nROUTE_LOGGED_OUT = \\\"core:logged_out\\\"\\n\\nTEMPLATE_INDEX = \\\"core/index.html\\\"\\nTEMPLATE_AGENCY = \\\"core/agency_index.html\\\"\\nTEMPLATE_HELP = \\\"core/help.html\\\"\\nTEMPLATE_LOGGED_OUT = \\\"core/logged_out.html\\\"\\n\\n\\n@pageview_decorator\\ndef index(request):\\n \\\"\\\"\\\"View handler for the main entry page.\\\"\\\"\\\"\\n session.reset(request)\\n\\n page = viewmodels.Page(\\n title=_(\\\"core.pages.index.title\\\"),\\n headline=_(\\\"core.pages.index.headline\\\"),\\n modal=viewmodels.AgencySelector(\\n id=\\\"agency-selector\\\",\\n aria_labelledby_id=\\\"agency-selector-modal-label\\\",\\n button_text=_(\\\"core.pages.index.button\\\"),\\n ),\\n )\\n\\n return TemplateResponse(request, TEMPLATE_INDEX, page.context_dict())\\n\\n\\n@pageview_decorator\\ndef agency_index(request, agency):\\n \\\"\\\"\\\"View handler for an agency entry page.\\\"\\\"\\\"\\n session.reset(request)\\n session.update(request, agency=agency, origin=agency.index_url)\\n\\n button = viewmodels.Button.primary(text=_(\\\"core.pages.index.continue\\\"), url=reverse(ROUTE_ELIGIBILITY))\\n\\n page = viewmodels.Page(\\n title=_(\\\"core.pages.agency_index.title\\\"),\\n headline=_(\\\"core.pages.agency_index.mst_cc.headline\\\"),\\n button=button,\\n )\\n\\n return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\\n\\n\\n@pageview_decorator\\ndef agency_public_key(request, agency):\\n \\\"\\\"\\\"View handler returns an agency's public key as plain text.\\\"\\\"\\\"\\n return HttpResponse(agency.public_key_data, content_type=\\\"text/plain\\\")\\n\\n\\n@pageview_decorator\\ndef help(request):\\n \\\"\\\"\\\"View handler for the help page.\\\"\\\"\\\"\\n if session.active_agency(request):\\n agency = session.agency(request)\\n buttons = viewmodels.Button.agency_contact_links(agency)\\n else:\\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\\n\\n buttons.append(viewmodels.Button.home(request, _(\\\"core.buttons.back\\\")))\\n\\n page = viewmodels.Page(\\n title=_(\\\"core.buttons.help\\\"),\\n headline=_(\\\"core.buttons.help\\\"),\\n buttons=buttons,\\n )\\n\\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\\n\\n\\n@pageview_decorator\\ndef bad_request(request, exception, template_name=\\\"400.html\\\"):\\n \\\"\\\"\\\"View handler for HTTP 400 Bad Request responses.\\\"\\\"\\\"\\n if session.active_agency(request):\\n session.update(request, origin=session.agency(request).index_url)\\n else:\\n session.update(request, origin=reverse(ROUTE_INDEX))\\n\\n home = viewmodels.Button.home(request)\\n page = viewmodels.ErrorPage.server_error(button=home)\\n t = loader.get_template(template_name)\\n\\n return HttpResponseBadRequest(t.render(page.context_dict()))\\n\\n\\n@pageview_decorator\\ndef csrf_failure(request, reason):\\n \\\"\\\"\\\"\\n View handler for CSRF_FAILURE_VIEW with custom data.\\n \\\"\\\"\\\"\\n if session.active_agency(request):\\n session.update(request, origin=session.agency(request).index_url)\\n else:\\n session.update(request, origin=reverse(ROUTE_INDEX))\\n\\n home = viewmodels.Button.home(request)\\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\\n t = loader.get_template(\\\"400.html\\\")\\n\\n return 
HttpResponseNotFound(t.render(page.context_dict()))\\n\\n\\n@pageview_decorator\\ndef page_not_found(request, exception, template_name=\\\"404.html\\\"):\\n \\\"\\\"\\\"View handler for HTTP 404 Not Found responses.\\\"\\\"\\\"\\n if session.active_agency(request):\\n session.update(request, origin=session.agency(request).index_url)\\n else:\\n session.update(request, origin=reverse(ROUTE_INDEX))\\n\\n home = viewmodels.Button.home(request)\\n # show a more user-friendly message instead of not_found\\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\\n t = loader.get_template(template_name)\\n\\n return HttpResponseNotFound(t.render(page.context_dict()))\\n\\n\\n@pageview_decorator\\ndef server_error(request, template_name=\\\"500.html\\\"):\\n \\\"\\\"\\\"View handler for HTTP 500 Server Error responses.\\\"\\\"\\\"\\n if session.active_agency(request):\\n session.update(request, origin=session.agency(request).index_url)\\n else:\\n session.update(request, origin=reverse(ROUTE_INDEX))\\n\\n home = viewmodels.Button.home(request)\\n page = viewmodels.ErrorPage.server_error(button=home)\\n t = loader.get_template(template_name)\\n\\n return HttpResponseServerError(t.render(page.context_dict()))\\n\\n\\ndef logged_out(request):\\n \\\"\\\"\\\"View handler for the final log out confirmation message.\\\"\\\"\\\"\\n page = viewmodels.Page(\\n title=_(\\\"core.pages.logged_out.title\\\"),\\n icon=viewmodels.Icon(\\\"happybus\\\", pgettext(\\\"image alt text\\\", \\\"core.icons.happybus\\\")),\\n )\\n\\n return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())\\n\", \"path\": \"benefits/core/views.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2167,"string":"2,167"},"num_tokens_diff":{"kind":"number","value":123,"string":"123"}}},{"rowIdx":18159,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_13285"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"alltheplaces__alltheplaces-341"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nLa Veneciana (Argentina)\nIce cream shop.\r\n\r\nHTML webpage to scrape: http://www.laveneciana.com.ar/sucursales.html\n\n\n\n[start of locations/spiders/laveneciana.py]\n1 import scrapy\n2 import re\n3 from locations.items import GeojsonPointItem\n4 class LavenecianaSpider(scrapy.Spider):\n5 name = \"laveneciana\"\n6 allowed_domains = [\"www.laveneciana.com.ar\"]\n7 download_delay = 0.5\n8 start_urls = (\n9 'http://www.laveneciana.com.ar/sucursales.html',\n10 )\n11 def parse(self, response):\n12 stores = response.xpath('//div[@class=\"navigation-container\"]/div[@id=\"thumbs\"]/ul[@class=\"thumbs noscript\"]/li')\n13 for store in stores:\n14 addr_full_tel = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"image-desc\"]/text())').extract_first()\n15 location = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"ubicacion\"]/iframe/@src)').extract_first()\n16 position = re.findall(r\"ll=[0-9-.,]+\" ,location)\n17 id = re.findall(r\"cid=[0-9]+\" ,location)\n18 if(len(position)>0):\n19 lat =float( position[0][3:].split(',')[0])\n20 lon = float(position[0][3:].split(',')[1])\n21 id = id[0][4:]\n22 else:\n23 lat=''\n24 lon=''\n25 id=''\n26 addr_full = re.findall(r\"^[^()]{4}[^(.)]+\" , addr_full_tel)[0]\n27 phone_number = re.findall(r\"[0-9]{4}-[0-9]{4}\",addr_full_tel)\n28 
if(len(phone_number)>0):\n29 phone_number = phone_number[0]\n30 else:\n31 phone_number =''\n32 if(addr_full!=\"Direccion\"):\n33 properties = {\n34 'addr_full': addr_full,\n35 'phone':phone_number,\n36 'city': '',\n37 'state': '',\n38 'postcode':'',\n39 'ref': id,\n40 'website': response.url,\n41 'lat': lat,\n42 'lon': lon,\n43 }\n44 yield GeojsonPointItem(**properties)\n45\n[end of locations/spiders/laveneciana.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/locations/spiders/laveneciana.py b/locations/spiders/laveneciana.py\n--- a/locations/spiders/laveneciana.py\n+++ b/locations/spiders/laveneciana.py\n@@ -23,13 +23,9 @@\n lat=''\n lon=''\n id=''\n- addr_full = re.findall(r\"^[^()]{4}[^(.)]+\" , addr_full_tel)[0]\n- phone_number = re.findall(r\"[0-9]{4}-[0-9]{4}\",addr_full_tel)\n- if(len(phone_number)>0):\n- phone_number = phone_number[0]\n- else:\n- phone_number =''\n- if(addr_full!=\"Direccion\"):\n+ addr_full = addr_full_tel.split('Tel.: ')[0]\n+ phone_number = addr_full_tel.split('Tel.: ')[1]\n+ if(addr_full!=\"Direccion... \"):\n properties = {\n 'addr_full': addr_full,\n 'phone':phone_number,\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/locations/spiders/laveneciana.py b/locations/spiders/laveneciana.py\\n--- a/locations/spiders/laveneciana.py\\n+++ b/locations/spiders/laveneciana.py\\n@@ -23,13 +23,9 @@\\n lat=''\\n lon=''\\n id=''\\n- addr_full = re.findall(r\\\"^[^()]{4}[^(.)]+\\\" , addr_full_tel)[0]\\n- phone_number = re.findall(r\\\"[0-9]{4}-[0-9]{4}\\\",addr_full_tel)\\n- if(len(phone_number)>0):\\n- phone_number = phone_number[0]\\n- else:\\n- phone_number =''\\n- if(addr_full!=\\\"Direccion\\\"):\\n+ addr_full = addr_full_tel.split('Tel.: ')[0]\\n+ phone_number = addr_full_tel.split('Tel.: ')[1]\\n+ if(addr_full!=\\\"Direccion... 
\\\"):\\n properties = {\\n 'addr_full': addr_full,\\n 'phone':phone_number,\\n\", \"issue\": \"La Veneciana (Argentina)\\nIce cream shop.\\r\\n\\r\\nHTML webpage to scrape: http://www.laveneciana.com.ar/sucursales.html\\n\", \"before_files\": [{\"content\": \"import scrapy\\nimport re\\nfrom locations.items import GeojsonPointItem\\nclass LavenecianaSpider(scrapy.Spider):\\n name = \\\"laveneciana\\\"\\n allowed_domains = [\\\"www.laveneciana.com.ar\\\"]\\n download_delay = 0.5\\n start_urls = (\\n 'http://www.laveneciana.com.ar/sucursales.html',\\n )\\n def parse(self, response):\\n stores = response.xpath('//div[@class=\\\"navigation-container\\\"]/div[@id=\\\"thumbs\\\"]/ul[@class=\\\"thumbs noscript\\\"]/li')\\n for store in stores:\\n addr_full_tel = store.xpath('normalize-space(./div[@class=\\\"caption\\\"]/div[@class=\\\"image-desc\\\"]/text())').extract_first()\\n location = store.xpath('normalize-space(./div[@class=\\\"caption\\\"]/div[@class=\\\"ubicacion\\\"]/iframe/@src)').extract_first()\\n position = re.findall(r\\\"ll=[0-9-.,]+\\\" ,location)\\n id = re.findall(r\\\"cid=[0-9]+\\\" ,location)\\n if(len(position)>0):\\n lat =float( position[0][3:].split(',')[0])\\n lon = float(position[0][3:].split(',')[1])\\n id = id[0][4:]\\n else:\\n lat=''\\n lon=''\\n id=''\\n addr_full = re.findall(r\\\"^[^()]{4}[^(.)]+\\\" , addr_full_tel)[0]\\n phone_number = re.findall(r\\\"[0-9]{4}-[0-9]{4}\\\",addr_full_tel)\\n if(len(phone_number)>0):\\n phone_number = phone_number[0]\\n else:\\n phone_number =''\\n if(addr_full!=\\\"Direccion\\\"):\\n properties = {\\n 'addr_full': addr_full,\\n 'phone':phone_number,\\n 'city': '',\\n 'state': '',\\n 'postcode':'',\\n 'ref': id,\\n 'website': response.url,\\n 'lat': lat,\\n 'lon': lon,\\n }\\n yield GeojsonPointItem(**properties)\\n\", \"path\": \"locations/spiders/laveneciana.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1080,"string":"1,080"},"num_tokens_diff":{"kind":"number","value":219,"string":"219"}}},{"rowIdx":18160,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_60373"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"UTNkar__moore-151"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nParagraph block alignment\n\r\n\r\nSee image:\r\n\r\n![image](https://user-images.githubusercontent.com/29704138/27686692-2cadaf20-5cd4-11e7-92bf-caa849baa276.png)\r\n\r\n[Description of the issue]\r\n\r\n### Steps to Reproduce\r\n\r\n1. [First Step]\r\n2. [Second Step]\r\n3. 
[and so on...]\r\n\r\n\r\n\n\n\n\n[start of website/blocks/models.py]\n1 from wagtail.wagtailcore import blocks\n2 from wagtail.wagtailimages.blocks import ImageChooserBlock\n3 \n4 from django.utils.translation import ugettext_lazy as _\n5 \n6 \n7 class CountersBlock(blocks.StructBlock):\n8 title = blocks.CharBlock()\n9 counters = blocks.ListBlock(blocks.StructBlock([\n10 ('icon', blocks.CharBlock(\n11 help_text=_('Material icon font icon text, as found on: '\n12 'https://material.io/icons'),\n13 )),\n14 ('value', blocks.CharBlock()),\n15 ('description', blocks.CharBlock(required=False))\n16 ]))\n17 style = blocks.ChoiceBlock(choices=[\n18 ('light', _('Light')),\n19 ('dark', _('Dark')),\n20 ])\n21 \n22 class Meta:\n23 label = _('Counters')\n24 icon = 'fa-balance-scale'\n25 template = 'blocks/counter.html'\n26 \n27 \n28 class HeadingBlock(blocks.StructBlock):\n29 title = blocks.CharBlock(required=True)\n30 subtitle = blocks.CharBlock(required=False)\n31 \n32 class Meta:\n33 label = _('Heading')\n34 icon = 'fa-header'\n35 template = 'blocks/title.html'\n36 \n37 \n38 class ImageDescriptionBlock(blocks.StructBlock):\n39 description = blocks.RichTextBlock()\n40 image = ImageChooserBlock()\n41 image_alignment = blocks.ChoiceBlock(choices=[\n42 ('left', _('Left')),\n43 ('right', _('Right')),\n44 ])\n45 hide_on_med = blocks.BooleanBlock(required=False)\n46 \n47 class Meta:\n48 label = _('Image + Description')\n49 icon = 'fa-file-image-o '\n50 template = 'blocks/image_description.html'\n51 \n52 \n53 class ImageIconsBlock(blocks.StructBlock):\n54 title = blocks.CharBlock()\n55 image = ImageChooserBlock()\n56 image_alignment = blocks.ChoiceBlock(choices=[\n57 ('left', _('Left')),\n58 ('right', _('Right')),\n59 ])\n60 icons = blocks.ListBlock(blocks.StructBlock([\n61 ('icon', blocks.CharBlock(\n62 help_text=_('Material icon font icon text, as found on: '\n63 'https://material.io/icons'),\n64 )),\n65 ('title', blocks.CharBlock()),\n66 ('description', blocks.CharBlock())\n67 ]))\n68 hide_on_med = blocks.BooleanBlock(required=False)\n69 \n70 class Meta:\n71 label = _('Image + Icons')\n72 icon = 'fa-file-excel-o'\n73 template = 'blocks/image_icons.html'\n74 \n75 \n76 class OverlayBlock(blocks.StructBlock):\n77 image = ImageChooserBlock()\n78 title = blocks.CharBlock(required=False)\n79 description = blocks.CharBlock(required=False)\n80 \n81 link = blocks.URLBlock(required=False)\n82 button = blocks.CharBlock(required=False)\n83 \n84 class Meta:\n85 label = _('Image overlay')\n86 icon = 'fa-clone'\n87 template = 'blocks/overlay.html'\n88 \n89 \n90 WAGTAIL_STATIC_BLOCKTYPES = [\n91 ('heading', HeadingBlock()),\n92 ('paragraph', blocks.RichTextBlock()),\n93 ('image_description', ImageIconsBlock()),\n94 ('image_icons', ImageDescriptionBlock()),\n95 ('overlay', OverlayBlock()),\n96 ('logos', blocks.ListBlock(\n97 ImageChooserBlock(),\n98 icon='fa-pied-piper',\n99 template='blocks/logos.html',\n100 label=_('Logos'),\n101 )),\n102 ('counters', CountersBlock()),\n103 ('image', ImageChooserBlock(template='blocks/image.html')),\n104 ]\n105\n[end of website/blocks/models.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
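For context: the misalignment shown above appears to come from the bare `paragraph` entry being the only block type rendered without its own template. A minimal sketch of the fix this record lands on, assuming a `blocks/paragraph.html` template that applies the same container markup as the other block templates:

```python
# Sketch only -- the key change is giving the paragraph block a template,
# as the accepted diff below does; the other entries stay as listed above.
WAGTAIL_STATIC_BLOCKTYPES = [
    ('heading', HeadingBlock()),
    ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')),
    ('image_description', ImageIconsBlock()),
    ('image_icons', ImageDescriptionBlock()),
    ('overlay', OverlayBlock()),
    # ... remaining entries unchanged ...
]
```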
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/website/blocks/models.py b/website/blocks/models.py\n--- a/website/blocks/models.py\n+++ b/website/blocks/models.py\n@@ -89,7 +89,7 @@\n \n WAGTAIL_STATIC_BLOCKTYPES = [\n ('heading', HeadingBlock()),\n- ('paragraph', blocks.RichTextBlock()),\n+ ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')),\n ('image_description', ImageIconsBlock()),\n ('image_icons', ImageDescriptionBlock()),\n ('overlay', OverlayBlock()),\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/website/blocks/models.py b/website/blocks/models.py\\n--- a/website/blocks/models.py\\n+++ b/website/blocks/models.py\\n@@ -89,7 +89,7 @@\\n \\n WAGTAIL_STATIC_BLOCKTYPES = [\\n ('heading', HeadingBlock()),\\n- ('paragraph', blocks.RichTextBlock()),\\n+ ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')),\\n ('image_description', ImageIconsBlock()),\\n ('image_icons', ImageDescriptionBlock()),\\n ('overlay', OverlayBlock()),\\n\", \"issue\": \"Paragraph block alignment\\n\\r\\n\\r\\nSee image:\\r\\n\\r\\n![image](https://user-images.githubusercontent.com/29704138/27686692-2cadaf20-5cd4-11e7-92bf-caa849baa276.png)\\r\\n\\r\\n[Description of the issue]\\r\\n\\r\\n### Steps to Reproduce\\r\\n\\r\\n1. [First Step]\\r\\n2. [Second Step]\\r\\n3. 
[and so on...]\\r\\n\\r\\n\\r\\n\\n\", \"before_files\": [{\"content\": \"from wagtail.wagtailcore import blocks\\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\\n\\nfrom django.utils.translation import ugettext_lazy as _\\n\\n\\nclass CountersBlock(blocks.StructBlock):\\n title = blocks.CharBlock()\\n counters = blocks.ListBlock(blocks.StructBlock([\\n ('icon', blocks.CharBlock(\\n help_text=_('Material icon font icon text, as found on: '\\n 'https://material.io/icons'),\\n )),\\n ('value', blocks.CharBlock()),\\n ('description', blocks.CharBlock(required=False))\\n ]))\\n style = blocks.ChoiceBlock(choices=[\\n ('light', _('Light')),\\n ('dark', _('Dark')),\\n ])\\n\\n class Meta:\\n label = _('Counters')\\n icon = 'fa-balance-scale'\\n template = 'blocks/counter.html'\\n\\n\\nclass HeadingBlock(blocks.StructBlock):\\n title = blocks.CharBlock(required=True)\\n subtitle = blocks.CharBlock(required=False)\\n\\n class Meta:\\n label = _('Heading')\\n icon = 'fa-header'\\n template = 'blocks/title.html'\\n\\n\\nclass ImageDescriptionBlock(blocks.StructBlock):\\n description = blocks.RichTextBlock()\\n image = ImageChooserBlock()\\n image_alignment = blocks.ChoiceBlock(choices=[\\n ('left', _('Left')),\\n ('right', _('Right')),\\n ])\\n hide_on_med = blocks.BooleanBlock(required=False)\\n\\n class Meta:\\n label = _('Image + Description')\\n icon = 'fa-file-image-o '\\n template = 'blocks/image_description.html'\\n\\n\\nclass ImageIconsBlock(blocks.StructBlock):\\n title = blocks.CharBlock()\\n image = ImageChooserBlock()\\n image_alignment = blocks.ChoiceBlock(choices=[\\n ('left', _('Left')),\\n ('right', _('Right')),\\n ])\\n icons = blocks.ListBlock(blocks.StructBlock([\\n ('icon', blocks.CharBlock(\\n help_text=_('Material icon font icon text, as found on: '\\n 'https://material.io/icons'),\\n )),\\n ('title', blocks.CharBlock()),\\n ('description', blocks.CharBlock())\\n ]))\\n hide_on_med = blocks.BooleanBlock(required=False)\\n\\n class Meta:\\n label = _('Image + Icons')\\n icon = 'fa-file-excel-o'\\n template = 'blocks/image_icons.html'\\n\\n\\nclass OverlayBlock(blocks.StructBlock):\\n image = ImageChooserBlock()\\n title = blocks.CharBlock(required=False)\\n description = blocks.CharBlock(required=False)\\n\\n link = blocks.URLBlock(required=False)\\n button = blocks.CharBlock(required=False)\\n\\n class Meta:\\n label = _('Image overlay')\\n icon = 'fa-clone'\\n template = 'blocks/overlay.html'\\n\\n\\nWAGTAIL_STATIC_BLOCKTYPES = [\\n ('heading', HeadingBlock()),\\n ('paragraph', blocks.RichTextBlock()),\\n ('image_description', ImageIconsBlock()),\\n ('image_icons', ImageDescriptionBlock()),\\n ('overlay', OverlayBlock()),\\n ('logos', blocks.ListBlock(\\n ImageChooserBlock(),\\n icon='fa-pied-piper',\\n template='blocks/logos.html',\\n label=_('Logos'),\\n )),\\n ('counters', CountersBlock()),\\n ('image', ImageChooserBlock(template='blocks/image.html')),\\n]\\n\", \"path\": \"website/blocks/models.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1555,"string":"1,555"},"num_tokens_diff":{"kind":"number","value":117,"string":"117"}}},{"rowIdx":18161,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_16424"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"pyinstaller__pyinstaller-3520"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nlibeay32.dll and 
ssleay32.dll needs to be manually bundled to use PyQt5.QNetwork with SSL\nIf you are having errors like:\r\n\r\n```\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSLv23_client_method\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSL_CTX_new\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSL_library_init\r\nqt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error\r\nqt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error\r\n```\r\n\r\nwith PyInstaller and PyQt5 on Windows, you need to manually add libeay32.dll and ssleay32.dll from your PyQt5 site-packages (probably located somewhere in `PyQt5\\Qt\\bin\\`) to your output dir or your frozen binary in a similar path.\r\n\r\nIn my final specfile, it looks like this:\r\n\r\n```python\r\n# -*- mode: python -*-\r\n\r\nblock_cipher = None\r\n\r\n\r\na = Analysis(['cddagl\\\\launcher.py'],\r\n pathex=['C:\\\\Program Files (x86)\\\\Windows Kits\\\\10\\\\Redist\\\\ucrt\\\\DLLs\\\\x86\\\\', 'C:\\\\Users\\\\remy\\\\Projects\\\\CDDA-Game-Launcher'],\r\n binaries=[('C:\\\\Users\\\\remy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\lib\\\\site-packages\\\\PyQt5\\\\Qt\\\\bin\\\\libeay32.dll', 'PyQt5\\\\Qt\\\\bin'), ('C:\\\\Users\\\\remy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\lib\\\\site-packages\\\\PyQt5\\\\Qt\\\\bin\\\\ssleay32.dll', 'PyQt5\\\\Qt\\\\bin')],\r\n datas=[('alembic', 'alembic'), ('bin/updated.bat', '.'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('C:\\\\Users\\\\remy\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python36-32\\\\unrar.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],\r\n hiddenimports=['lxml.cssselect', 'babel.numbers'],\r\n hookspath=[],\r\n runtime_hooks=[],\r\n excludes=[],\r\n win_no_prefer_redirects=False,\r\n win_private_assemblies=False,\r\n cipher=block_cipher)\r\npyz = PYZ(a.pure, a.zipped_data,\r\n cipher=block_cipher)\r\nexe = EXE(pyz,\r\n a.scripts,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas,\r\n name='launcher',\r\n debug=True,\r\n strip=False,\r\n upx=False,\r\n runtime_tmpdir=None,\r\n console=True , icon='cddagl\\\\resources\\\\launcher.ico')\r\n```\r\n\r\nI'm not sure how this can be improved, but this solution should work if you are having a similar issue. This was tested with **PyInstaller==3.4.dev0+1033a8770**\n\n\n\n[start of PyInstaller/hooks/hook-PyQt5.QtNetwork.py]\n1 #-----------------------------------------------------------------------------\n2 # Copyright (c) 2013-2018, PyInstaller Development Team.\n3 #\n4 # Distributed under the terms of the GNU General Public License with exception\n5 # for distributing bootloader.\n6 #\n7 # The full license is in the file COPYING.txt, distributed with this software.\n8 #-----------------------------------------------------------------------------\n9 from PyInstaller.utils.hooks import add_qt5_dependencies\n10 \n11 hiddenimports, binaries, datas = add_qt5_dependencies(__file__)\n12\n[end of PyInstaller/hooks/hook-PyQt5.QtNetwork.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
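For context: the spec-file workaround quoted above can be folded into the Qt network hook itself, which is what the accepted patch below does. A minimal sketch, assuming `pyqt5_library_info.location['BinariesPath']` points at the Qt `bin` directory that ships the OpenSSL DLLs:

```python
# Sketch of hook-PyQt5.QtNetwork.py -- mirrors the accepted diff below; only
# meaningful on Windows builds where libeay32.dll / ssleay32.dll are present.
import os.path

from PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies
from PyInstaller.compat import is_win

hiddenimports, binaries, datas = add_qt5_dependencies(__file__)

if is_win:
    qt_bin = pyqt5_library_info.location['BinariesPath']
    dest = os.path.join('PyQt5', 'Qt', 'bin')
    binaries += [
        (os.path.join(qt_bin, 'libeay32.dll'), dest),
        (os.path.join(qt_bin, 'ssleay32.dll'), dest),
    ]
```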
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n--- a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n+++ b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n@@ -6,6 +6,21 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n-from PyInstaller.utils.hooks import add_qt5_dependencies\n+import os.path\n+\n+from PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies\n+from PyInstaller.compat import is_win\n \n hiddenimports, binaries, datas = add_qt5_dependencies(__file__)\n+\n+# Add libraries needed for SSL. See issue #3520.\n+if is_win:\n+ rel_data_path = ['PyQt5', 'Qt', 'bin']\n+ binaries += [\n+ (os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ 'libeay32.dll'),\n+ os.path.join(*rel_data_path)),\n+ (os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ 'ssleay32.dll'),\n+ os.path.join(*rel_data_path))\n+ ]\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\\n--- a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\\n+++ b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\\n@@ -6,6 +6,21 @@\\n #\\n # The full license is in the file COPYING.txt, distributed with this software.\\n #-----------------------------------------------------------------------------\\n-from PyInstaller.utils.hooks import add_qt5_dependencies\\n+import os.path\\n+\\n+from PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies\\n+from PyInstaller.compat import is_win\\n \\n hiddenimports, binaries, datas = add_qt5_dependencies(__file__)\\n+\\n+# Add libraries needed for SSL. 
See issue #3520.\\n+if is_win:\\n+ rel_data_path = ['PyQt5', 'Qt', 'bin']\\n+ binaries += [\\n+ (os.path.join(pyqt5_library_info.location['BinariesPath'],\\n+ 'libeay32.dll'),\\n+ os.path.join(*rel_data_path)),\\n+ (os.path.join(pyqt5_library_info.location['BinariesPath'],\\n+ 'ssleay32.dll'),\\n+ os.path.join(*rel_data_path))\\n+ ]\\n\", \"issue\": \"libeay32.dll and ssleay32.dll needs to be manually bundled to use PyQt5.QNetwork with SSL\\nIf you are having errors like:\\r\\n\\r\\n```\\r\\nqt.network.ssl: QSslSocket: cannot call unresolved function SSLv23_client_method\\r\\nqt.network.ssl: QSslSocket: cannot call unresolved function SSL_CTX_new\\r\\nqt.network.ssl: QSslSocket: cannot call unresolved function SSL_library_init\\r\\nqt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error\\r\\nqt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error\\r\\n```\\r\\n\\r\\nwith PyInstaller and PyQt5 on Windows, you need to manually add libeay32.dll and ssleay32.dll from your PyQt5 site-packages (probably located somewhere in `PyQt5\\\\Qt\\\\bin\\\\`) to your output dir or your frozen binary in a similar path.\\r\\n\\r\\nIn my final specfile, it looks like this:\\r\\n\\r\\n```python\\r\\n# -*- mode: python -*-\\r\\n\\r\\nblock_cipher = None\\r\\n\\r\\n\\r\\na = Analysis(['cddagl\\\\\\\\launcher.py'],\\r\\n pathex=['C:\\\\\\\\Program Files (x86)\\\\\\\\Windows Kits\\\\\\\\10\\\\\\\\Redist\\\\\\\\ucrt\\\\\\\\DLLs\\\\\\\\x86\\\\\\\\', 'C:\\\\\\\\Users\\\\\\\\remy\\\\\\\\Projects\\\\\\\\CDDA-Game-Launcher'],\\r\\n binaries=[('C:\\\\\\\\Users\\\\\\\\remy\\\\\\\\VirtualEnvs\\\\\\\\CDDA-Game-Launcher\\\\\\\\lib\\\\\\\\site-packages\\\\\\\\PyQt5\\\\\\\\Qt\\\\\\\\bin\\\\\\\\libeay32.dll', 'PyQt5\\\\\\\\Qt\\\\\\\\bin'), ('C:\\\\\\\\Users\\\\\\\\remy\\\\\\\\VirtualEnvs\\\\\\\\CDDA-Game-Launcher\\\\\\\\lib\\\\\\\\site-packages\\\\\\\\PyQt5\\\\\\\\Qt\\\\\\\\bin\\\\\\\\ssleay32.dll', 'PyQt5\\\\\\\\Qt\\\\\\\\bin')],\\r\\n datas=[('alembic', 'alembic'), ('bin/updated.bat', '.'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('C:\\\\\\\\Users\\\\\\\\remy\\\\\\\\AppData\\\\\\\\Local\\\\\\\\Programs\\\\\\\\Python\\\\\\\\Python36-32\\\\\\\\unrar.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],\\r\\n hiddenimports=['lxml.cssselect', 'babel.numbers'],\\r\\n hookspath=[],\\r\\n runtime_hooks=[],\\r\\n excludes=[],\\r\\n win_no_prefer_redirects=False,\\r\\n win_private_assemblies=False,\\r\\n cipher=block_cipher)\\r\\npyz = PYZ(a.pure, a.zipped_data,\\r\\n cipher=block_cipher)\\r\\nexe = EXE(pyz,\\r\\n a.scripts,\\r\\n a.binaries,\\r\\n a.zipfiles,\\r\\n a.datas,\\r\\n name='launcher',\\r\\n debug=True,\\r\\n strip=False,\\r\\n upx=False,\\r\\n runtime_tmpdir=None,\\r\\n console=True , icon='cddagl\\\\\\\\resources\\\\\\\\launcher.ico')\\r\\n```\\r\\n\\r\\nI'm not sure how this can be improved, but this solution should work if you are having a similar issue. 
This was tested with **PyInstaller==3.4.dev0+1033a8770**\\n\", \"before_files\": [{\"content\": \"#-----------------------------------------------------------------------------\\n# Copyright (c) 2013-2018, PyInstaller Development Team.\\n#\\n# Distributed under the terms of the GNU General Public License with exception\\n# for distributing bootloader.\\n#\\n# The full license is in the file COPYING.txt, distributed with this software.\\n#-----------------------------------------------------------------------------\\nfrom PyInstaller.utils.hooks import add_qt5_dependencies\\n\\nhiddenimports, binaries, datas = add_qt5_dependencies(__file__)\\n\", \"path\": \"PyInstaller/hooks/hook-PyQt5.QtNetwork.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1404,"string":"1,404"},"num_tokens_diff":{"kind":"number","value":278,"string":"278"}}},{"rowIdx":18162,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_9014"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"stephenmcd__mezzanine-1517"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nDuplicate form fields in admin for user profiles\nAs discussed here:\n\nhttps://groups.google.com/forum/#!topic/mezzanine-users/3QmiqfNZjUM\n\n\n\n\n[start of mezzanine/accounts/admin.py]\n1 from __future__ import unicode_literals\n2 \n3 from django.contrib import admin\n4 from django.contrib.auth import get_user_model\n5 from mezzanine.accounts import get_profile_model, ProfileNotConfigured\n6 \n7 from mezzanine.core.admin import SitePermissionUserAdmin\n8 from mezzanine.conf import settings\n9 from mezzanine.utils.email import send_approved_mail, send_verification_mail\n10 \n11 \n12 User = get_user_model()\n13 \n14 user_list_display = SitePermissionUserAdmin.list_display\n15 user_list_display += (\"is_active\", \"date_joined\", \"last_login\")\n16 \n17 \n18 class UserProfileAdmin(SitePermissionUserAdmin):\n19 \n20 list_display = user_list_display\n21 \n22 def save_model(self, request, obj, form, change):\n23 \"\"\"\n24 If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``,\n25 send a notification email to the user being saved if their\n26 ``active`` status has changed to ``True``.\n27 If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``,\n28 send a verification email instead.\n29 \"\"\"\n30 must_send_verification_mail_after_save = False\n31 if change and settings.ACCOUNTS_APPROVAL_REQUIRED:\n32 if obj.is_active and not User.objects.get(id=obj.id).is_active:\n33 if settings.ACCOUNTS_VERIFICATION_REQUIRED:\n34 # Accounts verification requires an inactive account\n35 obj.is_active = False\n36 # The token generated by send_verification_mail()\n37 # must match the _saved_ User object,\n38 # so postpone send_verification_mail() until later\n39 must_send_verification_mail_after_save = True\n40 else:\n41 send_approved_mail(request, obj)\n42 super(UserProfileAdmin, self).save_model(request, obj, form, change)\n43 if must_send_verification_mail_after_save:\n44 user = User.objects.get(id=obj.id)\n45 send_verification_mail(request, user, \"signup_verify\")\n46 \n47 \n48 try:\n49 class ProfileInline(admin.StackedInline):\n50 model = get_profile_model()\n51 can_delete = False\n52 template = \"admin/profile_inline.html\"\n53 extra = 0\n54 UserProfileAdmin.inlines += (ProfileInline,)\n55 except ProfileNotConfigured:\n56 pass\n57 \n58 \n59 if User in 
admin.site._registry:\n60 admin.site.unregister(User)\n61 admin.site.register(User, UserProfileAdmin)\n62\n[end of mezzanine/accounts/admin.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/mezzanine/accounts/admin.py b/mezzanine/accounts/admin.py\n--- a/mezzanine/accounts/admin.py\n+++ b/mezzanine/accounts/admin.py\n@@ -51,6 +51,13 @@\n can_delete = False\n template = \"admin/profile_inline.html\"\n extra = 0\n+\n+ def get_min_num(self, request, obj=None, **kwargs):\n+ \"\"\"This causes profile forms to be shown when editing but hidden\n+ when creating. If min_num is fixed at 1, Django's initial user\n+ creation form fails if the profile model has a required field.\"\"\"\n+ return 0 if obj is None else 1\n+\n UserProfileAdmin.inlines += (ProfileInline,)\n except ProfileNotConfigured:\n pass\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/mezzanine/accounts/admin.py b/mezzanine/accounts/admin.py\\n--- a/mezzanine/accounts/admin.py\\n+++ b/mezzanine/accounts/admin.py\\n@@ -51,6 +51,13 @@\\n can_delete = False\\n template = \\\"admin/profile_inline.html\\\"\\n extra = 0\\n+\\n+ def get_min_num(self, request, obj=None, **kwargs):\\n+ \\\"\\\"\\\"This causes profile forms to be shown when editing but hidden\\n+ when creating. 
If min_num is fixed at 1, Django's initial user\\n+ creation form fails if the profile model has a required field.\\\"\\\"\\\"\\n+ return 0 if obj is None else 1\\n+\\n UserProfileAdmin.inlines += (ProfileInline,)\\n except ProfileNotConfigured:\\n pass\\n\", \"issue\": \"Duplicate form fields in admin for user profiles\\nAs discussed here:\\n\\nhttps://groups.google.com/forum/#!topic/mezzanine-users/3QmiqfNZjUM\\n\\n\", \"before_files\": [{\"content\": \"from __future__ import unicode_literals\\n\\nfrom django.contrib import admin\\nfrom django.contrib.auth import get_user_model\\nfrom mezzanine.accounts import get_profile_model, ProfileNotConfigured\\n\\nfrom mezzanine.core.admin import SitePermissionUserAdmin\\nfrom mezzanine.conf import settings\\nfrom mezzanine.utils.email import send_approved_mail, send_verification_mail\\n\\n\\nUser = get_user_model()\\n\\nuser_list_display = SitePermissionUserAdmin.list_display\\nuser_list_display += (\\\"is_active\\\", \\\"date_joined\\\", \\\"last_login\\\")\\n\\n\\nclass UserProfileAdmin(SitePermissionUserAdmin):\\n\\n list_display = user_list_display\\n\\n def save_model(self, request, obj, form, change):\\n \\\"\\\"\\\"\\n If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``,\\n send a notification email to the user being saved if their\\n ``active`` status has changed to ``True``.\\n If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``,\\n send a verification email instead.\\n \\\"\\\"\\\"\\n must_send_verification_mail_after_save = False\\n if change and settings.ACCOUNTS_APPROVAL_REQUIRED:\\n if obj.is_active and not User.objects.get(id=obj.id).is_active:\\n if settings.ACCOUNTS_VERIFICATION_REQUIRED:\\n # Accounts verification requires an inactive account\\n obj.is_active = False\\n # The token generated by send_verification_mail()\\n # must match the _saved_ User object,\\n # so postpone send_verification_mail() until later\\n must_send_verification_mail_after_save = True\\n else:\\n send_approved_mail(request, obj)\\n super(UserProfileAdmin, self).save_model(request, obj, form, change)\\n if must_send_verification_mail_after_save:\\n user = User.objects.get(id=obj.id)\\n send_verification_mail(request, user, \\\"signup_verify\\\")\\n\\n\\ntry:\\n class ProfileInline(admin.StackedInline):\\n model = get_profile_model()\\n can_delete = False\\n template = \\\"admin/profile_inline.html\\\"\\n extra = 0\\n UserProfileAdmin.inlines += (ProfileInline,)\\nexcept ProfileNotConfigured:\\n pass\\n\\n\\nif User in admin.site._registry:\\n admin.site.unregister(User)\\nadmin.site.register(User, UserProfileAdmin)\\n\", \"path\": \"mezzanine/accounts/admin.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1156,"string":"1,156"},"num_tokens_diff":{"kind":"number","value":168,"string":"168"}}},{"rowIdx":18163,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_2536"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"optuna__optuna-122"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\n`TPESampler._sample_categorical` fails with PostgreSQL backend\n`TPESampler._sample_categorical` fails with PostgreSQL backend. 
This happens because:\r\n- `TPESampler._sample_categorical` returns an integer as `numpy.int32`.\r\n- The integer value is input to storage class without any cast.\r\n- SQLAlchemy with psycopg2 backend does not support `numpy.int32` input but does `int` one.\r\n\r\n**Repro Steps**\r\nWith any objective function using categorical sampling (e.g., example one in `chainer_mnist.py`), invoke `minimize` as:\r\n```\r\nstudy = pfnopt.create_study(storage=SOME_POSTGRES_URL)\r\npfnopt.minimize(objective, n_trials=100, study=study)\r\n```\r\n\r\nIt fails after running trials `n_startup_trails` times.\n\n\n\n[start of pfnopt/samplers/tpe.py]\n1 import math\n2 import numpy\n3 from typing import List # NOQA\n4 from typing import Optional # NOQA\n5 \n6 from pfnopt import distributions # NOQA\n7 from pfnopt.samplers import _hyperopt\n8 from pfnopt.samplers import base\n9 from pfnopt.samplers import random\n10 from pfnopt.storages.base import BaseStorage # NOQA\n11 \n12 \n13 class TPESampler(base.BaseSampler):\n14 \n15 def __init__(self,\n16 prior_weight=_hyperopt.default_prior_weight,\n17 n_startup_trials=_hyperopt.default_n_startup_trials,\n18 n_ei_candidates=_hyperopt.default_n_ei_candidates,\n19 gamma=_hyperopt.default_gamma,\n20 seed=None):\n21 # type: (float, int, int, float, Optional[int]) -> None\n22 self.prior_weight = prior_weight\n23 self.n_startup_trials = n_startup_trials\n24 self.n_ei_candidates = n_ei_candidates\n25 self.gamma = gamma\n26 self.seed = seed\n27 \n28 self.rng = numpy.random.RandomState(seed)\n29 self.random_sampler = random.RandomSampler(seed=seed)\n30 \n31 def sample(self, storage, study_id, param_name, param_distribution):\n32 # type: (BaseStorage, int, str, distributions.BaseDistribution) -> float\n33 observation_pairs = storage.get_trial_param_result_pairs(\n34 study_id, param_name)\n35 n = len(observation_pairs)\n36 \n37 # TODO(Akiba): this behavior is slightly different from hyperopt\n38 if n < self.n_startup_trials:\n39 return self.random_sampler.sample(storage, study_id, param_name, param_distribution)\n40 \n41 below_param_values, above_param_values = _hyperopt.ap_filter_trials(\n42 range(n), [p[0] for p in observation_pairs],\n43 range(n), [p[1] for p in observation_pairs],\n44 self.gamma)\n45 \n46 if isinstance(param_distribution, distributions.UniformDistribution):\n47 return self._sample_uniform(\n48 param_distribution, below_param_values, above_param_values)\n49 elif isinstance(param_distribution, distributions.LogUniformDistribution):\n50 return self._sample_loguniform(\n51 param_distribution, below_param_values, above_param_values)\n52 elif isinstance(param_distribution, distributions.CategoricalDistribution):\n53 return self._sample_categorical(\n54 param_distribution, below_param_values, above_param_values)\n55 else:\n56 raise NotImplementedError\n57 \n58 def _sample_uniform(self, distribution, below, above):\n59 # type: (distributions.UniformDistribution, List[float], List[float]) -> float\n60 return _hyperopt.sample_uniform(\n61 obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n62 low=distribution.low, high=distribution.high,\n63 size=(self.n_ei_candidates,), rng=self.rng)\n64 \n65 def _sample_loguniform(self, distribution, below, above):\n66 # type: (distributions.LogUniformDistribution, List[float], List[float]) -> float\n67 \n68 return _hyperopt.sample_loguniform(\n69 obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n70 # `sample_loguniform` generates values in [exp(low), exp(high)]\n71 low=math.log(distribution.low),\n72 
high=math.log(distribution.high),\n73 size=(self.n_ei_candidates,), rng=self.rng)\n74 \n75 def _sample_categorical(self, distribution, below, above):\n76 # type: (distributions.CategoricalDistribution, List[float], List[float]) -> float\n77 choices = distribution.choices\n78 below = list(map(int, below))\n79 above = list(map(int, above))\n80 idx = _hyperopt.sample_categorical(\n81 obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n82 upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\n83 return idx\n84\n[end of pfnopt/samplers/tpe.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/pfnopt/samplers/tpe.py b/pfnopt/samplers/tpe.py\n--- a/pfnopt/samplers/tpe.py\n+++ b/pfnopt/samplers/tpe.py\n@@ -80,4 +80,4 @@\n idx = _hyperopt.sample_categorical(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\n- return idx\n+ return int(idx)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/pfnopt/samplers/tpe.py b/pfnopt/samplers/tpe.py\\n--- a/pfnopt/samplers/tpe.py\\n+++ b/pfnopt/samplers/tpe.py\\n@@ -80,4 +80,4 @@\\n idx = _hyperopt.sample_categorical(\\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\\n- return idx\\n+ return int(idx)\\n\", \"issue\": \"`TPESampler._sample_categorical` fails with PostgreSQL backend\\n`TPESampler._sample_categorical` fails with PostgreSQL backend. 
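The one-line fix above (`return int(idx)`) works because the storage layer hands sampled parameters straight to SQLAlchemy, and the psycopg2 backend accepts built-in `int` but not NumPy scalar types. A minimal illustration of the cast, independent of the sampler code:

```python
# Sketch only -- shows the numpy.int32 -> int conversion behind `return int(idx)`.
import numpy as np

idx = np.int32(2)   # categorical sampling yields a NumPy integer
param = int(idx)    # plain Python int, safe to pass through SQLAlchemy/psycopg2

assert isinstance(param, int)
assert not isinstance(param, np.integer)
```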
This happens because:\\r\\n- `TPESampler._sample_categorical` returns an integer as `numpy.int32`.\\r\\n- The integer value is input to storage class without any cast.\\r\\n- SQLAlchemy with psycopg2 backend does not support `numpy.int32` input but does `int` one.\\r\\n\\r\\n**Repro Steps**\\r\\nWith any objective function using categorical sampling (e.g., example one in `chainer_mnist.py`), invoke `minimize` as:\\r\\n```\\r\\nstudy = pfnopt.create_study(storage=SOME_POSTGRES_URL)\\r\\npfnopt.minimize(objective, n_trials=100, study=study)\\r\\n```\\r\\n\\r\\nIt fails after running trials `n_startup_trails` times.\\n\", \"before_files\": [{\"content\": \"import math\\nimport numpy\\nfrom typing import List # NOQA\\nfrom typing import Optional # NOQA\\n\\nfrom pfnopt import distributions # NOQA\\nfrom pfnopt.samplers import _hyperopt\\nfrom pfnopt.samplers import base\\nfrom pfnopt.samplers import random\\nfrom pfnopt.storages.base import BaseStorage # NOQA\\n\\n\\nclass TPESampler(base.BaseSampler):\\n\\n def __init__(self,\\n prior_weight=_hyperopt.default_prior_weight,\\n n_startup_trials=_hyperopt.default_n_startup_trials,\\n n_ei_candidates=_hyperopt.default_n_ei_candidates,\\n gamma=_hyperopt.default_gamma,\\n seed=None):\\n # type: (float, int, int, float, Optional[int]) -> None\\n self.prior_weight = prior_weight\\n self.n_startup_trials = n_startup_trials\\n self.n_ei_candidates = n_ei_candidates\\n self.gamma = gamma\\n self.seed = seed\\n\\n self.rng = numpy.random.RandomState(seed)\\n self.random_sampler = random.RandomSampler(seed=seed)\\n\\n def sample(self, storage, study_id, param_name, param_distribution):\\n # type: (BaseStorage, int, str, distributions.BaseDistribution) -> float\\n observation_pairs = storage.get_trial_param_result_pairs(\\n study_id, param_name)\\n n = len(observation_pairs)\\n\\n # TODO(Akiba): this behavior is slightly different from hyperopt\\n if n < self.n_startup_trials:\\n return self.random_sampler.sample(storage, study_id, param_name, param_distribution)\\n\\n below_param_values, above_param_values = _hyperopt.ap_filter_trials(\\n range(n), [p[0] for p in observation_pairs],\\n range(n), [p[1] for p in observation_pairs],\\n self.gamma)\\n\\n if isinstance(param_distribution, distributions.UniformDistribution):\\n return self._sample_uniform(\\n param_distribution, below_param_values, above_param_values)\\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\\n return self._sample_loguniform(\\n param_distribution, below_param_values, above_param_values)\\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\\n return self._sample_categorical(\\n param_distribution, below_param_values, above_param_values)\\n else:\\n raise NotImplementedError\\n\\n def _sample_uniform(self, distribution, below, above):\\n # type: (distributions.UniformDistribution, List[float], List[float]) -> float\\n return _hyperopt.sample_uniform(\\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\\n low=distribution.low, high=distribution.high,\\n size=(self.n_ei_candidates,), rng=self.rng)\\n\\n def _sample_loguniform(self, distribution, below, above):\\n # type: (distributions.LogUniformDistribution, List[float], List[float]) -> float\\n\\n return _hyperopt.sample_loguniform(\\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\\n # `sample_loguniform` generates values in [exp(low), exp(high)]\\n low=math.log(distribution.low),\\n high=math.log(distribution.high),\\n 
size=(self.n_ei_candidates,), rng=self.rng)\\n\\n def _sample_categorical(self, distribution, below, above):\\n # type: (distributions.CategoricalDistribution, List[float], List[float]) -> float\\n choices = distribution.choices\\n below = list(map(int, below))\\n above = list(map(int, above))\\n idx = _hyperopt.sample_categorical(\\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\\n return idx\\n\", \"path\": \"pfnopt/samplers/tpe.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1665,"string":"1,665"},"num_tokens_diff":{"kind":"number","value":121,"string":"121"}}},{"rowIdx":18164,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_34056"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"modin-project__modin-2806"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\n[ASV] use `wait` function to get the right performance times\n\n\n\n\n[start of asv_bench/benchmarks/scalability/scalability_benchmarks.py]\n1 # Licensed to Modin Development Team under one or more contributor license agreements.\n2 # See the NOTICE file distributed with this work for additional information regarding\n3 # copyright ownership. The Modin Development Team licenses this file to you under the\n4 # Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n5 # compliance with the License. You may obtain a copy of the License at\n6 #\n7 # http://www.apache.org/licenses/LICENSE-2.0\n8 #\n9 # Unless required by applicable law or agreed to in writing, software distributed under\n10 # the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n11 # ANY KIND, either express or implied. 
See the License for the specific language\n12 # governing permissions and limitations under the License.\n13 \n14 import modin.pandas as pd\n15 from modin.pandas.utils import from_pandas\n16 from modin.utils import to_pandas\n17 import pandas\n18 \n19 from ..utils import (\n20 gen_data,\n21 generate_dataframe,\n22 RAND_LOW,\n23 RAND_HIGH,\n24 ASV_DATASET_SIZE,\n25 UNARY_OP_DATA_SIZE,\n26 execute,\n27 )\n28 \n29 \n30 class TimeFromPandas:\n31 param_names = [\"shape\", \"cpus\"]\n32 params = [\n33 UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n34 [4, 16, 32],\n35 ]\n36 \n37 def setup(self, shape, cpus):\n38 self.data = pandas.DataFrame(gen_data(\"int\", *shape, RAND_LOW, RAND_HIGH))\n39 from modin.config import NPartitions\n40 \n41 NPartitions.get = lambda: cpus\n42 # trigger ray init\n43 pd.DataFrame([])\n44 \n45 def time_from_pandas(self, shape, cpus):\n46 execute(from_pandas(self.data))\n47 \n48 \n49 class TimeToPandas:\n50 param_names = [\"shape\", \"cpus\"]\n51 params = [\n52 UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n53 [4, 16, 32],\n54 ]\n55 \n56 def setup(self, shape, cpus):\n57 from modin.config import NPartitions\n58 \n59 NPartitions.get = lambda: cpus\n60 self.data = generate_dataframe(\"modin\", \"int\", *shape, RAND_LOW, RAND_HIGH)\n61 \n62 def time_to_pandas(self, shape, cpus):\n63 execute(to_pandas(self.data))\n64\n[end of asv_bench/benchmarks/scalability/scalability_benchmarks.py]\n[start of asv_bench/benchmarks/utils.py]\n1 # Licensed to Modin Development Team under one or more contributor license agreements.\n2 # See the NOTICE file distributed with this work for additional information regarding\n3 # copyright ownership. The Modin Development Team licenses this file to you under the\n4 # Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n5 # compliance with the License. You may obtain a copy of the License at\n6 #\n7 # http://www.apache.org/licenses/LICENSE-2.0\n8 #\n9 # Unless required by applicable law or agreed to in writing, software distributed under\n10 # the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n11 # ANY KIND, either express or implied. 
See the License for the specific language\n12 # governing permissions and limitations under the License.\n13 \n14 import os\n15 import logging\n16 import modin.pandas as pd\n17 import pandas\n18 import numpy as np\n19 import uuid\n20 \n21 RAND_LOW = 0\n22 RAND_HIGH = 100\n23 random_state = np.random.RandomState(seed=42)\n24 \n25 \n26 try:\n27 from modin.config import NPartitions\n28 \n29 NPARTITIONS = NPartitions.get()\n30 except ImportError:\n31 NPARTITIONS = pd.DEFAULT_NPARTITIONS\n32 \n33 try:\n34 from modin.config import TestDatasetSize, AsvImplementation\n35 \n36 ASV_USE_IMPL = AsvImplementation.get()\n37 ASV_DATASET_SIZE = TestDatasetSize.get() or \"Small\"\n38 except ImportError:\n39 # The same benchmarking code can be run for different versions of Modin, so in\n40 # case of an error importing important variables, we'll just use predefined values\n41 ASV_USE_IMPL = os.environ.get(\"MODIN_ASV_USE_IMPL\", \"modin\")\n42 ASV_DATASET_SIZE = os.environ.get(\"MODIN_TEST_DATASET_SIZE\", \"Small\")\n43 \n44 assert ASV_USE_IMPL in (\"modin\", \"pandas\")\n45 \n46 BINARY_OP_DATA_SIZE = {\n47 \"Big\": [\n48 ((5000, 5000), (5000, 5000)),\n49 # the case extremely inefficient\n50 # ((20, 500_000), (10, 1_000_000)),\n51 ((500_000, 20), (1_000_000, 10)),\n52 ],\n53 \"Small\": [\n54 ((250, 250), (250, 250)),\n55 ((20, 10_000), (10, 25_000)),\n56 ((10_000, 20), (25_000, 10)),\n57 ],\n58 }\n59 \n60 UNARY_OP_DATA_SIZE = {\n61 \"Big\": [\n62 (5000, 5000),\n63 # the case extremely inefficient\n64 # (10, 1_000_000),\n65 (1_000_000, 10),\n66 ],\n67 \"Small\": [\n68 (250, 250),\n69 (10, 10_000),\n70 (10_000, 10),\n71 ],\n72 }\n73 \n74 GROUPBY_NGROUPS = {\n75 \"Big\": [100, \"huge_amount_groups\"],\n76 \"Small\": [5],\n77 }\n78 \n79 IMPL = {\n80 \"modin\": pd,\n81 \"pandas\": pandas,\n82 }\n83 \n84 \n85 def translator_groupby_ngroups(groupby_ngroups, shape):\n86 if ASV_DATASET_SIZE == \"Big\":\n87 if groupby_ngroups == \"huge_amount_groups\":\n88 return min(shape[0] // 2, 5000)\n89 return groupby_ngroups\n90 else:\n91 return groupby_ngroups\n92 \n93 \n94 class weakdict(dict):\n95 __slots__ = (\"__weakref__\",)\n96 \n97 \n98 data_cache = dict()\n99 dataframes_cache = dict()\n100 \n101 \n102 def gen_int_data(nrows, ncols, rand_low, rand_high):\n103 cache_key = (\"int\", nrows, ncols, rand_low, rand_high)\n104 if cache_key in data_cache:\n105 return data_cache[cache_key]\n106 \n107 logging.info(\n108 \"Generating int data {} rows and {} columns [{}-{}]\".format(\n109 nrows, ncols, rand_low, rand_high\n110 )\n111 )\n112 data = {\n113 \"col{}\".format(i): random_state.randint(rand_low, rand_high, size=(nrows))\n114 for i in range(ncols)\n115 }\n116 data_cache[cache_key] = weakdict(data)\n117 return data\n118 \n119 \n120 def gen_str_int_data(nrows, ncols, rand_low, rand_high):\n121 cache_key = (\"str_int\", nrows, ncols, rand_low, rand_high)\n122 if cache_key in data_cache:\n123 return data_cache[cache_key]\n124 \n125 logging.info(\n126 \"Generating str_int data {} rows and {} columns [{}-{}]\".format(\n127 nrows, ncols, rand_low, rand_high\n128 )\n129 )\n130 data = gen_int_data(nrows, ncols, rand_low, rand_high).copy()\n131 data[\"gb_col\"] = [\n132 \"str_{}\".format(random_state.randint(rand_low, rand_high)) for i in range(nrows)\n133 ]\n134 data_cache[cache_key] = weakdict(data)\n135 return data\n136 \n137 \n138 def gen_data(data_type, nrows, ncols, rand_low, rand_high):\n139 if data_type == \"int\":\n140 return gen_int_data(nrows, ncols, rand_low, rand_high)\n141 elif data_type == \"str_int\":\n142 return 
gen_str_int_data(nrows, ncols, rand_low, rand_high)\n143 else:\n144 assert False\n145 \n146 \n147 def generate_dataframe(\n148 impl,\n149 data_type,\n150 nrows,\n151 ncols,\n152 rand_low,\n153 rand_high,\n154 groupby_ncols=None,\n155 count_groups=None,\n156 ):\n157 assert not (\n158 (groupby_ncols is None) ^ (count_groups is None)\n159 ), \"You must either specify both parameters 'groupby_ncols' and 'count_groups' or none of them.\"\n160 \n161 if groupby_ncols and count_groups:\n162 ncols -= groupby_ncols\n163 cache_key = (\n164 impl,\n165 data_type,\n166 nrows,\n167 ncols,\n168 rand_low,\n169 rand_high,\n170 groupby_ncols,\n171 count_groups,\n172 )\n173 else:\n174 cache_key = (impl, data_type, nrows, ncols, rand_low, rand_high)\n175 \n176 if cache_key in dataframes_cache:\n177 return dataframes_cache[cache_key]\n178 \n179 logging.info(\n180 \"Allocating {} DataFrame {}: {} rows and {} columns [{}-{}]\".format(\n181 impl, data_type, nrows, ncols, rand_low, rand_high\n182 )\n183 )\n184 data = gen_data(data_type, nrows, ncols, rand_low, rand_high)\n185 \n186 if groupby_ncols and count_groups:\n187 groupby_columns = [f\"groupby_col{x}\" for x in range(groupby_ncols)]\n188 for groupby_col in groupby_columns:\n189 data[groupby_col] = np.tile(np.arange(count_groups), nrows // count_groups)\n190 \n191 if impl == \"modin\":\n192 df = pd.DataFrame(data)\n193 elif impl == \"pandas\":\n194 df = pandas.DataFrame(data)\n195 else:\n196 assert False\n197 \n198 if groupby_ncols and count_groups:\n199 dataframes_cache[cache_key] = df, groupby_columns\n200 return df, groupby_columns\n201 \n202 dataframes_cache[cache_key] = df\n203 return df\n204 \n205 \n206 def random_string():\n207 return str(uuid.uuid1())\n208 \n209 \n210 def random_columns(df_columns, columns_number):\n211 return list(random_state.choice(df_columns, size=columns_number))\n212 \n213 \n214 def random_booleans(number):\n215 return list(random_state.choice([True, False], size=number))\n216 \n217 \n218 def execute(df):\n219 \"Make sure the calculations are done.\"\n220 return df.shape, df.dtypes\n221 \n222 \n223 def get_shape_id(array):\n224 return \"_\".join([str(element) for element in array])\n225\n[end of asv_bench/benchmarks/utils.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/asv_bench/benchmarks/scalability/scalability_benchmarks.py b/asv_bench/benchmarks/scalability/scalability_benchmarks.py\n--- a/asv_bench/benchmarks/scalability/scalability_benchmarks.py\n+++ b/asv_bench/benchmarks/scalability/scalability_benchmarks.py\n@@ -60,4 +60,5 @@\n self.data = generate_dataframe(\"modin\", \"int\", *shape, RAND_LOW, RAND_HIGH)\n \n def time_to_pandas(self, shape, cpus):\n- execute(to_pandas(self.data))\n+ # to_pandas is already synchronous\n+ to_pandas(self.data)\ndiff --git a/asv_bench/benchmarks/utils.py b/asv_bench/benchmarks/utils.py\n--- a/asv_bench/benchmarks/utils.py\n+++ b/asv_bench/benchmarks/utils.py\n@@ -31,15 +31,17 @@\n NPARTITIONS = pd.DEFAULT_NPARTITIONS\n \n try:\n- from modin.config import TestDatasetSize, AsvImplementation\n+ from modin.config import TestDatasetSize, AsvImplementation, Engine\n \n ASV_USE_IMPL = AsvImplementation.get()\n ASV_DATASET_SIZE = TestDatasetSize.get() or \"Small\"\n+ ASV_USE_ENGINE = Engine.get()\n except ImportError:\n # The same benchmarking code can be run for different versions of Modin, so in\n # case of an error importing important variables, we'll just use predefined values\n ASV_USE_IMPL = os.environ.get(\"MODIN_ASV_USE_IMPL\", \"modin\")\n ASV_DATASET_SIZE = os.environ.get(\"MODIN_TEST_DATASET_SIZE\", \"Small\")\n+ ASV_USE_ENGINE = os.environ.get(\"MODIN_ENGINE\", \"Ray\")\n \n assert ASV_USE_IMPL in (\"modin\", \"pandas\")\n \n@@ -217,7 +219,24 @@\n \n def execute(df):\n \"Make sure the calculations are done.\"\n- return df.shape, df.dtypes\n+ if ASV_USE_IMPL == \"modin\":\n+ partitions = df._query_compiler._modin_frame._partitions\n+ map(lambda partition: partition.drain_call_queue(), partitions)\n+ if ASV_USE_ENGINE == \"Ray\":\n+ from ray import wait\n+\n+ map(lambda partition: wait(partition.oid), partitions)\n+ elif ASV_USE_ENGINE == \"Dask\":\n+ from dask.distributed import wait\n+\n+ map(lambda partition: wait(partition.future), partitions)\n+ elif ASV_USE_ENGINE == \"Python\":\n+ pass\n+\n+ elif ASV_USE_IMPL == \"pandas\":\n+ pass\n+ else:\n+ raise ValueError(f\"wrong value of {ASV_USE_IMPL}\")\n \n \n def get_shape_id(array):\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/asv_bench/benchmarks/scalability/scalability_benchmarks.py b/asv_bench/benchmarks/scalability/scalability_benchmarks.py\\n--- a/asv_bench/benchmarks/scalability/scalability_benchmarks.py\\n+++ 
b/asv_bench/benchmarks/scalability/scalability_benchmarks.py\\n@@ -60,4 +60,5 @@\\n self.data = generate_dataframe(\\\"modin\\\", \\\"int\\\", *shape, RAND_LOW, RAND_HIGH)\\n \\n def time_to_pandas(self, shape, cpus):\\n- execute(to_pandas(self.data))\\n+ # to_pandas is already synchronous\\n+ to_pandas(self.data)\\ndiff --git a/asv_bench/benchmarks/utils.py b/asv_bench/benchmarks/utils.py\\n--- a/asv_bench/benchmarks/utils.py\\n+++ b/asv_bench/benchmarks/utils.py\\n@@ -31,15 +31,17 @@\\n NPARTITIONS = pd.DEFAULT_NPARTITIONS\\n \\n try:\\n- from modin.config import TestDatasetSize, AsvImplementation\\n+ from modin.config import TestDatasetSize, AsvImplementation, Engine\\n \\n ASV_USE_IMPL = AsvImplementation.get()\\n ASV_DATASET_SIZE = TestDatasetSize.get() or \\\"Small\\\"\\n+ ASV_USE_ENGINE = Engine.get()\\n except ImportError:\\n # The same benchmarking code can be run for different versions of Modin, so in\\n # case of an error importing important variables, we'll just use predefined values\\n ASV_USE_IMPL = os.environ.get(\\\"MODIN_ASV_USE_IMPL\\\", \\\"modin\\\")\\n ASV_DATASET_SIZE = os.environ.get(\\\"MODIN_TEST_DATASET_SIZE\\\", \\\"Small\\\")\\n+ ASV_USE_ENGINE = os.environ.get(\\\"MODIN_ENGINE\\\", \\\"Ray\\\")\\n \\n assert ASV_USE_IMPL in (\\\"modin\\\", \\\"pandas\\\")\\n \\n@@ -217,7 +219,24 @@\\n \\n def execute(df):\\n \\\"Make sure the calculations are done.\\\"\\n- return df.shape, df.dtypes\\n+ if ASV_USE_IMPL == \\\"modin\\\":\\n+ partitions = df._query_compiler._modin_frame._partitions\\n+ map(lambda partition: partition.drain_call_queue(), partitions)\\n+ if ASV_USE_ENGINE == \\\"Ray\\\":\\n+ from ray import wait\\n+\\n+ map(lambda partition: wait(partition.oid), partitions)\\n+ elif ASV_USE_ENGINE == \\\"Dask\\\":\\n+ from dask.distributed import wait\\n+\\n+ map(lambda partition: wait(partition.future), partitions)\\n+ elif ASV_USE_ENGINE == \\\"Python\\\":\\n+ pass\\n+\\n+ elif ASV_USE_IMPL == \\\"pandas\\\":\\n+ pass\\n+ else:\\n+ raise ValueError(f\\\"wrong value of {ASV_USE_IMPL}\\\")\\n \\n \\n def get_shape_id(array):\\n\", \"issue\": \"[ASV] use `wait` function to get the right performance times\\n\\n\", \"before_files\": [{\"content\": \"# Licensed to Modin Development Team under one or more contributor license agreements.\\n# See the NOTICE file distributed with this work for additional information regarding\\n# copyright ownership. The Modin Development Team licenses this file to you under the\\n# Apache License, Version 2.0 (the \\\"License\\\"); you may not use this file except in\\n# compliance with the License. You may obtain a copy of the License at\\n#\\n# http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software distributed under\\n# the License is distributed on an \\\"AS IS\\\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\\n# ANY KIND, either express or implied. 
See the License for the specific language\\n# governing permissions and limitations under the License.\\n\\nimport modin.pandas as pd\\nfrom modin.pandas.utils import from_pandas\\nfrom modin.utils import to_pandas\\nimport pandas\\n\\nfrom ..utils import (\\n gen_data,\\n generate_dataframe,\\n RAND_LOW,\\n RAND_HIGH,\\n ASV_DATASET_SIZE,\\n UNARY_OP_DATA_SIZE,\\n execute,\\n)\\n\\n\\nclass TimeFromPandas:\\n param_names = [\\\"shape\\\", \\\"cpus\\\"]\\n params = [\\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\\n [4, 16, 32],\\n ]\\n\\n def setup(self, shape, cpus):\\n self.data = pandas.DataFrame(gen_data(\\\"int\\\", *shape, RAND_LOW, RAND_HIGH))\\n from modin.config import NPartitions\\n\\n NPartitions.get = lambda: cpus\\n # trigger ray init\\n pd.DataFrame([])\\n\\n def time_from_pandas(self, shape, cpus):\\n execute(from_pandas(self.data))\\n\\n\\nclass TimeToPandas:\\n param_names = [\\\"shape\\\", \\\"cpus\\\"]\\n params = [\\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\\n [4, 16, 32],\\n ]\\n\\n def setup(self, shape, cpus):\\n from modin.config import NPartitions\\n\\n NPartitions.get = lambda: cpus\\n self.data = generate_dataframe(\\\"modin\\\", \\\"int\\\", *shape, RAND_LOW, RAND_HIGH)\\n\\n def time_to_pandas(self, shape, cpus):\\n execute(to_pandas(self.data))\\n\", \"path\": \"asv_bench/benchmarks/scalability/scalability_benchmarks.py\"}, {\"content\": \"# Licensed to Modin Development Team under one or more contributor license agreements.\\n# See the NOTICE file distributed with this work for additional information regarding\\n# copyright ownership. The Modin Development Team licenses this file to you under the\\n# Apache License, Version 2.0 (the \\\"License\\\"); you may not use this file except in\\n# compliance with the License. You may obtain a copy of the License at\\n#\\n# http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software distributed under\\n# the License is distributed on an \\\"AS IS\\\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\\n# ANY KIND, either express or implied. 
See the License for the specific language\\n# governing permissions and limitations under the License.\\n\\nimport os\\nimport logging\\nimport modin.pandas as pd\\nimport pandas\\nimport numpy as np\\nimport uuid\\n\\nRAND_LOW = 0\\nRAND_HIGH = 100\\nrandom_state = np.random.RandomState(seed=42)\\n\\n\\ntry:\\n from modin.config import NPartitions\\n\\n NPARTITIONS = NPartitions.get()\\nexcept ImportError:\\n NPARTITIONS = pd.DEFAULT_NPARTITIONS\\n\\ntry:\\n from modin.config import TestDatasetSize, AsvImplementation\\n\\n ASV_USE_IMPL = AsvImplementation.get()\\n ASV_DATASET_SIZE = TestDatasetSize.get() or \\\"Small\\\"\\nexcept ImportError:\\n # The same benchmarking code can be run for different versions of Modin, so in\\n # case of an error importing important variables, we'll just use predefined values\\n ASV_USE_IMPL = os.environ.get(\\\"MODIN_ASV_USE_IMPL\\\", \\\"modin\\\")\\n ASV_DATASET_SIZE = os.environ.get(\\\"MODIN_TEST_DATASET_SIZE\\\", \\\"Small\\\")\\n\\nassert ASV_USE_IMPL in (\\\"modin\\\", \\\"pandas\\\")\\n\\nBINARY_OP_DATA_SIZE = {\\n \\\"Big\\\": [\\n ((5000, 5000), (5000, 5000)),\\n # the case extremely inefficient\\n # ((20, 500_000), (10, 1_000_000)),\\n ((500_000, 20), (1_000_000, 10)),\\n ],\\n \\\"Small\\\": [\\n ((250, 250), (250, 250)),\\n ((20, 10_000), (10, 25_000)),\\n ((10_000, 20), (25_000, 10)),\\n ],\\n}\\n\\nUNARY_OP_DATA_SIZE = {\\n \\\"Big\\\": [\\n (5000, 5000),\\n # the case extremely inefficient\\n # (10, 1_000_000),\\n (1_000_000, 10),\\n ],\\n \\\"Small\\\": [\\n (250, 250),\\n (10, 10_000),\\n (10_000, 10),\\n ],\\n}\\n\\nGROUPBY_NGROUPS = {\\n \\\"Big\\\": [100, \\\"huge_amount_groups\\\"],\\n \\\"Small\\\": [5],\\n}\\n\\nIMPL = {\\n \\\"modin\\\": pd,\\n \\\"pandas\\\": pandas,\\n}\\n\\n\\ndef translator_groupby_ngroups(groupby_ngroups, shape):\\n if ASV_DATASET_SIZE == \\\"Big\\\":\\n if groupby_ngroups == \\\"huge_amount_groups\\\":\\n return min(shape[0] // 2, 5000)\\n return groupby_ngroups\\n else:\\n return groupby_ngroups\\n\\n\\nclass weakdict(dict):\\n __slots__ = (\\\"__weakref__\\\",)\\n\\n\\ndata_cache = dict()\\ndataframes_cache = dict()\\n\\n\\ndef gen_int_data(nrows, ncols, rand_low, rand_high):\\n cache_key = (\\\"int\\\", nrows, ncols, rand_low, rand_high)\\n if cache_key in data_cache:\\n return data_cache[cache_key]\\n\\n logging.info(\\n \\\"Generating int data {} rows and {} columns [{}-{}]\\\".format(\\n nrows, ncols, rand_low, rand_high\\n )\\n )\\n data = {\\n \\\"col{}\\\".format(i): random_state.randint(rand_low, rand_high, size=(nrows))\\n for i in range(ncols)\\n }\\n data_cache[cache_key] = weakdict(data)\\n return data\\n\\n\\ndef gen_str_int_data(nrows, ncols, rand_low, rand_high):\\n cache_key = (\\\"str_int\\\", nrows, ncols, rand_low, rand_high)\\n if cache_key in data_cache:\\n return data_cache[cache_key]\\n\\n logging.info(\\n \\\"Generating str_int data {} rows and {} columns [{}-{}]\\\".format(\\n nrows, ncols, rand_low, rand_high\\n )\\n )\\n data = gen_int_data(nrows, ncols, rand_low, rand_high).copy()\\n data[\\\"gb_col\\\"] = [\\n \\\"str_{}\\\".format(random_state.randint(rand_low, rand_high)) for i in range(nrows)\\n ]\\n data_cache[cache_key] = weakdict(data)\\n return data\\n\\n\\ndef gen_data(data_type, nrows, ncols, rand_low, rand_high):\\n if data_type == \\\"int\\\":\\n return gen_int_data(nrows, ncols, rand_low, rand_high)\\n elif data_type == \\\"str_int\\\":\\n return gen_str_int_data(nrows, ncols, rand_low, rand_high)\\n else:\\n assert False\\n\\n\\ndef generate_dataframe(\\n impl,\\n 
data_type,\\n nrows,\\n ncols,\\n rand_low,\\n rand_high,\\n groupby_ncols=None,\\n count_groups=None,\\n):\\n assert not (\\n (groupby_ncols is None) ^ (count_groups is None)\\n ), \\\"You must either specify both parameters 'groupby_ncols' and 'count_groups' or none of them.\\\"\\n\\n if groupby_ncols and count_groups:\\n ncols -= groupby_ncols\\n cache_key = (\\n impl,\\n data_type,\\n nrows,\\n ncols,\\n rand_low,\\n rand_high,\\n groupby_ncols,\\n count_groups,\\n )\\n else:\\n cache_key = (impl, data_type, nrows, ncols, rand_low, rand_high)\\n\\n if cache_key in dataframes_cache:\\n return dataframes_cache[cache_key]\\n\\n logging.info(\\n \\\"Allocating {} DataFrame {}: {} rows and {} columns [{}-{}]\\\".format(\\n impl, data_type, nrows, ncols, rand_low, rand_high\\n )\\n )\\n data = gen_data(data_type, nrows, ncols, rand_low, rand_high)\\n\\n if groupby_ncols and count_groups:\\n groupby_columns = [f\\\"groupby_col{x}\\\" for x in range(groupby_ncols)]\\n for groupby_col in groupby_columns:\\n data[groupby_col] = np.tile(np.arange(count_groups), nrows // count_groups)\\n\\n if impl == \\\"modin\\\":\\n df = pd.DataFrame(data)\\n elif impl == \\\"pandas\\\":\\n df = pandas.DataFrame(data)\\n else:\\n assert False\\n\\n if groupby_ncols and count_groups:\\n dataframes_cache[cache_key] = df, groupby_columns\\n return df, groupby_columns\\n\\n dataframes_cache[cache_key] = df\\n return df\\n\\n\\ndef random_string():\\n return str(uuid.uuid1())\\n\\n\\ndef random_columns(df_columns, columns_number):\\n return list(random_state.choice(df_columns, size=columns_number))\\n\\n\\ndef random_booleans(number):\\n return list(random_state.choice([True, False], size=number))\\n\\n\\ndef execute(df):\\n \\\"Make sure the calculations are done.\\\"\\n return df.shape, df.dtypes\\n\\n\\ndef get_shape_id(array):\\n return \\\"_\\\".join([str(element) for element in array])\\n\", \"path\": \"asv_bench/benchmarks/utils.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3506,"string":"3,506"},"num_tokens_diff":{"kind":"number","value":623,"string":"623"}}},{"rowIdx":18165,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_13862"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"pulp__pulpcore-2665"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nImprove proxy connection failure error message\nRaise a more informative error message when the proxy rejects requests from Pulp\r\n\r\nBZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485\n\n\n\n[start of pulpcore/download/http.py]\n1 import logging\n2 \n3 import aiohttp\n4 import asyncio\n5 import backoff\n6 \n7 from .base import BaseDownloader, DownloadResult\n8 from pulpcore.exceptions import (\n9 DigestValidationError,\n10 SizeValidationError,\n11 TimeoutException,\n12 )\n13 \n14 \n15 log = logging.getLogger(__name__)\n16 \n17 \n18 logging.getLogger(\"backoff\").addHandler(logging.StreamHandler())\n19 \n20 \n21 def http_giveup_handler(exc):\n22 \"\"\"\n23 Inspect a raised exception and determine if we should give up.\n24 \n25 Do not give up when the error is one of the following:\n26 \n27 HTTP 429 - Too Many Requests\n28 HTTP 5xx - Server errors\n29 Socket timeout\n30 TCP disconnect\n31 Client SSL Error\n32 \n33 Based on the AWS and Google Cloud guidelines:\n34 https://docs.aws.amazon.com/general/latest/gr/api-retries.html\n35 
https://cloud.google.com/storage/docs/retry-strategy\n36 \n37 Args:\n38 exc (Exception): The exception to inspect\n39 \n40 Returns:\n41 True if the download should give up, False otherwise\n42 \"\"\"\n43 if isinstance(exc, aiohttp.ClientResponseError):\n44 server_error = 500 <= exc.code < 600\n45 too_many_requests = exc.code == 429\n46 return not server_error and not too_many_requests\n47 \n48 # any other type of error (pre-filtered by the backoff decorator) shouldn't be fatal\n49 return False\n50 \n51 \n52 class HttpDownloader(BaseDownloader):\n53 \"\"\"\n54 An HTTP/HTTPS Downloader built on `aiohttp`.\n55 \n56 This downloader downloads data from one `url` and is not reused.\n57 \n58 The downloader optionally takes a session argument, which is an `aiohttp.ClientSession`. This\n59 allows many downloaders to share one `aiohttp.ClientSession` which provides a connection pool,\n60 connection reuse, and keep-alives across multiple downloaders. When creating many downloaders,\n61 have one session shared by all of your `HttpDownloader` objects.\n62 \n63 A session is optional; if omitted, one session will be created, used for this downloader, and\n64 then closed when the download is complete. A session that is passed in will not be closed when\n65 the download is complete.\n66 \n67 If a session is not provided, the one created by HttpDownloader uses non-default timing values.\n68 Specifically, the \"total\" timeout is set to None and the \"sock_connect\" and \"sock_read\" are both\n69 5 minutes. For more info on these settings, see the aiohttp docs:\n70 http://aiohttp.readthedocs.io/en/stable/client_quickstart.html#timeouts Behaviorally, it should\n71 allow for an active download to be arbitrarily long, while still detecting dead or closed\n72 sessions even when TCPKeepAlive is disabled.\n73 \n74 If a session is not provided, the one created will force TCP connection closure after each\n75 request. This is done for compatibility reasons due to various issues related to session\n76 continuation implementation in various servers.\n77 \n78 `aiohttp.ClientSession` objects allows you to configure options that will apply to all\n79 downloaders using that session such as auth, timeouts, headers, etc. For more info on these\n80 options see the `aiohttp.ClientSession` docs for more information:\n81 http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientSession\n82 \n83 The `aiohttp.ClientSession` can additionally be configured for SSL configuration by passing in a\n84 `aiohttp.TCPConnector`. 
For information on configuring either server or client certificate based\n85 identity verification, see the aiohttp documentation:\n86 http://aiohttp.readthedocs.io/en/stable/client.html#ssl-control-for-tcp-sockets\n87 \n88 For more information on `aiohttp.BasicAuth` objects, see their docs:\n89 http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.BasicAuth\n90 \n91 Synchronous Download::\n92 \n93 downloader = HttpDownloader('http://example.com/')\n94 result = downloader.fetch()\n95 \n96 Parallel Download::\n97 \n98 download_coroutines = [\n99 HttpDownloader('http://example.com/').run(),\n100 HttpDownloader('http://pulpproject.org/').run(),\n101 ]\n102 \n103 loop = asyncio.get_event_loop()\n104 done, not_done = loop.run_until_complete(asyncio.wait(download_coroutines))\n105 \n106 for task in done:\n107 try:\n108 task.result() # This is a DownloadResult\n109 except Exception as error:\n110 pass # fatal exceptions are raised by result()\n111 \n112 The HTTPDownloaders contain automatic retry logic if the server responds with HTTP 429 response.\n113 The coroutine will automatically retry 10 times with exponential backoff before allowing a\n114 final exception to be raised.\n115 \n116 Attributes:\n117 session (aiohttp.ClientSession): The session to be used by the downloader.\n118 auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization or None\n119 proxy (str): An optional proxy URL or None\n120 proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic\n121 Authorization or None\n122 headers_ready_callback (callable): An optional callback that accepts a single dictionary\n123 as its argument. The callback will be called when the response headers are\n124 available. The dictionary passed has the header names as the keys and header values\n125 as its values. e.g. `{'Transfer-Encoding': 'chunked'}`. This can also be None.\n126 \n127 This downloader also has all of the attributes of\n128 :class:`~pulpcore.plugin.download.BaseDownloader`\n129 \"\"\"\n130 \n131 def __init__(\n132 self,\n133 url,\n134 session=None,\n135 auth=None,\n136 proxy=None,\n137 proxy_auth=None,\n138 headers_ready_callback=None,\n139 headers=None,\n140 throttler=None,\n141 max_retries=0,\n142 **kwargs,\n143 ):\n144 \"\"\"\n145 Args:\n146 url (str): The url to download.\n147 session (aiohttp.ClientSession): The session to be used by the downloader. (optional) If\n148 not specified it will open the session and close it\n149 auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization (optional)\n150 proxy (str): An optional proxy URL.\n151 proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic\n152 Authorization.\n153 headers_ready_callback (callable): An optional callback that accepts a single dictionary\n154 as its argument. The callback will be called when the response headers are\n155 available. The dictionary passed has the header names as the keys and header values\n156 as its values. e.g. 
`{'Transfer-Encoding': 'chunked'}`\n157 headers (dict): Headers to be submitted with the request.\n158 throttler (asyncio_throttle.Throttler): Throttler for asyncio.\n159 max_retries (int): The maximum number of times to retry a download upon failure.\n160 kwargs (dict): This accepts the parameters of\n161 :class:`~pulpcore.plugin.download.BaseDownloader`.\n162 \"\"\"\n163 if session:\n164 self.session = session\n165 self._close_session_on_finalize = False\n166 else:\n167 timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)\n168 conn = aiohttp.TCPConnector({\"force_close\": True})\n169 self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)\n170 self._close_session_on_finalize = True\n171 self.auth = auth\n172 self.proxy = proxy\n173 self.proxy_auth = proxy_auth\n174 self.headers_ready_callback = headers_ready_callback\n175 self.download_throttler = throttler\n176 self.max_retries = max_retries\n177 super().__init__(url, **kwargs)\n178 \n179 def raise_for_status(self, response):\n180 \"\"\"\n181 Raise error if aiohttp response status is >= 400 and not silenced.\n182 \n183 Args:\n184 response (aiohttp.ClientResponse): The response to handle.\n185 \n186 Raises:\n187 aiohttp.ClientResponseError: When the response status is >= 400.\n188 \"\"\"\n189 response.raise_for_status()\n190 \n191 async def _handle_response(self, response):\n192 \"\"\"\n193 Handle the aiohttp response by writing it to disk and calculating digests\n194 \n195 Args:\n196 response (aiohttp.ClientResponse): The response to handle.\n197 \n198 Returns:\n199 DownloadResult: Contains information about the result. See the DownloadResult docs for\n200 more information.\n201 \"\"\"\n202 if self.headers_ready_callback:\n203 await self.headers_ready_callback(response.headers)\n204 while True:\n205 chunk = await response.content.read(1048576) # 1 megabyte\n206 if not chunk:\n207 await self.finalize()\n208 break # the download is done\n209 await self.handle_data(chunk)\n210 return DownloadResult(\n211 path=self.path,\n212 artifact_attributes=self.artifact_attributes,\n213 url=self.url,\n214 headers=response.headers,\n215 )\n216 \n217 async def run(self, extra_data=None):\n218 \"\"\"\n219 Run the downloader with concurrency restriction and retry logic.\n220 \n221 This method acquires `self.semaphore` before calling the actual download implementation\n222 contained in `_run()`. 
This ensures that the semaphore stays acquired even as the `backoff`\n223 wrapper around `_run()`, handles backoff-and-retry logic.\n224 \n225 Args:\n226 extra_data (dict): Extra data passed to the downloader.\n227 \n228 Returns:\n229 :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n230 \n231 \"\"\"\n232 retryable_errors = (\n233 aiohttp.ClientConnectorSSLError,\n234 aiohttp.ClientConnectorError,\n235 aiohttp.ClientOSError,\n236 aiohttp.ClientPayloadError,\n237 aiohttp.ClientResponseError,\n238 aiohttp.ServerDisconnectedError,\n239 TimeoutError,\n240 TimeoutException,\n241 DigestValidationError,\n242 SizeValidationError,\n243 )\n244 \n245 async with self.semaphore:\n246 \n247 @backoff.on_exception(\n248 backoff.expo,\n249 retryable_errors,\n250 max_tries=self.max_retries + 1,\n251 giveup=http_giveup_handler,\n252 )\n253 async def download_wrapper():\n254 self._ensure_no_broken_file()\n255 try:\n256 return await self._run(extra_data=extra_data)\n257 except asyncio.TimeoutError:\n258 raise TimeoutException(self.url)\n259 \n260 return await download_wrapper()\n261 \n262 async def _run(self, extra_data=None):\n263 \"\"\"\n264 Download, validate, and compute digests on the `url`. This is a coroutine.\n265 \n266 This method is externally wrapped with backoff-and-retry behavior for some errors.\n267 It retries with exponential backoff some number of times before allowing a final\n268 exception to be raised.\n269 \n270 This method provides the same return object type and documented in\n271 :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n272 \n273 Args:\n274 extra_data (dict): Extra data passed by the downloader.\n275 \"\"\"\n276 if self.download_throttler:\n277 await self.download_throttler.acquire()\n278 async with self.session.get(\n279 self.url, proxy=self.proxy, proxy_auth=self.proxy_auth, auth=self.auth\n280 ) as response:\n281 self.raise_for_status(response)\n282 to_return = await self._handle_response(response)\n283 await response.release()\n284 if self._close_session_on_finalize:\n285 await self.session.close()\n286 return to_return\n287 \n288 def _ensure_no_broken_file(self):\n289 \"\"\"Upon retry reset writer back to None to get a fresh file.\"\"\"\n290 if self._writer is not None:\n291 self._writer.delete = True\n292 self._writer.close()\n293 self._writer = None\n294\n[end of pulpcore/download/http.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py\n--- a/pulpcore/download/http.py\n+++ b/pulpcore/download/http.py\n@@ -256,6 +256,17 @@\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n+ except aiohttp.ClientHttpProxyError as e:\n+ log.error(\n+ \"Proxy {!r} rejected connection request during a request to \"\n+ \"{!r}, status={}, message={!r}\".format(\n+ e.request_info.real_url,\n+ e.request_info.url,\n+ e.status,\n+ e.message,\n+ )\n+ )\n+ raise e\n \n return await download_wrapper()\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py\\n--- a/pulpcore/download/http.py\\n+++ b/pulpcore/download/http.py\\n@@ -256,6 +256,17 @@\\n return await self._run(extra_data=extra_data)\\n except asyncio.TimeoutError:\\n raise TimeoutException(self.url)\\n+ except aiohttp.ClientHttpProxyError as e:\\n+ log.error(\\n+ \\\"Proxy {!r} rejected connection request during a request to \\\"\\n+ \\\"{!r}, status={}, message={!r}\\\".format(\\n+ e.request_info.real_url,\\n+ e.request_info.url,\\n+ e.status,\\n+ e.message,\\n+ )\\n+ )\\n+ raise e\\n \\n return await download_wrapper()\\n\", \"issue\": \"Improve proxy connection failure error message\\nRaise a more informative error message when the proxy rejects requests from Pulp\\r\\n\\r\\nBZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485\\n\", \"before_files\": [{\"content\": \"import logging\\n\\nimport aiohttp\\nimport asyncio\\nimport backoff\\n\\nfrom .base import BaseDownloader, DownloadResult\\nfrom pulpcore.exceptions import (\\n DigestValidationError,\\n SizeValidationError,\\n TimeoutException,\\n)\\n\\n\\nlog = logging.getLogger(__name__)\\n\\n\\nlogging.getLogger(\\\"backoff\\\").addHandler(logging.StreamHandler())\\n\\n\\ndef http_giveup_handler(exc):\\n \\\"\\\"\\\"\\n Inspect a raised exception and determine if we should give up.\\n\\n Do not give up when the error is one of the following:\\n\\n HTTP 429 - Too Many Requests\\n HTTP 5xx - Server errors\\n Socket timeout\\n TCP disconnect\\n Client SSL Error\\n\\n Based on the AWS and Google Cloud guidelines:\\n https://docs.aws.amazon.com/general/latest/gr/api-retries.html\\n https://cloud.google.com/storage/docs/retry-strategy\\n\\n Args:\\n exc (Exception): The exception to inspect\\n\\n Returns:\\n True if the download should give up, False otherwise\\n \\\"\\\"\\\"\\n if 
isinstance(exc, aiohttp.ClientResponseError):\\n server_error = 500 <= exc.code < 600\\n too_many_requests = exc.code == 429\\n return not server_error and not too_many_requests\\n\\n # any other type of error (pre-filtered by the backoff decorator) shouldn't be fatal\\n return False\\n\\n\\nclass HttpDownloader(BaseDownloader):\\n \\\"\\\"\\\"\\n An HTTP/HTTPS Downloader built on `aiohttp`.\\n\\n This downloader downloads data from one `url` and is not reused.\\n\\n The downloader optionally takes a session argument, which is an `aiohttp.ClientSession`. This\\n allows many downloaders to share one `aiohttp.ClientSession` which provides a connection pool,\\n connection reuse, and keep-alives across multiple downloaders. When creating many downloaders,\\n have one session shared by all of your `HttpDownloader` objects.\\n\\n A session is optional; if omitted, one session will be created, used for this downloader, and\\n then closed when the download is complete. A session that is passed in will not be closed when\\n the download is complete.\\n\\n If a session is not provided, the one created by HttpDownloader uses non-default timing values.\\n Specifically, the \\\"total\\\" timeout is set to None and the \\\"sock_connect\\\" and \\\"sock_read\\\" are both\\n 5 minutes. For more info on these settings, see the aiohttp docs:\\n http://aiohttp.readthedocs.io/en/stable/client_quickstart.html#timeouts Behaviorally, it should\\n allow for an active download to be arbitrarily long, while still detecting dead or closed\\n sessions even when TCPKeepAlive is disabled.\\n\\n If a session is not provided, the one created will force TCP connection closure after each\\n request. This is done for compatibility reasons due to various issues related to session\\n continuation implementation in various servers.\\n\\n `aiohttp.ClientSession` objects allows you to configure options that will apply to all\\n downloaders using that session such as auth, timeouts, headers, etc. For more info on these\\n options see the `aiohttp.ClientSession` docs for more information:\\n http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientSession\\n\\n The `aiohttp.ClientSession` can additionally be configured for SSL configuration by passing in a\\n `aiohttp.TCPConnector`. 
For information on configuring either server or client certificate based\\n identity verification, see the aiohttp documentation:\\n http://aiohttp.readthedocs.io/en/stable/client.html#ssl-control-for-tcp-sockets\\n\\n For more information on `aiohttp.BasicAuth` objects, see their docs:\\n http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.BasicAuth\\n\\n Synchronous Download::\\n\\n downloader = HttpDownloader('http://example.com/')\\n result = downloader.fetch()\\n\\n Parallel Download::\\n\\n download_coroutines = [\\n HttpDownloader('http://example.com/').run(),\\n HttpDownloader('http://pulpproject.org/').run(),\\n ]\\n\\n loop = asyncio.get_event_loop()\\n done, not_done = loop.run_until_complete(asyncio.wait(download_coroutines))\\n\\n for task in done:\\n try:\\n task.result() # This is a DownloadResult\\n except Exception as error:\\n pass # fatal exceptions are raised by result()\\n\\n The HTTPDownloaders contain automatic retry logic if the server responds with HTTP 429 response.\\n The coroutine will automatically retry 10 times with exponential backoff before allowing a\\n final exception to be raised.\\n\\n Attributes:\\n session (aiohttp.ClientSession): The session to be used by the downloader.\\n auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization or None\\n proxy (str): An optional proxy URL or None\\n proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic\\n Authorization or None\\n headers_ready_callback (callable): An optional callback that accepts a single dictionary\\n as its argument. The callback will be called when the response headers are\\n available. The dictionary passed has the header names as the keys and header values\\n as its values. e.g. `{'Transfer-Encoding': 'chunked'}`. This can also be None.\\n\\n This downloader also has all of the attributes of\\n :class:`~pulpcore.plugin.download.BaseDownloader`\\n \\\"\\\"\\\"\\n\\n def __init__(\\n self,\\n url,\\n session=None,\\n auth=None,\\n proxy=None,\\n proxy_auth=None,\\n headers_ready_callback=None,\\n headers=None,\\n throttler=None,\\n max_retries=0,\\n **kwargs,\\n ):\\n \\\"\\\"\\\"\\n Args:\\n url (str): The url to download.\\n session (aiohttp.ClientSession): The session to be used by the downloader. (optional) If\\n not specified it will open the session and close it\\n auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization (optional)\\n proxy (str): An optional proxy URL.\\n proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic\\n Authorization.\\n headers_ready_callback (callable): An optional callback that accepts a single dictionary\\n as its argument. The callback will be called when the response headers are\\n available. The dictionary passed has the header names as the keys and header values\\n as its values. e.g. 
`{'Transfer-Encoding': 'chunked'}`\\n headers (dict): Headers to be submitted with the request.\\n throttler (asyncio_throttle.Throttler): Throttler for asyncio.\\n max_retries (int): The maximum number of times to retry a download upon failure.\\n kwargs (dict): This accepts the parameters of\\n :class:`~pulpcore.plugin.download.BaseDownloader`.\\n \\\"\\\"\\\"\\n if session:\\n self.session = session\\n self._close_session_on_finalize = False\\n else:\\n timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)\\n conn = aiohttp.TCPConnector({\\\"force_close\\\": True})\\n self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)\\n self._close_session_on_finalize = True\\n self.auth = auth\\n self.proxy = proxy\\n self.proxy_auth = proxy_auth\\n self.headers_ready_callback = headers_ready_callback\\n self.download_throttler = throttler\\n self.max_retries = max_retries\\n super().__init__(url, **kwargs)\\n\\n def raise_for_status(self, response):\\n \\\"\\\"\\\"\\n Raise error if aiohttp response status is >= 400 and not silenced.\\n\\n Args:\\n response (aiohttp.ClientResponse): The response to handle.\\n\\n Raises:\\n aiohttp.ClientResponseError: When the response status is >= 400.\\n \\\"\\\"\\\"\\n response.raise_for_status()\\n\\n async def _handle_response(self, response):\\n \\\"\\\"\\\"\\n Handle the aiohttp response by writing it to disk and calculating digests\\n\\n Args:\\n response (aiohttp.ClientResponse): The response to handle.\\n\\n Returns:\\n DownloadResult: Contains information about the result. See the DownloadResult docs for\\n more information.\\n \\\"\\\"\\\"\\n if self.headers_ready_callback:\\n await self.headers_ready_callback(response.headers)\\n while True:\\n chunk = await response.content.read(1048576) # 1 megabyte\\n if not chunk:\\n await self.finalize()\\n break # the download is done\\n await self.handle_data(chunk)\\n return DownloadResult(\\n path=self.path,\\n artifact_attributes=self.artifact_attributes,\\n url=self.url,\\n headers=response.headers,\\n )\\n\\n async def run(self, extra_data=None):\\n \\\"\\\"\\\"\\n Run the downloader with concurrency restriction and retry logic.\\n\\n This method acquires `self.semaphore` before calling the actual download implementation\\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\\n wrapper around `_run()`, handles backoff-and-retry logic.\\n\\n Args:\\n extra_data (dict): Extra data passed to the downloader.\\n\\n Returns:\\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\\n\\n \\\"\\\"\\\"\\n retryable_errors = (\\n aiohttp.ClientConnectorSSLError,\\n aiohttp.ClientConnectorError,\\n aiohttp.ClientOSError,\\n aiohttp.ClientPayloadError,\\n aiohttp.ClientResponseError,\\n aiohttp.ServerDisconnectedError,\\n TimeoutError,\\n TimeoutException,\\n DigestValidationError,\\n SizeValidationError,\\n )\\n\\n async with self.semaphore:\\n\\n @backoff.on_exception(\\n backoff.expo,\\n retryable_errors,\\n max_tries=self.max_retries + 1,\\n giveup=http_giveup_handler,\\n )\\n async def download_wrapper():\\n self._ensure_no_broken_file()\\n try:\\n return await self._run(extra_data=extra_data)\\n except asyncio.TimeoutError:\\n raise TimeoutException(self.url)\\n\\n return await download_wrapper()\\n\\n async def _run(self, extra_data=None):\\n \\\"\\\"\\\"\\n Download, validate, and compute digests on the `url`. 
This is a coroutine.\\n\\n This method is externally wrapped with backoff-and-retry behavior for some errors.\\n It retries with exponential backoff some number of times before allowing a final\\n exception to be raised.\\n\\n This method provides the same return object type and documented in\\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\\n\\n Args:\\n extra_data (dict): Extra data passed by the downloader.\\n \\\"\\\"\\\"\\n if self.download_throttler:\\n await self.download_throttler.acquire()\\n async with self.session.get(\\n self.url, proxy=self.proxy, proxy_auth=self.proxy_auth, auth=self.auth\\n ) as response:\\n self.raise_for_status(response)\\n to_return = await self._handle_response(response)\\n await response.release()\\n if self._close_session_on_finalize:\\n await self.session.close()\\n return to_return\\n\\n def _ensure_no_broken_file(self):\\n \\\"\\\"\\\"Upon retry reset writer back to None to get a fresh file.\\\"\\\"\\\"\\n if self._writer is not None:\\n self._writer.delete = True\\n self._writer.close()\\n self._writer = None\\n\", \"path\": \"pulpcore/download/http.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3786,"string":"3,786"},"num_tokens_diff":{"kind":"number","value":164,"string":"164"}}},{"rowIdx":18166,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_599"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"pex-tool__pex-1834"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nRelease 2.1.95\nOn the docket:\r\n+ [x] Lock creation should skip Windows-only requirements and / or allow selecting target platforms (OS classes). #1821\r\n+ [x] Feature request: \"universal\" lock mode can reject unsupported platforms #1595\r\n+ [x] Avoid ENOEXEC for --venv shebangs. #1828 \r\n+ [x] pex3 lock export does't seem to respect the platform flag. #1826\r\n+ [x] Clarify pex3 lock export command. #1645\r\n+ [x] Support exporting PYTHONPATH before running user code #1825\n\n\n\n[start of pex/version.py]\n1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n2 # Licensed under the Apache License, Version 2.0 (see LICENSE).\n3 \n4 __version__ = \"2.1.94\"\n5\n[end of pex/version.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.94\"\n+__version__ = \"2.1.95\"\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/pex/version.py b/pex/version.py\\n--- a/pex/version.py\\n+++ b/pex/version.py\\n@@ -1,4 +1,4 @@\\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\\n \\n-__version__ = \\\"2.1.94\\\"\\n+__version__ = \\\"2.1.95\\\"\\n\", \"issue\": \"Release 2.1.95\\nOn the docket:\\r\\n+ [x] Lock creation should skip Windows-only requirements and / or allow selecting target platforms (OS classes). #1821\\r\\n+ [x] Feature request: \\\"universal\\\" lock mode can reject unsupported platforms #1595\\r\\n+ [x] Avoid ENOEXEC for --venv shebangs. #1828 \\r\\n+ [x] pex3 lock export does't seem to respect the platform flag. #1826\\r\\n+ [x] Clarify pex3 lock export command. #1645\\r\\n+ [x] Support exporting PYTHONPATH before running user code #1825\\n\", \"before_files\": [{\"content\": \"# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\\n\\n__version__ = \\\"2.1.94\\\"\\n\", \"path\": \"pex/version.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":729,"string":"729"},"num_tokens_diff":{"kind":"number","value":96,"string":"96"}}},{"rowIdx":18167,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_17669"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"gammapy__gammapy-1690"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nIndexError from SpectrumEnergyGroupMaker\nIn this example CTA DC1 analysis the SpectrumEnergyGroupMaker give an IndexError if e.g. 
`emax=100 TeV` is chosen (see comment in cell 23):\r\n\r\nhttps://github.com/gammasky/cta-analyses/blob/bf571038b389b3eb13ce8ba81b35384ebd4b6750/dc-1-checks/hess_j1702/spectrum.ipynb\n\n\n\n[start of gammapy/spectrum/energy_group.py]\n1 # Licensed under a 3-clause BSD style license - see LICENSE.rst\n2 \"\"\"Spectrum energy bin grouping.\n3 \n4 There are three classes:\n5 \n6 * SpectrumEnergyGroup - one group\n7 * SpectrumEnergyGroups - one grouping, i.e. collection of groups\n8 * SpectrumEnergyGroupMaker - algorithms to compute groupings.\n9 \n10 Algorithms to compute groupings are both on SpectrumEnergyGroups and SpectrumEnergyGroupMaker.\n11 The difference is that SpectrumEnergyGroups contains the algorithms and book-keeping that\n12 just have to do with the groups, whereas SpectrumEnergyGroupMaker also accesses\n13 information from SpectrumObservation (e.g. safe energy range or counts data) and\n14 implements higher-level algorithms.\n15 \"\"\"\n16 from __future__ import absolute_import, division, print_function, unicode_literals\n17 from collections import OrderedDict\n18 from copy import deepcopy\n19 import numpy as np\n20 import logging\n21 from ..extern.six.moves import UserList\n22 from astropy.units import Quantity\n23 from astropy.table import Table\n24 from astropy.table import vstack as table_vstack\n25 from ..utils.table import table_from_row_data, table_row_to_dict\n26 \n27 __all__ = [\n28 'SpectrumEnergyGroup',\n29 'SpectrumEnergyGroups',\n30 'SpectrumEnergyGroupMaker',\n31 ]\n32 \n33 log = logging.getLogger(__name__)\n34 \n35 \n36 class SpectrumEnergyGroup(object):\n37 \"\"\"Spectrum energy group.\n38 \n39 Represents a consecutive range of bin indices (both ends inclusive).\n40 \"\"\"\n41 fields = [\n42 'energy_group_idx', 'bin_idx_min', 'bin_idx_max',\n43 'bin_type', 'energy_min', 'energy_max',\n44 ]\n45 \"\"\"List of data members of this class.\"\"\"\n46 \n47 valid_bin_types = ['normal', 'underflow', 'overflow']\n48 \"\"\"Valid values for ``bin_types`` attribute.\"\"\"\n49 \n50 def __init__(self, energy_group_idx, bin_idx_min, bin_idx_max, bin_type,\n51 energy_min, energy_max):\n52 self.energy_group_idx = energy_group_idx\n53 self.bin_idx_min = bin_idx_min\n54 self.bin_idx_max = bin_idx_max\n55 if bin_type not in self.valid_bin_types:\n56 raise ValueError('Invalid bin type: {}'.format(bin_type))\n57 self.bin_type = bin_type\n58 self.energy_min = Quantity(energy_min)\n59 self.energy_max = Quantity(energy_max)\n60 \n61 @classmethod\n62 def from_dict(cls, data):\n63 data = dict((_, data[_]) for _ in cls.fields)\n64 return cls(**data)\n65 \n66 @property\n67 def _data(self):\n68 return [(_, getattr(self, _)) for _ in self.fields]\n69 \n70 def __repr__(self):\n71 txt = ['{}={!r}'.format(k, v) for k, v in self._data]\n72 return '{}({})'.format(self.__class__.__name__, ', '.join(txt))\n73 \n74 def __eq__(self, other):\n75 return self.to_dict() == other.to_dict()\n76 \n77 def to_dict(self):\n78 return OrderedDict(self._data)\n79 \n80 @property\n81 def bin_idx_array(self):\n82 \"\"\"Numpy array of bin indices in the group.\"\"\"\n83 return np.arange(self.bin_idx_min, self.bin_idx_max + 1)\n84 \n85 @property\n86 def bin_table(self):\n87 \"\"\"Create `~astropy.table.Table` with bins in the group.\n88 \n89 Columns are: ``energy_group_idx``, ``bin_idx``, ``bin_type``\n90 \"\"\"\n91 table = Table()\n92 table['bin_idx'] = self.bin_idx_array\n93 table['energy_group_idx'] = self.energy_group_idx\n94 table['bin_type'] = self.bin_type\n95 table['energy_min'] = self.energy_min\n96 
table['energy_max'] = self.energy_max\n97 return table\n98 \n99 \n100 class SpectrumEnergyGroups(UserList):\n101 \"\"\"List of `~gammapy.spectrum.SpectrumEnergyGroup` objects.\n102 \n103 A helper class used by the `gammapy.spectrum.SpectrumEnergyGroupsMaker`.\n104 \"\"\"\n105 \n106 def __repr__(self):\n107 return '{}(len={})'.format(self.__class__.__name__, len(self))\n108 \n109 def __str__(self):\n110 ss = '{}:\\n'.format(self.__class__.__name__)\n111 lines = self.to_group_table().pformat(max_width=-1, max_lines=-1)\n112 ss += '\\n'.join(lines)\n113 return ss + '\\n'\n114 \n115 def copy(self):\n116 \"\"\"Deep copy\"\"\"\n117 return deepcopy(self)\n118 \n119 @classmethod\n120 def from_total_table(cls, table):\n121 \"\"\"Create list of SpectrumEnergyGroup objects from table.\"\"\"\n122 groups = cls()\n123 \n124 for energy_group_idx in np.unique(table['energy_group_idx']):\n125 mask = table['energy_group_idx'] == energy_group_idx\n126 group_table = table[mask]\n127 bin_idx_min = group_table['bin_idx'][0]\n128 bin_idx_max = group_table['bin_idx'][-1]\n129 if len(set(group_table['bin_type'])) > 1:\n130 raise ValueError('Inconsistent bin_type within group.')\n131 bin_type = group_table['bin_type'][0]\n132 energy_min = group_table['energy_min'].quantity[0]\n133 energy_max = group_table['energy_max'].quantity[-1]\n134 \n135 group = SpectrumEnergyGroup(\n136 energy_group_idx=energy_group_idx,\n137 bin_idx_min=bin_idx_min,\n138 bin_idx_max=bin_idx_max,\n139 bin_type=bin_type,\n140 energy_min=energy_min,\n141 energy_max=energy_max,\n142 )\n143 groups.append(group)\n144 \n145 return groups\n146 \n147 @classmethod\n148 def from_group_table(cls, table):\n149 \"\"\"Create from energy groups in `~astropy.table.Table` format.\"\"\"\n150 return cls([\n151 SpectrumEnergyGroup.from_dict(table_row_to_dict(row))\n152 for row in table\n153 ])\n154 \n155 def to_total_table(self):\n156 \"\"\"Table with one energy bin per row (`~astropy.table.Table`).\n157 \n158 Columns:\n159 \n160 * ``energy_group_idx`` - Energy group index (int)\n161 * ``bin_idx`` - Energy bin index (int)\n162 * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)\n163 \n164 There are no energy columns, because the per-bin energy info\n165 was lost during grouping.\n166 \"\"\"\n167 tables = [group.bin_table for group in self]\n168 return table_vstack(tables)\n169 \n170 def to_group_table(self):\n171 \"\"\"Table with one energy group per row (`~astropy.table.Table`).\n172 \n173 Columns:\n174 \n175 * ``energy_group_idx`` - Energy group index (int)\n176 * ``energy_group_n_bins`` - Number of bins in the energy group (int)\n177 * ``bin_idx_min`` - First bin index in the energy group (int)\n178 * ``bin_idx_max`` - Last bin index in the energy group (int)\n179 * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)\n180 * ``energy_min`` - Energy group start energy (Quantity)\n181 * ``energy_max`` - Energy group end energy (Quantity)\n182 \"\"\"\n183 rows = [group.to_dict() for group in self]\n184 table = table_from_row_data(rows)\n185 return table\n186 \n187 @property\n188 def energy_range(self):\n189 \"\"\"Total energy range (`~astropy.units.Quantity` of length 2).\"\"\"\n190 return Quantity([self[0].energy_min, self[-1].energy_max])\n191 \n192 @property\n193 def energy_bounds(self):\n194 \"\"\"Energy group bounds (`~astropy.units.Quantity`).\"\"\"\n195 energy = [_.energy_min for _ in self]\n196 energy.append(self[-1].energy_max)\n197 return Quantity(energy)\n198 \n199 \n200 class SpectrumEnergyGroupMaker(object):\n201 
\"\"\"Energy bin groups for spectral analysis.\n202 \n203 This class contains both methods that run algorithms\n204 that compute groupings as well as the results as data members\n205 and methods to debug and assess the results.\n206 \n207 The input ``obs`` is used read-only, to access the counts energy\n208 binning, as well as some other info that is used for energy bin grouping.\n209 \n210 Parameters\n211 ----------\n212 obs : `~gammapy.spectrum.SpectrumObservation`\n213 Spectrum observation\n214 \n215 Attributes\n216 ----------\n217 obs : `~gammapy.spectrum.SpectrumObservation`\n218 Spectrum observation data\n219 groups : `~gammapy.spectrum.SpectrumEnergyGroups`\n220 List of energy groups\n221 \n222 See also\n223 --------\n224 SpectrumEnergyGroups, SpectrumEnergyGroup, FluxPointEstimator\n225 \"\"\"\n226 \n227 def __init__(self, obs):\n228 self.obs = obs\n229 self.groups = None\n230 \n231 def groups_from_obs(self):\n232 \"\"\"Compute energy groups with one group per energy bin.\"\"\"\n233 ebounds_obs = self.obs.e_reco\n234 size = ebounds_obs.nbins\n235 table = Table()\n236 table['bin_idx'] = np.arange(size)\n237 table['energy_group_idx'] = np.arange(size)\n238 table['bin_type'] = ['normal'] * size\n239 table['energy_min'] = ebounds_obs.lower_bounds\n240 table['energy_max'] = ebounds_obs.upper_bounds\n241 self.groups = SpectrumEnergyGroups.from_total_table(table)\n242 \n243 def compute_groups_fixed(self, ebounds):\n244 \"\"\"Apply grouping for a given fixed energy binning.\n245 \n246 This groups the observation ``obs.e_reco`` binning and\n247 ``ebounds`` using a nearest neighbor match on the bin edges.\n248 \n249 Parameters\n250 ----------\n251 ebounds : `~astropy.units.Quantity`\n252 Energy bounds array\n253 \"\"\"\n254 ebounds_src = self.obs.e_reco\n255 bin_edges_src = np.arange(len(ebounds_src))\n256 \n257 temp = np.interp(ebounds, ebounds_src, bin_edges_src)\n258 bin_edges = np.round(temp, decimals=0).astype(np.int)\n259 \n260 # Check for duplicates\n261 duplicates_removed = set(bin_edges)\n262 if len(duplicates_removed) != len(bin_edges):\n263 warn_str = \"Input binning\\n{}\\n contains bins that are finer than the\"\n264 warn_str += \" target binning\\n{}\\n or outside the valid range\"\n265 log.warning(warn_str.format(ebounds, ebounds_src))\n266 bin_edges = sorted(duplicates_removed)\n267 \n268 # Create normal bins\n269 groups = []\n270 for idx in np.arange(len(bin_edges) - 1):\n271 group = SpectrumEnergyGroup(\n272 energy_group_idx=-1,\n273 bin_idx_min=bin_edges[idx],\n274 bin_idx_max=bin_edges[idx + 1] - 1,\n275 bin_type='normal',\n276 energy_min=ebounds_src[bin_edges[idx]],\n277 energy_max=ebounds_src[bin_edges[idx + 1]],\n278 )\n279 groups.append(group)\n280 \n281 # Add underflow bin\n282 start_edge = groups[0].bin_idx_min\n283 if start_edge != 0:\n284 underflow = SpectrumEnergyGroup(\n285 energy_group_idx=-1,\n286 bin_idx_min=0,\n287 bin_idx_max=start_edge - 1,\n288 bin_type='underflow',\n289 energy_min=ebounds_src[0],\n290 energy_max=ebounds_src[start_edge],\n291 )\n292 groups.insert(0, underflow)\n293 \n294 # Add overflow bin\n295 end_edge = groups[-1].bin_idx_max\n296 if end_edge != ebounds_src.nbins - 1:\n297 overflow = SpectrumEnergyGroup(\n298 energy_group_idx=-1,\n299 bin_idx_min=end_edge + 1,\n300 bin_idx_max=ebounds_src.nbins - 1,\n301 bin_type='overflow',\n302 energy_min=ebounds_src[end_edge + 1],\n303 energy_max=ebounds_src[-1],\n304 )\n305 groups.append(overflow)\n306 \n307 # Set energy_group_idx\n308 for group_idx, group in enumerate(groups):\n309 
group.energy_group_idx = group_idx\n310 \n311 self.groups = SpectrumEnergyGroups(groups)\n312\n[end of gammapy/spectrum/energy_group.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/gammapy/spectrum/energy_group.py b/gammapy/spectrum/energy_group.py\n--- a/gammapy/spectrum/energy_group.py\n+++ b/gammapy/spectrum/energy_group.py\n@@ -251,7 +251,7 @@\n ebounds : `~astropy.units.Quantity`\n Energy bounds array\n \"\"\"\n- ebounds_src = self.obs.e_reco\n+ ebounds_src = self.obs.e_reco.to(ebounds.unit)\n bin_edges_src = np.arange(len(ebounds_src))\n \n temp = np.interp(ebounds, ebounds_src, bin_edges_src)\n@@ -278,6 +278,11 @@\n )\n groups.append(group)\n \n+ if groups == []:\n+ err_str = \"Input binning\\n{}\\n has no overlap with\"\n+ err_str += \" target binning\\n{}\"\n+ raise ValueError(err_str.format(ebounds, ebounds_src))\n+\n # Add underflow bin\n start_edge = groups[0].bin_idx_min\n if start_edge != 0:\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/gammapy/spectrum/energy_group.py b/gammapy/spectrum/energy_group.py\\n--- a/gammapy/spectrum/energy_group.py\\n+++ b/gammapy/spectrum/energy_group.py\\n@@ -251,7 +251,7 @@\\n ebounds : `~astropy.units.Quantity`\\n Energy bounds array\\n \\\"\\\"\\\"\\n- ebounds_src = self.obs.e_reco\\n+ ebounds_src = self.obs.e_reco.to(ebounds.unit)\\n bin_edges_src = np.arange(len(ebounds_src))\\n \\n temp = np.interp(ebounds, ebounds_src, bin_edges_src)\\n@@ -278,6 +278,11 @@\\n )\\n groups.append(group)\\n \\n+ if groups == []:\\n+ err_str = \\\"Input binning\\\\n{}\\\\n has no overlap with\\\"\\n+ err_str += \\\" target binning\\\\n{}\\\"\\n+ raise ValueError(err_str.format(ebounds, ebounds_src))\\n+\\n # Add underflow bin\\n start_edge = groups[0].bin_idx_min\\n if start_edge != 0:\\n\", \"issue\": \"IndexError from SpectrumEnergyGroupMaker\\nIn this example CTA DC1 analysis the SpectrumEnergyGroupMaker give an IndexError if e.g. `emax=100 TeV` is chosen (see comment in cell 23):\\r\\n\\r\\nhttps://github.com/gammasky/cta-analyses/blob/bf571038b389b3eb13ce8ba81b35384ebd4b6750/dc-1-checks/hess_j1702/spectrum.ipynb\\n\", \"before_files\": [{\"content\": \"# Licensed under a 3-clause BSD style license - see LICENSE.rst\\n\\\"\\\"\\\"Spectrum energy bin grouping.\\n\\nThere are three classes:\\n\\n* SpectrumEnergyGroup - one group\\n* SpectrumEnergyGroups - one grouping, i.e. 
collection of groups\\n* SpectrumEnergyGroupMaker - algorithms to compute groupings.\\n\\nAlgorithms to compute groupings are both on SpectrumEnergyGroups and SpectrumEnergyGroupMaker.\\nThe difference is that SpectrumEnergyGroups contains the algorithms and book-keeping that\\njust have to do with the groups, whereas SpectrumEnergyGroupMaker also accesses\\ninformation from SpectrumObservation (e.g. safe energy range or counts data) and\\nimplements higher-level algorithms.\\n\\\"\\\"\\\"\\nfrom __future__ import absolute_import, division, print_function, unicode_literals\\nfrom collections import OrderedDict\\nfrom copy import deepcopy\\nimport numpy as np\\nimport logging\\nfrom ..extern.six.moves import UserList\\nfrom astropy.units import Quantity\\nfrom astropy.table import Table\\nfrom astropy.table import vstack as table_vstack\\nfrom ..utils.table import table_from_row_data, table_row_to_dict\\n\\n__all__ = [\\n 'SpectrumEnergyGroup',\\n 'SpectrumEnergyGroups',\\n 'SpectrumEnergyGroupMaker',\\n]\\n\\nlog = logging.getLogger(__name__)\\n\\n\\nclass SpectrumEnergyGroup(object):\\n \\\"\\\"\\\"Spectrum energy group.\\n\\n Represents a consecutive range of bin indices (both ends inclusive).\\n \\\"\\\"\\\"\\n fields = [\\n 'energy_group_idx', 'bin_idx_min', 'bin_idx_max',\\n 'bin_type', 'energy_min', 'energy_max',\\n ]\\n \\\"\\\"\\\"List of data members of this class.\\\"\\\"\\\"\\n\\n valid_bin_types = ['normal', 'underflow', 'overflow']\\n \\\"\\\"\\\"Valid values for ``bin_types`` attribute.\\\"\\\"\\\"\\n\\n def __init__(self, energy_group_idx, bin_idx_min, bin_idx_max, bin_type,\\n energy_min, energy_max):\\n self.energy_group_idx = energy_group_idx\\n self.bin_idx_min = bin_idx_min\\n self.bin_idx_max = bin_idx_max\\n if bin_type not in self.valid_bin_types:\\n raise ValueError('Invalid bin type: {}'.format(bin_type))\\n self.bin_type = bin_type\\n self.energy_min = Quantity(energy_min)\\n self.energy_max = Quantity(energy_max)\\n\\n @classmethod\\n def from_dict(cls, data):\\n data = dict((_, data[_]) for _ in cls.fields)\\n return cls(**data)\\n\\n @property\\n def _data(self):\\n return [(_, getattr(self, _)) for _ in self.fields]\\n\\n def __repr__(self):\\n txt = ['{}={!r}'.format(k, v) for k, v in self._data]\\n return '{}({})'.format(self.__class__.__name__, ', '.join(txt))\\n\\n def __eq__(self, other):\\n return self.to_dict() == other.to_dict()\\n\\n def to_dict(self):\\n return OrderedDict(self._data)\\n\\n @property\\n def bin_idx_array(self):\\n \\\"\\\"\\\"Numpy array of bin indices in the group.\\\"\\\"\\\"\\n return np.arange(self.bin_idx_min, self.bin_idx_max + 1)\\n\\n @property\\n def bin_table(self):\\n \\\"\\\"\\\"Create `~astropy.table.Table` with bins in the group.\\n\\n Columns are: ``energy_group_idx``, ``bin_idx``, ``bin_type``\\n \\\"\\\"\\\"\\n table = Table()\\n table['bin_idx'] = self.bin_idx_array\\n table['energy_group_idx'] = self.energy_group_idx\\n table['bin_type'] = self.bin_type\\n table['energy_min'] = self.energy_min\\n table['energy_max'] = self.energy_max\\n return table\\n\\n\\nclass SpectrumEnergyGroups(UserList):\\n \\\"\\\"\\\"List of `~gammapy.spectrum.SpectrumEnergyGroup` objects.\\n\\n A helper class used by the `gammapy.spectrum.SpectrumEnergyGroupsMaker`.\\n \\\"\\\"\\\"\\n\\n def __repr__(self):\\n return '{}(len={})'.format(self.__class__.__name__, len(self))\\n\\n def __str__(self):\\n ss = '{}:\\\\n'.format(self.__class__.__name__)\\n lines = self.to_group_table().pformat(max_width=-1, max_lines=-1)\\n ss += 
'\\\\n'.join(lines)\\n return ss + '\\\\n'\\n\\n def copy(self):\\n \\\"\\\"\\\"Deep copy\\\"\\\"\\\"\\n return deepcopy(self)\\n\\n @classmethod\\n def from_total_table(cls, table):\\n \\\"\\\"\\\"Create list of SpectrumEnergyGroup objects from table.\\\"\\\"\\\"\\n groups = cls()\\n\\n for energy_group_idx in np.unique(table['energy_group_idx']):\\n mask = table['energy_group_idx'] == energy_group_idx\\n group_table = table[mask]\\n bin_idx_min = group_table['bin_idx'][0]\\n bin_idx_max = group_table['bin_idx'][-1]\\n if len(set(group_table['bin_type'])) > 1:\\n raise ValueError('Inconsistent bin_type within group.')\\n bin_type = group_table['bin_type'][0]\\n energy_min = group_table['energy_min'].quantity[0]\\n energy_max = group_table['energy_max'].quantity[-1]\\n\\n group = SpectrumEnergyGroup(\\n energy_group_idx=energy_group_idx,\\n bin_idx_min=bin_idx_min,\\n bin_idx_max=bin_idx_max,\\n bin_type=bin_type,\\n energy_min=energy_min,\\n energy_max=energy_max,\\n )\\n groups.append(group)\\n\\n return groups\\n\\n @classmethod\\n def from_group_table(cls, table):\\n \\\"\\\"\\\"Create from energy groups in `~astropy.table.Table` format.\\\"\\\"\\\"\\n return cls([\\n SpectrumEnergyGroup.from_dict(table_row_to_dict(row))\\n for row in table\\n ])\\n\\n def to_total_table(self):\\n \\\"\\\"\\\"Table with one energy bin per row (`~astropy.table.Table`).\\n\\n Columns:\\n\\n * ``energy_group_idx`` - Energy group index (int)\\n * ``bin_idx`` - Energy bin index (int)\\n * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)\\n\\n There are no energy columns, because the per-bin energy info\\n was lost during grouping.\\n \\\"\\\"\\\"\\n tables = [group.bin_table for group in self]\\n return table_vstack(tables)\\n\\n def to_group_table(self):\\n \\\"\\\"\\\"Table with one energy group per row (`~astropy.table.Table`).\\n\\n Columns:\\n\\n * ``energy_group_idx`` - Energy group index (int)\\n * ``energy_group_n_bins`` - Number of bins in the energy group (int)\\n * ``bin_idx_min`` - First bin index in the energy group (int)\\n * ``bin_idx_max`` - Last bin index in the energy group (int)\\n * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)\\n * ``energy_min`` - Energy group start energy (Quantity)\\n * ``energy_max`` - Energy group end energy (Quantity)\\n \\\"\\\"\\\"\\n rows = [group.to_dict() for group in self]\\n table = table_from_row_data(rows)\\n return table\\n\\n @property\\n def energy_range(self):\\n \\\"\\\"\\\"Total energy range (`~astropy.units.Quantity` of length 2).\\\"\\\"\\\"\\n return Quantity([self[0].energy_min, self[-1].energy_max])\\n\\n @property\\n def energy_bounds(self):\\n \\\"\\\"\\\"Energy group bounds (`~astropy.units.Quantity`).\\\"\\\"\\\"\\n energy = [_.energy_min for _ in self]\\n energy.append(self[-1].energy_max)\\n return Quantity(energy)\\n\\n\\nclass SpectrumEnergyGroupMaker(object):\\n \\\"\\\"\\\"Energy bin groups for spectral analysis.\\n\\n This class contains both methods that run algorithms\\n that compute groupings as well as the results as data members\\n and methods to debug and assess the results.\\n\\n The input ``obs`` is used read-only, to access the counts energy\\n binning, as well as some other info that is used for energy bin grouping.\\n\\n Parameters\\n ----------\\n obs : `~gammapy.spectrum.SpectrumObservation`\\n Spectrum observation\\n\\n Attributes\\n ----------\\n obs : `~gammapy.spectrum.SpectrumObservation`\\n Spectrum observation data\\n groups : `~gammapy.spectrum.SpectrumEnergyGroups`\\n List of 
energy groups\\n\\n See also\\n --------\\n SpectrumEnergyGroups, SpectrumEnergyGroup, FluxPointEstimator\\n \\\"\\\"\\\"\\n\\n def __init__(self, obs):\\n self.obs = obs\\n self.groups = None\\n\\n def groups_from_obs(self):\\n \\\"\\\"\\\"Compute energy groups with one group per energy bin.\\\"\\\"\\\"\\n ebounds_obs = self.obs.e_reco\\n size = ebounds_obs.nbins\\n table = Table()\\n table['bin_idx'] = np.arange(size)\\n table['energy_group_idx'] = np.arange(size)\\n table['bin_type'] = ['normal'] * size\\n table['energy_min'] = ebounds_obs.lower_bounds\\n table['energy_max'] = ebounds_obs.upper_bounds\\n self.groups = SpectrumEnergyGroups.from_total_table(table)\\n\\n def compute_groups_fixed(self, ebounds):\\n \\\"\\\"\\\"Apply grouping for a given fixed energy binning.\\n\\n This groups the observation ``obs.e_reco`` binning and\\n ``ebounds`` using a nearest neighbor match on the bin edges.\\n\\n Parameters\\n ----------\\n ebounds : `~astropy.units.Quantity`\\n Energy bounds array\\n \\\"\\\"\\\"\\n ebounds_src = self.obs.e_reco\\n bin_edges_src = np.arange(len(ebounds_src))\\n\\n temp = np.interp(ebounds, ebounds_src, bin_edges_src)\\n bin_edges = np.round(temp, decimals=0).astype(np.int)\\n\\n # Check for duplicates\\n duplicates_removed = set(bin_edges)\\n if len(duplicates_removed) != len(bin_edges):\\n warn_str = \\\"Input binning\\\\n{}\\\\n contains bins that are finer than the\\\"\\n warn_str += \\\" target binning\\\\n{}\\\\n or outside the valid range\\\"\\n log.warning(warn_str.format(ebounds, ebounds_src))\\n bin_edges = sorted(duplicates_removed)\\n\\n # Create normal bins\\n groups = []\\n for idx in np.arange(len(bin_edges) - 1):\\n group = SpectrumEnergyGroup(\\n energy_group_idx=-1,\\n bin_idx_min=bin_edges[idx],\\n bin_idx_max=bin_edges[idx + 1] - 1,\\n bin_type='normal',\\n energy_min=ebounds_src[bin_edges[idx]],\\n energy_max=ebounds_src[bin_edges[idx + 1]],\\n )\\n groups.append(group)\\n\\n # Add underflow bin\\n start_edge = groups[0].bin_idx_min\\n if start_edge != 0:\\n underflow = SpectrumEnergyGroup(\\n energy_group_idx=-1,\\n bin_idx_min=0,\\n bin_idx_max=start_edge - 1,\\n bin_type='underflow',\\n energy_min=ebounds_src[0],\\n energy_max=ebounds_src[start_edge],\\n )\\n groups.insert(0, underflow)\\n\\n # Add overflow bin\\n end_edge = groups[-1].bin_idx_max\\n if end_edge != ebounds_src.nbins - 1:\\n overflow = SpectrumEnergyGroup(\\n energy_group_idx=-1,\\n bin_idx_min=end_edge + 1,\\n bin_idx_max=ebounds_src.nbins - 1,\\n bin_type='overflow',\\n energy_min=ebounds_src[end_edge + 1],\\n energy_max=ebounds_src[-1],\\n )\\n groups.append(overflow)\\n\\n # Set energy_group_idx\\n for group_idx, group in enumerate(groups):\\n group.energy_group_idx = group_idx\\n\\n self.groups = SpectrumEnergyGroups(groups)\\n\", \"path\": \"gammapy/spectrum/energy_group.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3983,"string":"3,983"},"num_tokens_diff":{"kind":"number","value":242,"string":"242"}}},{"rowIdx":18168,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_38843"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"lutris__lutris-1049"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nRestore monitor refresh rate\nMy monitor's default refresh rate is 60Hz, but I set it to 72Hz. When game exits, lutris does not restore monitor refresh rate to 72Hz. 
Is this feature available? If not can it be added?\n\n\n\n[start of lutris/util/display.py]\n1 import re\n2 import time\n3 import subprocess\n4 \n5 from lutris.util import system\n6 from lutris.util.log import logger\n7 \n8 XRANDR_CACHE = None\n9 XRANDR_CACHE_SET_AT = None\n10 XGAMMA_FOUND = None\n11 \n12 \n13 def cached(function):\n14 def wrapper():\n15 global XRANDR_CACHE\n16 global XRANDR_CACHE_SET_AT\n17 \n18 if XRANDR_CACHE and time.time() - XRANDR_CACHE_SET_AT < 60:\n19 return XRANDR_CACHE\n20 XRANDR_CACHE = function()\n21 XRANDR_CACHE_SET_AT = time.time()\n22 return XRANDR_CACHE\n23 return wrapper\n24 \n25 \n26 @cached\n27 def get_vidmodes():\n28 xrandr_output = subprocess.Popen([\"xrandr\"],\n29 stdout=subprocess.PIPE).communicate()[0]\n30 return list([line for line in xrandr_output.decode().split(\"\\n\")])\n31 \n32 \n33 def get_outputs():\n34 \"\"\"Return list of tuples containing output name and geometry.\"\"\"\n35 outputs = []\n36 vid_modes = get_vidmodes()\n37 if not vid_modes:\n38 logger.error(\"xrandr didn't return anything\")\n39 return []\n40 for line in vid_modes:\n41 parts = line.split()\n42 if len(parts) < 2:\n43 continue\n44 if parts[1] == 'connected':\n45 if len(parts) == 2:\n46 continue\n47 if parts[2] != 'primary':\n48 geom = parts[2]\n49 rotate = parts[3]\n50 else:\n51 geom = parts[3]\n52 rotate = parts[4]\n53 if geom.startswith('('): # Screen turned off, no geometry\n54 continue\n55 if rotate.startswith('('): # Screen not rotated, no need to include\n56 outputs.append((parts[0], geom, \"normal\"))\n57 else:\n58 if rotate in (\"left\", \"right\"):\n59 geom_parts = geom.split('+')\n60 x_y = geom_parts[0].split('x')\n61 geom = \"{}x{}+{}+{}\".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])\n62 outputs.append((parts[0], geom, rotate))\n63 return outputs\n64 \n65 \n66 def get_output_names():\n67 return [output[0] for output in get_outputs()]\n68 \n69 \n70 def turn_off_except(display):\n71 for output in get_outputs():\n72 if output[0] != display:\n73 subprocess.Popen([\"xrandr\", \"--output\", output[0], \"--off\"])\n74 \n75 \n76 def get_resolutions():\n77 \"\"\"Return the list of supported screen resolutions.\"\"\"\n78 resolution_list = []\n79 for line in get_vidmodes():\n80 if line.startswith(\" \"):\n81 resolution_match = re.match('.*?(\\d+x\\d+).*', line)\n82 if resolution_match:\n83 resolution_list.append(resolution_match.groups()[0])\n84 return resolution_list\n85 \n86 \n87 def get_unique_resolutions():\n88 \"\"\"Return available resolutions, without duplicates and ordered with highest resolution first\"\"\"\n89 return sorted(set(get_resolutions()), key=lambda x: int(x.split('x')[0]), reverse=True)\n90 \n91 \n92 def get_current_resolution(monitor=0):\n93 \"\"\"Return the current resolution for the desktop.\"\"\"\n94 resolution = list()\n95 for line in get_vidmodes():\n96 if line.startswith(\" \") and \"*\" in line:\n97 resolution_match = re.match('.*?(\\d+x\\d+).*', line)\n98 if resolution_match:\n99 resolution.append(resolution_match.groups()[0])\n100 if monitor == 'all':\n101 return resolution\n102 else:\n103 return resolution[monitor]\n104 \n105 \n106 def change_resolution(resolution):\n107 \"\"\"Change display resolution.\n108 \n109 Takes a string for single monitors or a list of displays as returned\n110 by get_outputs().\n111 \"\"\"\n112 if not resolution:\n113 logger.warning(\"No resolution provided\")\n114 return\n115 if isinstance(resolution, str):\n116 logger.debug(\"Switching resolution to %s\", resolution)\n117 \n118 if resolution not in 
get_resolutions():\n119 logger.warning(\"Resolution %s doesn't exist.\" % resolution)\n120 else:\n121 subprocess.Popen([\"xrandr\", \"-s\", resolution])\n122 else:\n123 for display in resolution:\n124 display_name = display[0]\n125 logger.debug(\"Switching to %s on %s\", display[1], display[0])\n126 display_geom = display[1].split('+')\n127 display_resolution = display_geom[0]\n128 position = (display_geom[1], display_geom[2])\n129 \n130 if (\n131 len(display) > 2 and\n132 display[2] in ('normal', 'left', 'right', 'inverted')\n133 ):\n134 rotation = display[2]\n135 else:\n136 rotation = \"normal\"\n137 \n138 subprocess.Popen([\n139 \"xrandr\",\n140 \"--output\", display_name,\n141 \"--mode\", display_resolution,\n142 \"--pos\", \"{}x{}\".format(position[0], position[1]),\n143 \"--rotate\", rotation\n144 ]).communicate()\n145 \n146 \n147 def restore_gamma():\n148 \"\"\"Restores gamma to a normal level.\"\"\"\n149 global XGAMMA_FOUND\n150 if XGAMMA_FOUND is None:\n151 XGAMMA_FOUND = bool(system.find_executable('xgamma'))\n152 if XGAMMA_FOUND is True:\n153 subprocess.Popen([\"xgamma\", \"-gamma\", \"1.0\"])\n154 else:\n155 logger.warning('xgamma is not available on your system')\n156 \n157 \n158 def get_xrandr_version():\n159 \"\"\"Return the major and minor version of XRandR utility\"\"\"\n160 pattern = \"version\"\n161 xrandr_output = subprocess.Popen([\"xrandr\", \"--version\"],\n162 stdout=subprocess.PIPE).communicate()[0].decode()\n163 position = xrandr_output.find(pattern) + len(pattern)\n164 version_str = xrandr_output[position:].strip().split(\".\")\n165 try:\n166 return {\"major\": int(version_str[0]), \"minor\": int(version_str[1])}\n167 except ValueError:\n168 logger.error(\"Can't find version in: %s\", xrandr_output)\n169 return {\"major\": 0, \"minor\": 0}\n170 \n171 \n172 def get_providers():\n173 \"\"\"Return the list of available graphic cards\"\"\"\n174 pattern = \"name:\"\n175 providers = list()\n176 version = get_xrandr_version()\n177 \n178 if version[\"major\"] == 1 and version[\"minor\"] >= 4:\n179 xrandr_output = subprocess.Popen([\"xrandr\", \"--listproviders\"],\n180 stdout=subprocess.PIPE).communicate()[0].decode()\n181 for line in xrandr_output.split(\"\\n\"):\n182 if line.find(\"Provider \") != 0:\n183 continue\n184 position = line.find(pattern) + len(pattern)\n185 providers.append(line[position:].strip())\n186 \n187 return providers\n188\n[end of lutris/util/display.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/lutris/util/display.py b/lutris/util/display.py\n--- a/lutris/util/display.py\n+++ b/lutris/util/display.py\n@@ -34,6 +34,11 @@\n \"\"\"Return list of tuples containing output name and geometry.\"\"\"\n outputs = []\n vid_modes = get_vidmodes()\n+ display=None\n+ mode=None\n+ position=None\n+ rotate=None\n+ refresh_rate=None\n if not vid_modes:\n logger.error(\"xrandr didn't return anything\")\n return []\n@@ -53,16 +58,18 @@\n if geom.startswith('('): # Screen turned off, no geometry\n continue\n if rotate.startswith('('): # Screen not rotated, no need to include\n- outputs.append((parts[0], geom, \"normal\"))\n- else:\n- if rotate in (\"left\", \"right\"):\n- geom_parts = geom.split('+')\n- x_y = geom_parts[0].split('x')\n- geom = \"{}x{}+{}+{}\".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])\n- outputs.append((parts[0], geom, rotate))\n+ rotate=\"normal\"\n+ geo_split=geom.split('+')\n+ position=geo_split[1] + \"x\" + geo_split[2]\n+ display=parts[0]\n+ elif '*' in line:\n+ mode=parts[0]\n+ for number in parts:\n+ if '*' in number:\n+ refresh_rate=number[:5]\n+ outputs.append((display, mode, position, rotate, refresh_rate))\n return outputs\n \n-\n def get_output_names():\n return [output[0] for output in get_outputs()]\n \n@@ -123,24 +130,25 @@\n for display in resolution:\n display_name = display[0]\n logger.debug(\"Switching to %s on %s\", display[1], display[0])\n- display_geom = display[1].split('+')\n- display_resolution = display_geom[0]\n- position = (display_geom[1], display_geom[2])\n+ display_mode=display[1]\n+ position=display[2]\n+ refresh_rate=display[4]\n \n if (\n len(display) > 2 and\n- display[2] in ('normal', 'left', 'right', 'inverted')\n+ display[3] in ('normal', 'left', 'right', 'inverted')\n ):\n- rotation = display[2]\n+ rotation = display[3]\n else:\n rotation = \"normal\"\n \n subprocess.Popen([\n \"xrandr\",\n \"--output\", display_name,\n- \"--mode\", display_resolution,\n- \"--pos\", \"{}x{}\".format(position[0], position[1]),\n- \"--rotate\", rotation\n+ \"--mode\", display_mode,\n+ \"--pos\", position,\n+ \"--rotate\", rotation,\n+ \"--rate\", refresh_rate\n ]).communicate()\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/lutris/util/display.py b/lutris/util/display.py\\n--- a/lutris/util/display.py\\n+++ b/lutris/util/display.py\\n@@ -34,6 +34,11 @@\\n \\\"\\\"\\\"Return list of tuples containing output name and geometry.\\\"\\\"\\\"\\n outputs = []\\n vid_modes = 
get_vidmodes()\\n+ display=None\\n+ mode=None\\n+ position=None\\n+ rotate=None\\n+ refresh_rate=None\\n if not vid_modes:\\n logger.error(\\\"xrandr didn't return anything\\\")\\n return []\\n@@ -53,16 +58,18 @@\\n if geom.startswith('('): # Screen turned off, no geometry\\n continue\\n if rotate.startswith('('): # Screen not rotated, no need to include\\n- outputs.append((parts[0], geom, \\\"normal\\\"))\\n- else:\\n- if rotate in (\\\"left\\\", \\\"right\\\"):\\n- geom_parts = geom.split('+')\\n- x_y = geom_parts[0].split('x')\\n- geom = \\\"{}x{}+{}+{}\\\".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])\\n- outputs.append((parts[0], geom, rotate))\\n+ rotate=\\\"normal\\\"\\n+ geo_split=geom.split('+')\\n+ position=geo_split[1] + \\\"x\\\" + geo_split[2]\\n+ display=parts[0]\\n+ elif '*' in line:\\n+ mode=parts[0]\\n+ for number in parts:\\n+ if '*' in number:\\n+ refresh_rate=number[:5]\\n+ outputs.append((display, mode, position, rotate, refresh_rate))\\n return outputs\\n \\n-\\n def get_output_names():\\n return [output[0] for output in get_outputs()]\\n \\n@@ -123,24 +130,25 @@\\n for display in resolution:\\n display_name = display[0]\\n logger.debug(\\\"Switching to %s on %s\\\", display[1], display[0])\\n- display_geom = display[1].split('+')\\n- display_resolution = display_geom[0]\\n- position = (display_geom[1], display_geom[2])\\n+ display_mode=display[1]\\n+ position=display[2]\\n+ refresh_rate=display[4]\\n \\n if (\\n len(display) > 2 and\\n- display[2] in ('normal', 'left', 'right', 'inverted')\\n+ display[3] in ('normal', 'left', 'right', 'inverted')\\n ):\\n- rotation = display[2]\\n+ rotation = display[3]\\n else:\\n rotation = \\\"normal\\\"\\n \\n subprocess.Popen([\\n \\\"xrandr\\\",\\n \\\"--output\\\", display_name,\\n- \\\"--mode\\\", display_resolution,\\n- \\\"--pos\\\", \\\"{}x{}\\\".format(position[0], position[1]),\\n- \\\"--rotate\\\", rotation\\n+ \\\"--mode\\\", display_mode,\\n+ \\\"--pos\\\", position,\\n+ \\\"--rotate\\\", rotation,\\n+ \\\"--rate\\\", refresh_rate\\n ]).communicate()\\n\", \"issue\": \"Restore monitor refresh rate\\nMy monitor's default refresh rate is 60Hz, but I set it to 72Hz. When game exits, lutris does not restore monitor refresh rate to 72Hz. Is this feature available? 
If not can it be added?\\n\", \"before_files\": [{\"content\": \"import re\\nimport time\\nimport subprocess\\n\\nfrom lutris.util import system\\nfrom lutris.util.log import logger\\n\\nXRANDR_CACHE = None\\nXRANDR_CACHE_SET_AT = None\\nXGAMMA_FOUND = None\\n\\n\\ndef cached(function):\\n def wrapper():\\n global XRANDR_CACHE\\n global XRANDR_CACHE_SET_AT\\n\\n if XRANDR_CACHE and time.time() - XRANDR_CACHE_SET_AT < 60:\\n return XRANDR_CACHE\\n XRANDR_CACHE = function()\\n XRANDR_CACHE_SET_AT = time.time()\\n return XRANDR_CACHE\\n return wrapper\\n\\n\\n@cached\\ndef get_vidmodes():\\n xrandr_output = subprocess.Popen([\\\"xrandr\\\"],\\n stdout=subprocess.PIPE).communicate()[0]\\n return list([line for line in xrandr_output.decode().split(\\\"\\\\n\\\")])\\n\\n\\ndef get_outputs():\\n \\\"\\\"\\\"Return list of tuples containing output name and geometry.\\\"\\\"\\\"\\n outputs = []\\n vid_modes = get_vidmodes()\\n if not vid_modes:\\n logger.error(\\\"xrandr didn't return anything\\\")\\n return []\\n for line in vid_modes:\\n parts = line.split()\\n if len(parts) < 2:\\n continue\\n if parts[1] == 'connected':\\n if len(parts) == 2:\\n continue\\n if parts[2] != 'primary':\\n geom = parts[2]\\n rotate = parts[3]\\n else:\\n geom = parts[3]\\n rotate = parts[4]\\n if geom.startswith('('): # Screen turned off, no geometry\\n continue\\n if rotate.startswith('('): # Screen not rotated, no need to include\\n outputs.append((parts[0], geom, \\\"normal\\\"))\\n else:\\n if rotate in (\\\"left\\\", \\\"right\\\"):\\n geom_parts = geom.split('+')\\n x_y = geom_parts[0].split('x')\\n geom = \\\"{}x{}+{}+{}\\\".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])\\n outputs.append((parts[0], geom, rotate))\\n return outputs\\n\\n\\ndef get_output_names():\\n return [output[0] for output in get_outputs()]\\n\\n\\ndef turn_off_except(display):\\n for output in get_outputs():\\n if output[0] != display:\\n subprocess.Popen([\\\"xrandr\\\", \\\"--output\\\", output[0], \\\"--off\\\"])\\n\\n\\ndef get_resolutions():\\n \\\"\\\"\\\"Return the list of supported screen resolutions.\\\"\\\"\\\"\\n resolution_list = []\\n for line in get_vidmodes():\\n if line.startswith(\\\" \\\"):\\n resolution_match = re.match('.*?(\\\\d+x\\\\d+).*', line)\\n if resolution_match:\\n resolution_list.append(resolution_match.groups()[0])\\n return resolution_list\\n\\n\\ndef get_unique_resolutions():\\n \\\"\\\"\\\"Return available resolutions, without duplicates and ordered with highest resolution first\\\"\\\"\\\"\\n return sorted(set(get_resolutions()), key=lambda x: int(x.split('x')[0]), reverse=True)\\n\\n\\ndef get_current_resolution(monitor=0):\\n \\\"\\\"\\\"Return the current resolution for the desktop.\\\"\\\"\\\"\\n resolution = list()\\n for line in get_vidmodes():\\n if line.startswith(\\\" \\\") and \\\"*\\\" in line:\\n resolution_match = re.match('.*?(\\\\d+x\\\\d+).*', line)\\n if resolution_match:\\n resolution.append(resolution_match.groups()[0])\\n if monitor == 'all':\\n return resolution\\n else:\\n return resolution[monitor]\\n\\n\\ndef change_resolution(resolution):\\n \\\"\\\"\\\"Change display resolution.\\n\\n Takes a string for single monitors or a list of displays as returned\\n by get_outputs().\\n \\\"\\\"\\\"\\n if not resolution:\\n logger.warning(\\\"No resolution provided\\\")\\n return\\n if isinstance(resolution, str):\\n logger.debug(\\\"Switching resolution to %s\\\", resolution)\\n\\n if resolution not in get_resolutions():\\n logger.warning(\\\"Resolution %s doesn't exist.\\\" % 
resolution)\\n else:\\n subprocess.Popen([\\\"xrandr\\\", \\\"-s\\\", resolution])\\n else:\\n for display in resolution:\\n display_name = display[0]\\n logger.debug(\\\"Switching to %s on %s\\\", display[1], display[0])\\n display_geom = display[1].split('+')\\n display_resolution = display_geom[0]\\n position = (display_geom[1], display_geom[2])\\n\\n if (\\n len(display) > 2 and\\n display[2] in ('normal', 'left', 'right', 'inverted')\\n ):\\n rotation = display[2]\\n else:\\n rotation = \\\"normal\\\"\\n\\n subprocess.Popen([\\n \\\"xrandr\\\",\\n \\\"--output\\\", display_name,\\n \\\"--mode\\\", display_resolution,\\n \\\"--pos\\\", \\\"{}x{}\\\".format(position[0], position[1]),\\n \\\"--rotate\\\", rotation\\n ]).communicate()\\n\\n\\ndef restore_gamma():\\n \\\"\\\"\\\"Restores gamma to a normal level.\\\"\\\"\\\"\\n global XGAMMA_FOUND\\n if XGAMMA_FOUND is None:\\n XGAMMA_FOUND = bool(system.find_executable('xgamma'))\\n if XGAMMA_FOUND is True:\\n subprocess.Popen([\\\"xgamma\\\", \\\"-gamma\\\", \\\"1.0\\\"])\\n else:\\n logger.warning('xgamma is not available on your system')\\n\\n\\ndef get_xrandr_version():\\n \\\"\\\"\\\"Return the major and minor version of XRandR utility\\\"\\\"\\\"\\n pattern = \\\"version\\\"\\n xrandr_output = subprocess.Popen([\\\"xrandr\\\", \\\"--version\\\"],\\n stdout=subprocess.PIPE).communicate()[0].decode()\\n position = xrandr_output.find(pattern) + len(pattern)\\n version_str = xrandr_output[position:].strip().split(\\\".\\\")\\n try:\\n return {\\\"major\\\": int(version_str[0]), \\\"minor\\\": int(version_str[1])}\\n except ValueError:\\n logger.error(\\\"Can't find version in: %s\\\", xrandr_output)\\n return {\\\"major\\\": 0, \\\"minor\\\": 0}\\n\\n\\ndef get_providers():\\n \\\"\\\"\\\"Return the list of available graphic cards\\\"\\\"\\\"\\n pattern = \\\"name:\\\"\\n providers = list()\\n version = get_xrandr_version()\\n\\n if version[\\\"major\\\"] == 1 and version[\\\"minor\\\"] >= 4:\\n xrandr_output = subprocess.Popen([\\\"xrandr\\\", \\\"--listproviders\\\"],\\n stdout=subprocess.PIPE).communicate()[0].decode()\\n for line in xrandr_output.split(\\\"\\\\n\\\"):\\n if line.find(\\\"Provider \\\") != 0:\\n continue\\n position = line.find(pattern) + len(pattern)\\n providers.append(line[position:].strip())\\n\\n return providers\\n\", \"path\": \"lutris/util/display.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2448,"string":"2,448"},"num_tokens_diff":{"kind":"number","value":665,"string":"665"}}},{"rowIdx":18169,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_20686"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"alltheplaces__alltheplaces-3325"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nSpider labreweries is broken\nDuring the global build at 2021-07-21-14-42-39, spider **labreweries** failed with **0 features** and **88 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/labreweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson))\n\n\n\n[start of locations/spiders/labreweries.py]\n1 # -*- coding: utf-8 -*-\n2 import scrapy\n3 import re\n4 \n5 from locations.items import 
GeojsonPointItem\n6 \n7 \n8 class LaBreweriesSpider(scrapy.Spider):\n9 name = \"labreweries\"\n10 allowed_domains = [\"labeerhop.com\"]\n11 start_urls = (\n12 'http://labeerhop.com/breweries-sitemap.xml',\n13 )\n14 \n15 def store_hours(self, store_hours):\n16 day_groups = []\n17 this_day_group = None\n18 for day in store_hours:\n19 day = day.replace(' :-', ' 12:00 -')\n20 day = day.split('
Hours
')[1].strip('
').strip('')\n21 match = re.search(r'(closed|(\\d{1,2})\\S.\\s*-\\s*(\\d{1,2})\\S.)', day.lower())\n22 open('/tmp/test1.txt', 'w').write(str(day))\n23 (dow, f_hr, t_hr) = match.groups()\n24 day_short = dow[:2]\n25 \n26 f_hr = int(f_hr)\n27 t_hr = int(t_hr)\n28 \n29 hours = '{:02d}-{:02d}'.format(\n30 f_hr,\n31 t_hr,\n32 )\n33 \n34 if not this_day_group:\n35 this_day_group = {\n36 'from_day': day_short,\n37 'to_day': day_short,\n38 'hours': hours\n39 }\n40 elif this_day_group['hours'] != hours:\n41 day_groups.append(this_day_group)\n42 this_day_group = {\n43 'from_day': day_short,\n44 'to_day': day_short,\n45 'hours': hours\n46 }\n47 elif this_day_group['hours'] == hours:\n48 this_day_group['to_day'] = day_short\n49 \n50 day_groups.append(this_day_group)\n51 \n52 opening_hours = \"\"\n53 if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n54 opening_hours = '24/7'\n55 else:\n56 for day_group in day_groups:\n57 if day_group['from_day'] == day_group['to_day']:\n58 opening_hours += '{from_day} {hours}; '.format(**day_group)\n59 elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n60 opening_hours += '{hours}; '.format(**day_group)\n61 else:\n62 opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n63 opening_hours = opening_hours[:-2]\n64 \n65 return opening_hours\n66 \n67 def address(self, address):\n68 if not address:\n69 return None\n70 \n71 addr_tags = {\n72 \"addr_full\": address[0].split(',')[0].strip(),\n73 \"city\": address[0].split(',')[1].strip(),\n74 \"state\": address[0].split(' ')[-2].strip(),\n75 \"postcode\": address[0].split(' ')[-1].strip(),\n76 }\n77 \n78 return addr_tags\n79 \n80 def parse(self, response):\n81 response.selector.remove_namespaces()\n82 city_urls = response.xpath('//url/loc/text()').extract()\n83 for path in city_urls:\n84 if path not in \"http://labeerhop.com/breweries/1056/\":\n85 yield scrapy.Request(\n86 path.strip(),\n87 callback=self.parse_store,\n88 )\n89 \n90 def parse_store(self, response):\n91 \n92 properties = {\n93 'website': response.xpath('//head/link[@rel=\"canonical\"]/@href').extract_first(),\n94 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\"['']\"),\n95 'opening_hours': re.sub(r'\\s+', ' ', response.css('#secondary').extract()[0].split('
Hours
')[1].replace('
','').replace('','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n96 # 'lon': float(data['geo']['longitude']), # not lon on page\n97 # 'lat': float(data['geo']['latitude']), # not lat on page\n98 }\n99 \n100 address = self.address(response.xpath('/html/body/div[1]/div[1]/aside/address/text()').extract())\n101 if address:\n102 properties.update(address)\n103 \n104 \n105 yield GeojsonPointItem(**properties)\n106\n[end of locations/spiders/labreweries.py]\n
\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/locations/spiders/labreweries.py b/locations/spiders/labreweries.py\n--- a/locations/spiders/labreweries.py\n+++ b/locations/spiders/labreweries.py\n@@ -92,7 +92,7 @@\n properties = {\n 'website': response.xpath('//head/link[@rel=\"canonical\"]/@href').extract_first(),\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\"['']\"),\n- 'opening_hours': re.sub(r'\\s+', ' ', response.css('#secondary').extract()[0].split('
Hours
')[1].replace('
','').replace('','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n+ 'opening_hours': re.sub(r'\\s+', ' ', response.xpath('//*[@id=\"content\"]/div/div[2]/div[3]').extract()[0].split('
Hours
')[1].replace('
','').replace('
','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n # 'lon': float(data['geo']['longitude']), # not lon on page\n # 'lat': float(data['geo']['latitude']), # not lat on page\n }\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/locations/spiders/labreweries.py b/locations/spiders/labreweries.py\\n--- a/locations/spiders/labreweries.py\\n+++ b/locations/spiders/labreweries.py\\n@@ -92,7 +92,7 @@\\n properties = {\\n 'website': response.xpath('//head/link[@rel=\\\"canonical\\\"]/@href').extract_first(),\\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\\\"['']\\\"),\\n- 'opening_hours': re.sub(r'\\\\s+', ' ', response.css('#secondary').extract()[0].split('
Hours
')[1].replace('
','').replace('','').replace('\\\\t',' ').replace('\\\\n','').replace('\\\\r',' ')).strip(),\\n+ 'opening_hours': re.sub(r'\\\\s+', ' ', response.xpath('//*[@id=\\\"content\\\"]/div/div[2]/div[3]').extract()[0].split('
Hours
')[1].replace('
','').replace('
','').replace('\\\\t',' ').replace('\\\\n','').replace('\\\\r',' ')).strip(),\\n # 'lon': float(data['geo']['longitude']), # not lon on page\\n # 'lat': float(data['geo']['latitude']), # not lat on page\\n }\\n\", \"issue\": \"Spider labreweries is broken\\nDuring the global build at 2021-07-21-14-42-39, spider **labreweries** failed with **0 features** and **88 errors**.\\n\\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/labreweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson))\\n\", \"before_files\": [{\"content\": \"# -*- coding: utf-8 -*-\\nimport scrapy\\nimport re\\n\\nfrom locations.items import GeojsonPointItem\\n\\n\\nclass LaBreweriesSpider(scrapy.Spider):\\n name = \\\"labreweries\\\"\\n allowed_domains = [\\\"labeerhop.com\\\"]\\n start_urls = (\\n 'http://labeerhop.com/breweries-sitemap.xml',\\n )\\n\\n def store_hours(self, store_hours):\\n day_groups = []\\n this_day_group = None\\n for day in store_hours:\\n day = day.replace(' :-', ' 12:00 -')\\n day = day.split('
Hours
')[1].strip('
').strip('')\\n match = re.search(r'(closed|(\\\\d{1,2})\\\\S.\\\\s*-\\\\s*(\\\\d{1,2})\\\\S.)', day.lower())\\n open('/tmp/test1.txt', 'w').write(str(day))\\n (dow, f_hr, t_hr) = match.groups()\\n day_short = dow[:2]\\n\\n f_hr = int(f_hr)\\n t_hr = int(t_hr)\\n\\n hours = '{:02d}-{:02d}'.format(\\n f_hr,\\n t_hr,\\n )\\n\\n if not this_day_group:\\n this_day_group = {\\n 'from_day': day_short,\\n 'to_day': day_short,\\n 'hours': hours\\n }\\n elif this_day_group['hours'] != hours:\\n day_groups.append(this_day_group)\\n this_day_group = {\\n 'from_day': day_short,\\n 'to_day': day_short,\\n 'hours': hours\\n }\\n elif this_day_group['hours'] == hours:\\n this_day_group['to_day'] = day_short\\n\\n day_groups.append(this_day_group)\\n\\n opening_hours = \\\"\\\"\\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\\n opening_hours = '24/7'\\n else:\\n for day_group in day_groups:\\n if day_group['from_day'] == day_group['to_day']:\\n opening_hours += '{from_day} {hours}; '.format(**day_group)\\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\\n opening_hours += '{hours}; '.format(**day_group)\\n else:\\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\\n opening_hours = opening_hours[:-2]\\n\\n return opening_hours\\n\\n def address(self, address):\\n if not address:\\n return None\\n\\n addr_tags = {\\n \\\"addr_full\\\": address[0].split(',')[0].strip(),\\n \\\"city\\\": address[0].split(',')[1].strip(),\\n \\\"state\\\": address[0].split(' ')[-2].strip(),\\n \\\"postcode\\\": address[0].split(' ')[-1].strip(),\\n }\\n\\n return addr_tags\\n\\n def parse(self, response):\\n response.selector.remove_namespaces()\\n city_urls = response.xpath('//url/loc/text()').extract()\\n for path in city_urls:\\n if path not in \\\"http://labeerhop.com/breweries/1056/\\\":\\n yield scrapy.Request(\\n path.strip(),\\n callback=self.parse_store,\\n )\\n\\n def parse_store(self, response):\\n\\n properties = {\\n 'website': response.xpath('//head/link[@rel=\\\"canonical\\\"]/@href').extract_first(),\\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\\\"['']\\\"),\\n 'opening_hours': re.sub(r'\\\\s+', ' ', response.css('#secondary').extract()[0].split('
Hours
')[1].replace('
','').replace('','').replace('\\\\t',' ').replace('\\\\n','').replace('\\\\r',' ')).strip(),\\n # 'lon': float(data['geo']['longitude']), # not lon on page\\n # 'lat': float(data['geo']['latitude']), # not lat on page\\n }\\n\\n address = self.address(response.xpath('/html/body/div[1]/div[1]/aside/address/text()').extract())\\n if address:\\n properties.update(address)\\n\\n\\n yield GeojsonPointItem(**properties)\\n\", \"path\": \"locations/spiders/labreweries.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1887,"string":"1,887"},"num_tokens_diff":{"kind":"number","value":299,"string":"299"}}},{"rowIdx":18170,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_3566"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"sopel-irc__sopel-1417"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nreddit: support all reddit subdomains\n#1397 adds support for links to `old.reddit.com`, but any two-letter subdomain of `reddit.com` is valid as well. Reddit uses these for internationalization (e.g. `it.reddit.com` -> Italian UI) and also to allow subreddits to add custom styles (a common example is using `np.reddit.com` to trigger a \"no participation\" stylesheet that hides voting arrows).\r\n\r\nSopel's reddit module should support these subdomains.\r\n\r\nThere are also really old three-letter subdomains (`pay`, `ssl`) that still work. Most three-letter combos I tried redirect to `www.reddit.com/r/combohere` though.\n\n\n\n[start of sopel/modules/reddit.py]\n1 # coding=utf-8\n2 # Author: Elsie Powell, embolalia.com\n3 from __future__ import unicode_literals, absolute_import, print_function, division\n4 \n5 from sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP\n6 from sopel.formatting import bold, color, colors\n7 from sopel.web import USER_AGENT\n8 from sopel.tools import SopelMemory, time\n9 import datetime as dt\n10 import praw\n11 import re\n12 import sys\n13 if sys.version_info.major >= 3:\n14 unicode = str\n15 if sys.version_info.minor >= 4:\n16 from html import unescape\n17 else:\n18 from html.parser import HTMLParser\n19 unescape = HTMLParser().unescape\n20 else:\n21 from HTMLParser import HTMLParser\n22 unescape = HTMLParser().unescape\n23 \n24 \n25 domain = r'https?://(?:www\\.|np\\.|old\\.)?reddit\\.com'\n26 post_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\n27 user_url = r'%s/u(ser)?/([\\w-]+)' % domain\n28 post_regex = re.compile(post_url)\n29 user_regex = re.compile(user_url)\n30 spoiler_subs = [\n31 'stevenuniverse',\n32 'onepunchman',\n33 ]\n34 \n35 \n36 def setup(bot):\n37 if not bot.memory.contains('url_callbacks'):\n38 bot.memory['url_callbacks'] = SopelMemory()\n39 bot.memory['url_callbacks'][post_regex] = rpost_info\n40 bot.memory['url_callbacks'][user_regex] = redditor_info\n41 \n42 \n43 def shutdown(bot):\n44 del bot.memory['url_callbacks'][post_regex]\n45 del bot.memory['url_callbacks'][user_regex]\n46 \n47 \n48 @rule('.*%s.*' % post_url)\n49 def rpost_info(bot, trigger, match=None):\n50 match = match or trigger\n51 try:\n52 r = praw.Reddit(\n53 user_agent=USER_AGENT,\n54 client_id='6EiphT6SSQq7FQ',\n55 client_secret=None,\n56 )\n57 s = r.submission(id=match.group(2))\n58 except Exception:\n59 r = praw.Reddit(user_agent=USER_AGENT)\n60 s = r.get_submission(submission_id=match.group(2))\n61 \n62 message = ('[REDDIT] {title} {link}{nsfw} | 
{points} points ({percent}) | '\n63 '{comments} comments | Posted by {author} | '\n64 'Created at {created}')\n65 \n66 subreddit = s.subreddit.display_name\n67 if s.is_self:\n68 link = '(self.{})'.format(subreddit)\n69 else:\n70 link = '({}) to r/{}'.format(s.url, subreddit)\n71 \n72 if s.over_18:\n73 if subreddit.lower() in spoiler_subs:\n74 nsfw = bold(color(' [SPOILERS]', colors.RED))\n75 else:\n76 nsfw = bold(color(' [NSFW]', colors.RED))\n77 \n78 sfw = bot.db.get_channel_value(trigger.sender, 'sfw')\n79 if sfw:\n80 link = '(link hidden)'\n81 bot.write(['KICK', trigger.sender, trigger.nick,\n82 'Linking to NSFW content in a SFW channel.'])\n83 else:\n84 nsfw = ''\n85 \n86 if s.author:\n87 author = s.author.name\n88 else:\n89 author = '[deleted]'\n90 \n91 tz = time.get_timezone(bot.db, bot.config, None, trigger.nick,\n92 trigger.sender)\n93 time_created = dt.datetime.utcfromtimestamp(s.created_utc)\n94 created = time.format_time(bot.db, bot.config, tz, trigger.nick,\n95 trigger.sender, time_created)\n96 \n97 if s.score > 0:\n98 point_color = colors.GREEN\n99 else:\n100 point_color = colors.RED\n101 \n102 percent = color(unicode(s.upvote_ratio * 100) + '%', point_color)\n103 \n104 title = unescape(s.title)\n105 message = message.format(\n106 title=title, link=link, nsfw=nsfw, points=s.score, percent=percent,\n107 comments=s.num_comments, author=author, created=created)\n108 \n109 bot.say(message)\n110 \n111 \n112 # If you change this, you'll have to change some other things...\n113 @commands('redditor')\n114 @example('.redditor poem_for_your_sprog')\n115 def redditor_info(bot, trigger, match=None):\n116 \"\"\"Show information about the given Redditor\"\"\"\n117 commanded = re.match(bot.config.core.prefix + 'redditor', trigger)\n118 r = praw.Reddit(\n119 user_agent=USER_AGENT,\n120 client_id='6EiphT6SSQq7FQ',\n121 client_secret=None,\n122 )\n123 match = match or trigger\n124 try:\n125 u = r.get_redditor(match.group(2))\n126 except Exception: # TODO: Be specific\n127 if commanded:\n128 bot.say('No such Redditor.')\n129 return NOLIMIT\n130 else:\n131 return\n132 # Fail silently if it wasn't an explicit command.\n133 \n134 message = '[REDDITOR] ' + u.name\n135 now = dt.datetime.utcnow()\n136 cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc)\n137 cakeday_start = cakeday_start.replace(year=now.year)\n138 day = dt.timedelta(days=1)\n139 year_div_by_400 = now.year % 400 == 0\n140 year_div_by_100 = now.year % 100 == 0\n141 year_div_by_4 = now.year % 4 == 0\n142 is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4)\n143 if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)):\n144 # If cake day is 2/29 and it's not a leap year, cake day is 1/3.\n145 # Cake day begins at exact account creation time.\n146 is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day)\n147 else:\n148 is_cakeday = cakeday_start <= now <= cakeday_start + day\n149 \n150 if is_cakeday:\n151 message = message + ' | \u0002\u000313Cake day\u0003\u0002'\n152 if commanded:\n153 message = message + ' | https://reddit.com/u/' + u.name\n154 if u.is_gold:\n155 message = message + ' | \u0002\u000308Gold\u0003\u0002'\n156 if u.is_mod:\n157 message = message + ' | \u0002\u000305Mod\u0003\u0002'\n158 message = message + (' | Link: ' + str(u.link_karma) +\n159 ' | Comment: ' + str(u.comment_karma))\n160 \n161 bot.say(message)\n162 \n163 \n164 # If you change the groups here, you'll have to change some things above.\n165 @rule('.*%s.*' % user_url)\n166 def auto_redditor_info(bot, 
trigger):\n167 redditor_info(bot, trigger)\n168 \n169 \n170 @require_chanmsg('.setsfw is only permitted in channels')\n171 @commands('setsafeforwork', 'setsfw')\n172 @example('.setsfw true')\n173 @example('.setsfw false')\n174 def update_channel(bot, trigger):\n175 \"\"\"\n176 Sets the Safe for Work status (true or false) for the current\n177 channel. Defaults to false.\n178 \"\"\"\n179 if bot.privileges[trigger.sender][trigger.nick] < OP:\n180 return\n181 else:\n182 param = 'true'\n183 if trigger.group(2) and trigger.group(3):\n184 param = trigger.group(3).strip().lower()\n185 sfw = param == 'true'\n186 bot.db.set_channel_value(trigger.sender, 'sfw', sfw)\n187 if sfw:\n188 bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender)\n189 else:\n190 bot.reply('Got it. %s is now flagged as NSFW.' % trigger.sender)\n191 \n192 \n193 @commands('getsafeforwork', 'getsfw')\n194 @example('.getsfw [channel]')\n195 def get_channel_sfw(bot, trigger):\n196 \"\"\"\n197 Gets the preferred channel's Safe for Work status, or the current\n198 channel's status if no channel given.\n199 \"\"\"\n200 channel = trigger.group(2)\n201 if not channel:\n202 channel = trigger.sender\n203 if channel.is_nick():\n204 return bot.say('.getsfw with no channel param is only permitted in channels')\n205 \n206 channel = channel.strip()\n207 \n208 sfw = bot.db.get_channel_value(channel, 'sfw')\n209 if sfw:\n210 bot.say('%s is flagged as SFW' % channel)\n211 else:\n212 bot.say('%s is flagged as NSFW' % channel)\n213\n[end of sopel/modules/reddit.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/sopel/modules/reddit.py b/sopel/modules/reddit.py\n--- a/sopel/modules/reddit.py\n+++ b/sopel/modules/reddit.py\n@@ -22,7 +22,7 @@\n unescape = HTMLParser().unescape\n \n \n-domain = r'https?://(?:www\\.|np\\.|old\\.)?reddit\\.com'\n+domain = r'https?://(?:www\\.|old\\.|pay\\.|ssl\\.|[a-z]{2}\\.)?reddit\\.com'\n post_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\n user_url = r'%s/u(ser)?/([\\w-]+)' % domain\n post_regex = re.compile(post_url)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/sopel/modules/reddit.py b/sopel/modules/reddit.py\\n--- a/sopel/modules/reddit.py\\n+++ b/sopel/modules/reddit.py\\n@@ -22,7 +22,7 @@\\n unescape = HTMLParser().unescape\\n \\n \\n-domain = r'https?://(?:www\\\\.|np\\\\.|old\\\\.)?reddit\\\\.com'\\n+domain = 
r'https?://(?:www\\\\.|old\\\\.|pay\\\\.|ssl\\\\.|[a-z]{2}\\\\.)?reddit\\\\.com'\\n post_url = r'%s/r/(.*?)/comments/([\\\\w-]+)' % domain\\n user_url = r'%s/u(ser)?/([\\\\w-]+)' % domain\\n post_regex = re.compile(post_url)\\n\", \"issue\": \"reddit: support all reddit subdomains\\n#1397 adds support for links to `old.reddit.com`, but any two-letter subdomain of `reddit.com` is valid as well. Reddit uses these for internationalization (e.g. `it.reddit.com` -> Italian UI) and also to allow subreddits to add custom styles (a common example is using `np.reddit.com` to trigger a \\\"no participation\\\" stylesheet that hides voting arrows).\\r\\n\\r\\nSopel's reddit module should support these subdomains.\\r\\n\\r\\nThere are also really old three-letter subdomains (`pay`, `ssl`) that still work. Most three-letter combos I tried redirect to `www.reddit.com/r/combohere` though.\\n\", \"before_files\": [{\"content\": \"# coding=utf-8\\n# Author: Elsie Powell, embolalia.com\\nfrom __future__ import unicode_literals, absolute_import, print_function, division\\n\\nfrom sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP\\nfrom sopel.formatting import bold, color, colors\\nfrom sopel.web import USER_AGENT\\nfrom sopel.tools import SopelMemory, time\\nimport datetime as dt\\nimport praw\\nimport re\\nimport sys\\nif sys.version_info.major >= 3:\\n unicode = str\\n if sys.version_info.minor >= 4:\\n from html import unescape\\n else:\\n from html.parser import HTMLParser\\n unescape = HTMLParser().unescape\\nelse:\\n from HTMLParser import HTMLParser\\n unescape = HTMLParser().unescape\\n\\n\\ndomain = r'https?://(?:www\\\\.|np\\\\.|old\\\\.)?reddit\\\\.com'\\npost_url = r'%s/r/(.*?)/comments/([\\\\w-]+)' % domain\\nuser_url = r'%s/u(ser)?/([\\\\w-]+)' % domain\\npost_regex = re.compile(post_url)\\nuser_regex = re.compile(user_url)\\nspoiler_subs = [\\n 'stevenuniverse',\\n 'onepunchman',\\n]\\n\\n\\ndef setup(bot):\\n if not bot.memory.contains('url_callbacks'):\\n bot.memory['url_callbacks'] = SopelMemory()\\n bot.memory['url_callbacks'][post_regex] = rpost_info\\n bot.memory['url_callbacks'][user_regex] = redditor_info\\n\\n\\ndef shutdown(bot):\\n del bot.memory['url_callbacks'][post_regex]\\n del bot.memory['url_callbacks'][user_regex]\\n\\n\\n@rule('.*%s.*' % post_url)\\ndef rpost_info(bot, trigger, match=None):\\n match = match or trigger\\n try:\\n r = praw.Reddit(\\n user_agent=USER_AGENT,\\n client_id='6EiphT6SSQq7FQ',\\n client_secret=None,\\n )\\n s = r.submission(id=match.group(2))\\n except Exception:\\n r = praw.Reddit(user_agent=USER_AGENT)\\n s = r.get_submission(submission_id=match.group(2))\\n\\n message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '\\n '{comments} comments | Posted by {author} | '\\n 'Created at {created}')\\n\\n subreddit = s.subreddit.display_name\\n if s.is_self:\\n link = '(self.{})'.format(subreddit)\\n else:\\n link = '({}) to r/{}'.format(s.url, subreddit)\\n\\n if s.over_18:\\n if subreddit.lower() in spoiler_subs:\\n nsfw = bold(color(' [SPOILERS]', colors.RED))\\n else:\\n nsfw = bold(color(' [NSFW]', colors.RED))\\n\\n sfw = bot.db.get_channel_value(trigger.sender, 'sfw')\\n if sfw:\\n link = '(link hidden)'\\n bot.write(['KICK', trigger.sender, trigger.nick,\\n 'Linking to NSFW content in a SFW channel.'])\\n else:\\n nsfw = ''\\n\\n if s.author:\\n author = s.author.name\\n else:\\n author = '[deleted]'\\n\\n tz = time.get_timezone(bot.db, bot.config, None, trigger.nick,\\n trigger.sender)\\n time_created = 
dt.datetime.utcfromtimestamp(s.created_utc)\\n created = time.format_time(bot.db, bot.config, tz, trigger.nick,\\n trigger.sender, time_created)\\n\\n if s.score > 0:\\n point_color = colors.GREEN\\n else:\\n point_color = colors.RED\\n\\n percent = color(unicode(s.upvote_ratio * 100) + '%', point_color)\\n\\n title = unescape(s.title)\\n message = message.format(\\n title=title, link=link, nsfw=nsfw, points=s.score, percent=percent,\\n comments=s.num_comments, author=author, created=created)\\n\\n bot.say(message)\\n\\n\\n# If you change this, you'll have to change some other things...\\n@commands('redditor')\\n@example('.redditor poem_for_your_sprog')\\ndef redditor_info(bot, trigger, match=None):\\n \\\"\\\"\\\"Show information about the given Redditor\\\"\\\"\\\"\\n commanded = re.match(bot.config.core.prefix + 'redditor', trigger)\\n r = praw.Reddit(\\n user_agent=USER_AGENT,\\n client_id='6EiphT6SSQq7FQ',\\n client_secret=None,\\n )\\n match = match or trigger\\n try:\\n u = r.get_redditor(match.group(2))\\n except Exception: # TODO: Be specific\\n if commanded:\\n bot.say('No such Redditor.')\\n return NOLIMIT\\n else:\\n return\\n # Fail silently if it wasn't an explicit command.\\n\\n message = '[REDDITOR] ' + u.name\\n now = dt.datetime.utcnow()\\n cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc)\\n cakeday_start = cakeday_start.replace(year=now.year)\\n day = dt.timedelta(days=1)\\n year_div_by_400 = now.year % 400 == 0\\n year_div_by_100 = now.year % 100 == 0\\n year_div_by_4 = now.year % 4 == 0\\n is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4)\\n if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)):\\n # If cake day is 2/29 and it's not a leap year, cake day is 1/3.\\n # Cake day begins at exact account creation time.\\n is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day)\\n else:\\n is_cakeday = cakeday_start <= now <= cakeday_start + day\\n\\n if is_cakeday:\\n message = message + ' | \\u0002\\u000313Cake day\\u0003\\u0002'\\n if commanded:\\n message = message + ' | https://reddit.com/u/' + u.name\\n if u.is_gold:\\n message = message + ' | \\u0002\\u000308Gold\\u0003\\u0002'\\n if u.is_mod:\\n message = message + ' | \\u0002\\u000305Mod\\u0003\\u0002'\\n message = message + (' | Link: ' + str(u.link_karma) +\\n ' | Comment: ' + str(u.comment_karma))\\n\\n bot.say(message)\\n\\n\\n# If you change the groups here, you'll have to change some things above.\\n@rule('.*%s.*' % user_url)\\ndef auto_redditor_info(bot, trigger):\\n redditor_info(bot, trigger)\\n\\n\\n@require_chanmsg('.setsfw is only permitted in channels')\\n@commands('setsafeforwork', 'setsfw')\\n@example('.setsfw true')\\n@example('.setsfw false')\\ndef update_channel(bot, trigger):\\n \\\"\\\"\\\"\\n Sets the Safe for Work status (true or false) for the current\\n channel. Defaults to false.\\n \\\"\\\"\\\"\\n if bot.privileges[trigger.sender][trigger.nick] < OP:\\n return\\n else:\\n param = 'true'\\n if trigger.group(2) and trigger.group(3):\\n param = trigger.group(3).strip().lower()\\n sfw = param == 'true'\\n bot.db.set_channel_value(trigger.sender, 'sfw', sfw)\\n if sfw:\\n bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender)\\n else:\\n bot.reply('Got it. %s is now flagged as NSFW.' 
% trigger.sender)\\n\\n\\n@commands('getsafeforwork', 'getsfw')\\n@example('.getsfw [channel]')\\ndef get_channel_sfw(bot, trigger):\\n \\\"\\\"\\\"\\n Gets the preferred channel's Safe for Work status, or the current\\n channel's status if no channel given.\\n \\\"\\\"\\\"\\n channel = trigger.group(2)\\n if not channel:\\n channel = trigger.sender\\n if channel.is_nick():\\n return bot.say('.getsfw with no channel param is only permitted in channels')\\n\\n channel = channel.strip()\\n\\n sfw = bot.db.get_channel_value(channel, 'sfw')\\n if sfw:\\n bot.say('%s is flagged as SFW' % channel)\\n else:\\n bot.say('%s is flagged as NSFW' % channel)\\n\", \"path\": \"sopel/modules/reddit.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3071,"string":"3,071"},"num_tokens_diff":{"kind":"number","value":163,"string":"163"}}},{"rowIdx":18171,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_47927"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"uccser__cs-unplugged-885"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nMinor adjustments to navbar and homepage\n## Navbar\r\n\r\n- [x] There should be more space between logo and 'Topics'.\r\n- [x] The search bar can be ~20% smaller.\r\n\r\n## Hompeage\r\n\r\n- [x] Navbar should be transparent and fade in when user scrolls down.\n\n\n\n[start of csunplugged/general/views.py]\n1 \"\"\"Views for the general application.\"\"\"\n2 \n3 from django.views.generic import TemplateView\n4 from django.http import HttpResponse\n5 \n6 \n7 class GeneralIndexView(TemplateView):\n8 \"\"\"View for the homepage that renders from a template.\"\"\"\n9 \n10 template_name = \"general/index.html\"\n11 \n12 \n13 class GeneralAboutView(TemplateView):\n14 \"\"\"View for the about page that renders from a template.\"\"\"\n15 \n16 template_name = \"general/about.html\"\n17 \n18 \n19 class GeneralContactView(TemplateView):\n20 \"\"\"View for the contact page that renders from a template.\"\"\"\n21 \n22 template_name = \"general/contact.html\"\n23 \n24 \n25 class GeneralPeopleView(TemplateView):\n26 \"\"\"View for the people page that renders from a template.\"\"\"\n27 \n28 template_name = \"general/people.html\"\n29 \n30 \n31 class GeneralPrinciplesView(TemplateView):\n32 \"\"\"View for the princples page that renders from a template.\"\"\"\n33 \n34 template_name = \"general/principles.html\"\n35 \n36 \n37 class WhatIsCSView(TemplateView):\n38 \"\"\"View for the 'What is Computer Science?' page that renders from a template.\"\"\"\n39 \n40 template_name = \"general/what-is-computer-science.html\"\n41 \n42 \n43 class ComputationalThinkingView(TemplateView):\n44 \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n45 \n46 template_name = \"general/computational-thinking.html\"\n47 \n48 \n49 class HowDoITeachCSUnpluggedView(TemplateView):\n50 \"\"\"View for the 'How do I teach CS Unplugged?' 
page that renders from a template.\"\"\"\n51 \n52 template_name = \"general/how-do-i-teach-cs-unplugged.html\"\n53 \n54 \n55 def health_check(request):\n56 \"\"\"Return heath check response for Google App Engine.\n57 \n58 Returns a 200 HTTP response for Google App Engine to detect the system\n59 is running.\n60 \"\"\"\n61 return HttpResponse(status=200)\n62\n[end of csunplugged/general/views.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -9,6 +9,16 @@\n \n template_name = \"general/index.html\"\n \n+ def get_context_data(self, **kwargs):\n+ \"\"\"Provide the context data for the homepage.\n+\n+ Returns:\n+ Dictionary of context data.\n+ \"\"\"\n+ context = super(GeneralIndexView, self).get_context_data(**kwargs)\n+ context[\"homepage\"] = True\n+ return context\n+\n \n class GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\\n--- a/csunplugged/general/views.py\\n+++ b/csunplugged/general/views.py\\n@@ -9,6 +9,16 @@\\n \\n template_name = \\\"general/index.html\\\"\\n \\n+ def get_context_data(self, **kwargs):\\n+ \\\"\\\"\\\"Provide the context data for the homepage.\\n+\\n+ Returns:\\n+ Dictionary of context data.\\n+ \\\"\\\"\\\"\\n+ context = super(GeneralIndexView, self).get_context_data(**kwargs)\\n+ context[\\\"homepage\\\"] = True\\n+ return context\\n+\\n \\n class GeneralAboutView(TemplateView):\\n \\\"\\\"\\\"View for the about page that renders from a template.\\\"\\\"\\\"\\n\", \"issue\": \"Minor adjustments to navbar and homepage\\n## Navbar\\r\\n\\r\\n- [x] There should be more space between logo and 'Topics'.\\r\\n- [x] The search bar can be ~20% smaller.\\r\\n\\r\\n## Hompeage\\r\\n\\r\\n- [x] Navbar should be transparent and fade in when user scrolls down.\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"Views for the general application.\\\"\\\"\\\"\\n\\nfrom django.views.generic import TemplateView\\nfrom django.http import HttpResponse\\n\\n\\nclass GeneralIndexView(TemplateView):\\n \\\"\\\"\\\"View for the homepage that renders from a template.\\\"\\\"\\\"\\n\\n template_name = 
\\\"general/index.html\\\"\\n\\n\\nclass GeneralAboutView(TemplateView):\\n \\\"\\\"\\\"View for the about page that renders from a template.\\\"\\\"\\\"\\n\\n template_name = \\\"general/about.html\\\"\\n\\n\\nclass GeneralContactView(TemplateView):\\n \\\"\\\"\\\"View for the contact page that renders from a template.\\\"\\\"\\\"\\n\\n template_name = \\\"general/contact.html\\\"\\n\\n\\nclass GeneralPeopleView(TemplateView):\\n \\\"\\\"\\\"View for the people page that renders from a template.\\\"\\\"\\\"\\n\\n template_name = \\\"general/people.html\\\"\\n\\n\\nclass GeneralPrinciplesView(TemplateView):\\n \\\"\\\"\\\"View for the princples page that renders from a template.\\\"\\\"\\\"\\n\\n template_name = \\\"general/principles.html\\\"\\n\\n\\nclass WhatIsCSView(TemplateView):\\n \\\"\\\"\\\"View for the 'What is Computer Science?' page that renders from a template.\\\"\\\"\\\"\\n\\n template_name = \\\"general/what-is-computer-science.html\\\"\\n\\n\\nclass ComputationalThinkingView(TemplateView):\\n \\\"\\\"\\\"View for the Computational Thinking page that renders from a template.\\\"\\\"\\\"\\n\\n template_name = \\\"general/computational-thinking.html\\\"\\n\\n\\nclass HowDoITeachCSUnpluggedView(TemplateView):\\n \\\"\\\"\\\"View for the 'How do I teach CS Unplugged?' page that renders from a template.\\\"\\\"\\\"\\n\\n template_name = \\\"general/how-do-i-teach-cs-unplugged.html\\\"\\n\\n\\ndef health_check(request):\\n \\\"\\\"\\\"Return heath check response for Google App Engine.\\n\\n Returns a 200 HTTP response for Google App Engine to detect the system\\n is running.\\n \\\"\\\"\\\"\\n return HttpResponse(status=200)\\n\", \"path\": \"csunplugged/general/views.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1100,"string":"1,100"},"num_tokens_diff":{"kind":"number","value":154,"string":"154"}}},{"rowIdx":18172,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_40993"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"apluslms__a-plus-1062"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nFeature request: send email when teacher adds news\nHi\r\n\r\nIt would be very nice to be able to notify students via email when teacher adds news. This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.\r\n\r\nWhat do you think?\r\n\r\nThanks!\nFeature request: send email when teacher adds news\nHi\r\n\r\nIt would be very nice to be able to notify students via email when teacher adds news. 
This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.\r\n\r\nWhat do you think?\r\n\r\nThanks!\n\n\n\n[start of news/forms.py]\n1 from django import forms\n2 \n3 from .models import News\n4 \n5 \n6 class NewsForm(forms.ModelForm):\n7 \n8 class Meta:\n9 model = News\n10 fields = [\n11 'audience',\n12 'publish',\n13 'pin',\n14 'title',\n15 'body',\n16 ]\n17\n[end of news/forms.py]\n[start of lib/email_messages.py]\n1 import logging\n2 import traceback\n3 from django.conf import settings\n4 from django.core.mail import send_mail\n5 from django.urls import reverse\n6 from .helpers import build_aplus_url\n7 \n8 \n9 logger = logging.getLogger('aplus.lib.email_messages')\n10 \n11 \n12 def email_course_instance(instance, subject, message, everyone=False) -> bool:\n13 \"\"\"\n14 Sends an email to a course instance's technical support emails or teachers if technical support not set.\n15 If everyone == True, sends emails to teachers anyway.\n16 \"\"\"\n17 recipients = []\n18 if instance.technical_error_emails:\n19 recipients = instance.technical_error_emails.split(\",\")\n20 if everyone or not recipients:\n21 recipients = instance.teachers.exclude(user__email='').values_list(\"user__email\", flat=True)\n22 \n23 if not recipients:\n24 raise ValueError(\"No recipients\")\n25 \n26 try:\n27 return send_mail(subject, message, settings.SERVER_EMAIL, recipients, True) == 1\n28 except:\n29 logger.exception('Failed to send course instance emails.')\n30 raise\n31 \n32 \n33 def email_course_error(request, exercise, message, exception=True):\n34 \"\"\"\n35 Sends error message to course instance's teachers or technical support emails if set.\n36 \"\"\"\n37 instance = exercise.course_instance\n38 \n39 error_trace = \"-\"\n40 if exception:\n41 error_trace = traceback.format_exc()\n42 \n43 if request:\n44 request_fields = repr(request)\n45 else:\n46 request_fields = \"No request available\"\n47 \n48 subject = settings.EXERCISE_ERROR_SUBJECT.format(\n49 course=instance.course.code,\n50 exercise=str(exercise))\n51 body = settings.EXERCISE_ERROR_DESCRIPTION.format(\n52 message=message,\n53 exercise_url=build_aplus_url(\n54 exercise.get_absolute_url(), user_url=True),\n55 course_edit_url=build_aplus_url(\n56 instance.get_url('course-details'), user_url=True),\n57 error_trace=error_trace,\n58 request_fields=request_fields)\n59 \n60 try:\n61 email_course_instance(instance, subject, body)\n62 except:\n63 pass\n64\n[end of lib/email_messages.py]\n[start of news/views.py]\n1 from django.core.exceptions import PermissionDenied\n2 from django.http import Http404\n3 from django.shortcuts import get_object_or_404\n4 \n5 from authorization.permissions import ACCESS\n6 from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\n7 from lib.viewbase import BaseFormView, BaseRedirectView\n8 from .forms import NewsForm\n9 from .models import News\n10 \n11 \n12 class ListNewsView(CourseInstanceBaseView):\n13 access_mode = ACCESS.TEACHER\n14 template_name = \"news/list.html\"\n15 \n16 def get_common_objects(self):\n17 super().get_common_objects()\n18 self.news = self.instance.news.all()\n19 self.note(\"news\")\n20 \n21 \n22 class EditNewsView(CourseInstanceMixin, BaseFormView):\n23 access_mode = ACCESS.TEACHER\n24 template_name = \"news/edit.html\"\n25 form_class = NewsForm\n26 news_item_kw = \"news_id\"\n27 \n28 def get_form_kwargs(self):\n29 kwargs = super().get_form_kwargs()\n30 \n31 news_id = self._get_kwarg(self.news_item_kw, default=None)\n32 if news_id:\n33 
self.news_item = get_object_or_404(\n34 News,\n35 pk=news_id,\n36 course_instance=self.instance\n37 )\n38 self.note(\"news_item\")\n39 else:\n40 self.news_item = News(course_instance=self.instance)\n41 \n42 kwargs[\"instance\"] = self.news_item\n43 return kwargs\n44 \n45 def get_success_url(self):\n46 return self.instance.get_url(\"news-list\")\n47 \n48 def form_valid(self, form):\n49 form.save()\n50 return super().form_valid(form)\n51 \n52 \n53 class RemoveNewsView(CourseInstanceMixin, BaseRedirectView):\n54 access_mode = ACCESS.TEACHER\n55 news_item_kw = \"news_id\"\n56 \n57 def get_resource_objects(self):\n58 super().get_resource_objects()\n59 self.news_item = get_object_or_404(\n60 News,\n61 id=self._get_kwarg(self.news_item_kw),\n62 course_instance=self.instance,\n63 )\n64 self.note(\"news_item\")\n65 \n66 def post(self, request, *args, **kwargs):\n67 self.news_item.delete()\n68 return self.redirect(self.instance.get_url(\"news-list\"))\n69\n[end of news/views.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/lib/email_messages.py b/lib/email_messages.py\n--- a/lib/email_messages.py\n+++ b/lib/email_messages.py\n@@ -1,9 +1,10 @@\n import logging\n import traceback\n from django.conf import settings\n-from django.core.mail import send_mail\n-from django.urls import reverse\n-from .helpers import build_aplus_url\n+from django.core.mail import send_mail, send_mass_mail\n+\n+from .helpers import Enum, build_aplus_url\n+from course.models import CourseInstance\n \n \n logger = logging.getLogger('aplus.lib.email_messages')\n@@ -61,3 +62,30 @@\n email_course_instance(instance, subject, body)\n except:\n pass\n+\n+\n+def email_course_students(\n+ instance: CourseInstance,\n+ subject: str,\n+ message: str,\n+ audience: Enum = CourseInstance.ENROLLMENT_AUDIENCE.ALL_USERS,\n+ ) -> int:\n+ \"\"\"\n+ Sends an email to students on the course. 
Audience parameter controls whether the mail goes\n+ to all (default), just internal, or just external students.\n+ Returns number of emails sent, or -1 in case of error.\n+ \"\"\"\n+ students = instance.students\n+ if audience == CourseInstance.ENROLLMENT_AUDIENCE.INTERNAL_USERS:\n+ students = students.filter(organization=settings.LOCAL_ORGANIZATION)\n+ elif audience == CourseInstance.ENROLLMENT_AUDIENCE.EXTERNAL_USERS:\n+ students = students.exclude(organization=settings.LOCAL_ORGANIZATION)\n+\n+ recipients = students.exclude(user__email='').values_list(\"user__email\", flat=True)\n+ emails = tuple(map(lambda x: (subject, message, settings.SERVER_EMAIL, [x]), recipients))\n+\n+ try:\n+ return send_mass_mail(emails)\n+ except:\n+ logger.exception('Failed to send course instance emails.')\n+ return -1\ndiff --git a/news/forms.py b/news/forms.py\n--- a/news/forms.py\n+++ b/news/forms.py\n@@ -1,16 +1,25 @@\n+from typing import Any\n+\n from django import forms\n+from django.utils.translation import gettext_lazy as _\n \n from .models import News\n \n \n class NewsForm(forms.ModelForm):\n \n+ email = forms.BooleanField(\n+ required=False,\n+ label=_(\"SEND_EMAIL_TO_STUDENTS\"),\n+ )\n+\n class Meta:\n model = News\n fields = [\n 'audience',\n 'publish',\n 'pin',\n+ 'email',\n 'title',\n 'body',\n ]\ndiff --git a/news/views.py b/news/views.py\n--- a/news/views.py\n+++ b/news/views.py\n@@ -1,10 +1,14 @@\n+from django.conf import settings\n+from django.contrib import messages\n from django.core.exceptions import PermissionDenied\n from django.http import Http404\n from django.shortcuts import get_object_or_404\n+from django.utils.translation import gettext_lazy as _\n \n from authorization.permissions import ACCESS\n from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\n from lib.viewbase import BaseFormView, BaseRedirectView\n+from lib.email_messages import email_course_students\n from .forms import NewsForm\n from .models import News\n \n@@ -47,6 +51,15 @@\n \n def form_valid(self, form):\n form.save()\n+ if form.cleaned_data['email']:\n+ subject = f\"[{settings.BRAND_NAME} course news] {self.instance.course.code}: {self.news_item.title}\"\n+ if email_course_students(\n+ self.instance,\n+ subject,\n+ self.news_item.body,\n+ self.news_item.audience,\n+ ) < 0:\n+ messages.error(self.request, _('FAILED_TO_SEND_EMAIL'))\n return super().form_valid(form)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/lib/email_messages.py b/lib/email_messages.py\\n--- a/lib/email_messages.py\\n+++ b/lib/email_messages.py\\n@@ -1,9 +1,10 @@\\n import logging\\n import traceback\\n from django.conf import settings\\n-from django.core.mail import send_mail\\n-from django.urls import reverse\\n-from .helpers import build_aplus_url\\n+from django.core.mail import send_mail, send_mass_mail\\n+\\n+from .helpers import Enum, build_aplus_url\\n+from course.models import CourseInstance\\n \\n \\n logger = logging.getLogger('aplus.lib.email_messages')\\n@@ -61,3 +62,30 @@\\n email_course_instance(instance, subject, body)\\n except:\\n pass\\n+\\n+\\n+def email_course_students(\\n+ instance: CourseInstance,\\n+ subject: str,\\n+ message: str,\\n+ audience: Enum = CourseInstance.ENROLLMENT_AUDIENCE.ALL_USERS,\\n+ ) -> int:\\n+ \\\"\\\"\\\"\\n+ Sends an email to students on the course. 
Audience parameter controls whether the mail goes\\n+ to all (default), just internal, or just external students.\\n+ Returns number of emails sent, or -1 in case of error.\\n+ \\\"\\\"\\\"\\n+ students = instance.students\\n+ if audience == CourseInstance.ENROLLMENT_AUDIENCE.INTERNAL_USERS:\\n+ students = students.filter(organization=settings.LOCAL_ORGANIZATION)\\n+ elif audience == CourseInstance.ENROLLMENT_AUDIENCE.EXTERNAL_USERS:\\n+ students = students.exclude(organization=settings.LOCAL_ORGANIZATION)\\n+\\n+ recipients = students.exclude(user__email='').values_list(\\\"user__email\\\", flat=True)\\n+ emails = tuple(map(lambda x: (subject, message, settings.SERVER_EMAIL, [x]), recipients))\\n+\\n+ try:\\n+ return send_mass_mail(emails)\\n+ except:\\n+ logger.exception('Failed to send course instance emails.')\\n+ return -1\\ndiff --git a/news/forms.py b/news/forms.py\\n--- a/news/forms.py\\n+++ b/news/forms.py\\n@@ -1,16 +1,25 @@\\n+from typing import Any\\n+\\n from django import forms\\n+from django.utils.translation import gettext_lazy as _\\n \\n from .models import News\\n \\n \\n class NewsForm(forms.ModelForm):\\n \\n+ email = forms.BooleanField(\\n+ required=False,\\n+ label=_(\\\"SEND_EMAIL_TO_STUDENTS\\\"),\\n+ )\\n+\\n class Meta:\\n model = News\\n fields = [\\n 'audience',\\n 'publish',\\n 'pin',\\n+ 'email',\\n 'title',\\n 'body',\\n ]\\ndiff --git a/news/views.py b/news/views.py\\n--- a/news/views.py\\n+++ b/news/views.py\\n@@ -1,10 +1,14 @@\\n+from django.conf import settings\\n+from django.contrib import messages\\n from django.core.exceptions import PermissionDenied\\n from django.http import Http404\\n from django.shortcuts import get_object_or_404\\n+from django.utils.translation import gettext_lazy as _\\n \\n from authorization.permissions import ACCESS\\n from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\\n from lib.viewbase import BaseFormView, BaseRedirectView\\n+from lib.email_messages import email_course_students\\n from .forms import NewsForm\\n from .models import News\\n \\n@@ -47,6 +51,15 @@\\n \\n def form_valid(self, form):\\n form.save()\\n+ if form.cleaned_data['email']:\\n+ subject = f\\\"[{settings.BRAND_NAME} course news] {self.instance.course.code}: {self.news_item.title}\\\"\\n+ if email_course_students(\\n+ self.instance,\\n+ subject,\\n+ self.news_item.body,\\n+ self.news_item.audience,\\n+ ) < 0:\\n+ messages.error(self.request, _('FAILED_TO_SEND_EMAIL'))\\n return super().form_valid(form)\\n\", \"issue\": \"Feature request: send email when teacher adds news\\nHi\\r\\n\\r\\nIt would be very nice to be able to notify students via email when teacher adds news. This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.\\r\\n\\r\\nWhat do you think?\\r\\n\\r\\nThanks!\\nFeature request: send email when teacher adds news\\nHi\\r\\n\\r\\nIt would be very nice to be able to notify students via email when teacher adds news. 
This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.\\r\\n\\r\\nWhat do you think?\\r\\n\\r\\nThanks!\\n\", \"before_files\": [{\"content\": \"from django import forms\\n\\nfrom .models import News\\n\\n\\nclass NewsForm(forms.ModelForm):\\n\\n class Meta:\\n model = News\\n fields = [\\n 'audience',\\n 'publish',\\n 'pin',\\n 'title',\\n 'body',\\n ]\\n\", \"path\": \"news/forms.py\"}, {\"content\": \"import logging\\nimport traceback\\nfrom django.conf import settings\\nfrom django.core.mail import send_mail\\nfrom django.urls import reverse\\nfrom .helpers import build_aplus_url\\n\\n\\nlogger = logging.getLogger('aplus.lib.email_messages')\\n\\n\\ndef email_course_instance(instance, subject, message, everyone=False) -> bool:\\n \\\"\\\"\\\"\\n Sends an email to a course instance's technical support emails or teachers if technical support not set.\\n If everyone == True, sends emails to teachers anyway.\\n \\\"\\\"\\\"\\n recipients = []\\n if instance.technical_error_emails:\\n recipients = instance.technical_error_emails.split(\\\",\\\")\\n if everyone or not recipients:\\n recipients = instance.teachers.exclude(user__email='').values_list(\\\"user__email\\\", flat=True)\\n\\n if not recipients:\\n raise ValueError(\\\"No recipients\\\")\\n\\n try:\\n return send_mail(subject, message, settings.SERVER_EMAIL, recipients, True) == 1\\n except:\\n logger.exception('Failed to send course instance emails.')\\n raise\\n\\n\\ndef email_course_error(request, exercise, message, exception=True):\\n \\\"\\\"\\\"\\n Sends error message to course instance's teachers or technical support emails if set.\\n \\\"\\\"\\\"\\n instance = exercise.course_instance\\n\\n error_trace = \\\"-\\\"\\n if exception:\\n error_trace = traceback.format_exc()\\n\\n if request:\\n request_fields = repr(request)\\n else:\\n request_fields = \\\"No request available\\\"\\n\\n subject = settings.EXERCISE_ERROR_SUBJECT.format(\\n course=instance.course.code,\\n exercise=str(exercise))\\n body = settings.EXERCISE_ERROR_DESCRIPTION.format(\\n message=message,\\n exercise_url=build_aplus_url(\\n exercise.get_absolute_url(), user_url=True),\\n course_edit_url=build_aplus_url(\\n instance.get_url('course-details'), user_url=True),\\n error_trace=error_trace,\\n request_fields=request_fields)\\n\\n try:\\n email_course_instance(instance, subject, body)\\n except:\\n pass\\n\", \"path\": \"lib/email_messages.py\"}, {\"content\": \"from django.core.exceptions import PermissionDenied\\nfrom django.http import Http404\\nfrom django.shortcuts import get_object_or_404\\n\\nfrom authorization.permissions import ACCESS\\nfrom course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\\nfrom lib.viewbase import BaseFormView, BaseRedirectView\\nfrom .forms import NewsForm\\nfrom .models import News\\n\\n\\nclass ListNewsView(CourseInstanceBaseView):\\n access_mode = ACCESS.TEACHER\\n template_name = \\\"news/list.html\\\"\\n\\n def get_common_objects(self):\\n super().get_common_objects()\\n self.news = self.instance.news.all()\\n self.note(\\\"news\\\")\\n\\n\\nclass EditNewsView(CourseInstanceMixin, BaseFormView):\\n access_mode = ACCESS.TEACHER\\n template_name = \\\"news/edit.html\\\"\\n form_class = NewsForm\\n news_item_kw = \\\"news_id\\\"\\n\\n def get_form_kwargs(self):\\n kwargs = super().get_form_kwargs()\\n\\n news_id = self._get_kwarg(self.news_item_kw, default=None)\\n if news_id:\\n self.news_item = get_object_or_404(\\n News,\\n pk=news_id,\\n 
course_instance=self.instance\\n )\\n self.note(\\\"news_item\\\")\\n else:\\n self.news_item = News(course_instance=self.instance)\\n\\n kwargs[\\\"instance\\\"] = self.news_item\\n return kwargs\\n\\n def get_success_url(self):\\n return self.instance.get_url(\\\"news-list\\\")\\n\\n def form_valid(self, form):\\n form.save()\\n return super().form_valid(form)\\n\\n\\nclass RemoveNewsView(CourseInstanceMixin, BaseRedirectView):\\n access_mode = ACCESS.TEACHER\\n news_item_kw = \\\"news_id\\\"\\n\\n def get_resource_objects(self):\\n super().get_resource_objects()\\n self.news_item = get_object_or_404(\\n News,\\n id=self._get_kwarg(self.news_item_kw),\\n course_instance=self.instance,\\n )\\n self.note(\\\"news_item\\\")\\n\\n def post(self, request, *args, **kwargs):\\n self.news_item.delete()\\n return self.redirect(self.instance.get_url(\\\"news-list\\\"))\\n\", \"path\": \"news/views.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1869,"string":"1,869"},"num_tokens_diff":{"kind":"number","value":812,"string":"812"}}},{"rowIdx":18173,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_358"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"spacetelescope__jwql-550"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nCron jobs for monitors currently failing \nTraceback (most recent call last):\r\n File \"/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/bias_monitor.py\", line 58, in \r\n from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks\r\n File \"/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/dark_monitor.py\", line 77, in \r\n from jwql.jwql_monitors import monitor_mast\r\n File \"/home/jwqladm/repositories/jwql/jwql/jwql_monitors/monitor_mast.py\", line 25, in \r\n from bokeh.embed import components\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/__init__.py\", line 81, in \r\n from .util import logconfig\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/util/logconfig.py\", line 87, in \r\n level = settings.py_log_level()\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py\", line 310, in __call__\r\n return self._convert(os.environ[self._env_var])\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py\", line 236, in convert_logging\r\n raise ValueError(\"Cannot convert {} to log level, valid values are: {}\".format(value, \", \".join(_log_levels)))\r\nValueError: Cannot convert WARN to log level, valid values are: CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE, NONE\n\n\n\n[start of setup.py]\n1 import numpy as np\n2 from setuptools import setup\n3 from setuptools import find_packages\n4 \n5 VERSION = '0.22.0'\n6 \n7 AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\n8 AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n9 \n10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n11 \n12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\n13 REQUIRES = [\n14 'asdf>=2.3.3',\n15 'astropy>=3.2.1',\n16 'astroquery>=0.3.9',\n17 'authlib',\n18 'bokeh>=1.0',\n19 'codecov',\n20 'django>=2.0',\n21 'flake8',\n22 
'inflection',\n23 'ipython',\n24 'jinja2',\n25 'jsonschema==2.6.0',\n26 'jwedb>=0.0.3',\n27 'matplotlib',\n28 'numpy',\n29 'numpydoc',\n30 'pandas',\n31 'psycopg2',\n32 'pysiaf',\n33 'pytest',\n34 'pytest-cov',\n35 'scipy',\n36 'sphinx',\n37 'sqlalchemy',\n38 'stsci_rtd_theme',\n39 'twine'\n40 ]\n41 \n42 setup(\n43 name='jwql',\n44 version=VERSION,\n45 description=DESCRIPTION,\n46 url='https://github.com/spacetelescope/jwql.git',\n47 author=AUTHORS,\n48 author_email='jwql@stsci.edu',\n49 license='BSD',\n50 keywords=['astronomy', 'python'],\n51 classifiers=['Programming Language :: Python'],\n52 packages=find_packages(),\n53 install_requires=REQUIRES,\n54 dependency_links=DEPENDENCY_LINKS,\n55 include_package_data=True,\n56 include_dirs=[np.get_include()],\n57 )\n58\n[end of setup.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n- 'bokeh>=1.0',\n+ 'bokeh>=1.0,<1.4',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/setup.py b/setup.py\\n--- a/setup.py\\n+++ b/setup.py\\n@@ -15,7 +15,7 @@\\n 'astropy>=3.2.1',\\n 'astroquery>=0.3.9',\\n 'authlib',\\n- 'bokeh>=1.0',\\n+ 'bokeh>=1.0,<1.4',\\n 'codecov',\\n 'django>=2.0',\\n 'flake8',\\n\", \"issue\": \"Cron jobs for monitors currently failing \\nTraceback (most recent call last):\\r\\n File \\\"/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/bias_monitor.py\\\", line 58, in \\r\\n from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks\\r\\n File \\\"/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/dark_monitor.py\\\", line 77, in \\r\\n from jwql.jwql_monitors import monitor_mast\\r\\n File \\\"/home/jwqladm/repositories/jwql/jwql/jwql_monitors/monitor_mast.py\\\", line 25, in \\r\\n from bokeh.embed import components\\r\\n File \\\"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/__init__.py\\\", line 81, in \\r\\n from .util import logconfig\\r\\n File \\\"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/util/logconfig.py\\\", line 87, in \\r\\n level = settings.py_log_level()\\r\\n File 
\\\"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py\\\", line 310, in __call__\\r\\n return self._convert(os.environ[self._env_var])\\r\\n File \\\"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py\\\", line 236, in convert_logging\\r\\n raise ValueError(\\\"Cannot convert {} to log level, valid values are: {}\\\".format(value, \\\", \\\".join(_log_levels)))\\r\\nValueError: Cannot convert WARN to log level, valid values are: CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE, NONE\\n\", \"before_files\": [{\"content\": \"import numpy as np\\nfrom setuptools import setup\\nfrom setuptools import find_packages\\n\\nVERSION = '0.22.0'\\n\\nAUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\\n\\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\\n\\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\\nREQUIRES = [\\n 'asdf>=2.3.3',\\n 'astropy>=3.2.1',\\n 'astroquery>=0.3.9',\\n 'authlib',\\n 'bokeh>=1.0',\\n 'codecov',\\n 'django>=2.0',\\n 'flake8',\\n 'inflection',\\n 'ipython',\\n 'jinja2',\\n 'jsonschema==2.6.0',\\n 'jwedb>=0.0.3',\\n 'matplotlib',\\n 'numpy',\\n 'numpydoc',\\n 'pandas',\\n 'psycopg2',\\n 'pysiaf',\\n 'pytest',\\n 'pytest-cov',\\n 'scipy',\\n 'sphinx',\\n 'sqlalchemy',\\n 'stsci_rtd_theme',\\n 'twine'\\n]\\n\\nsetup(\\n name='jwql',\\n version=VERSION,\\n description=DESCRIPTION,\\n url='https://github.com/spacetelescope/jwql.git',\\n author=AUTHORS,\\n author_email='jwql@stsci.edu',\\n license='BSD',\\n keywords=['astronomy', 'python'],\\n classifiers=['Programming Language :: Python'],\\n packages=find_packages(),\\n install_requires=REQUIRES,\\n dependency_links=DEPENDENCY_LINKS,\\n include_package_data=True,\\n include_dirs=[np.get_include()],\\n)\\n\", \"path\": \"setup.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1494,"string":"1,494"},"num_tokens_diff":{"kind":"number","value":101,"string":"101"}}},{"rowIdx":18174,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_1710"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"encode__httpx-407"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nASGIDispatch and WSGIDispatch should be importable from the top-level httpx package\nFrom #396:\r\n\r\n> ``ASGIDispatch`` and ``WSGIDispatch`` are documented as top-level but aren't exposed at the top level. This is definitely an issue, I'd recommend the route of making both available top-level.\n\n\n\n[start of httpx/dispatch/__init__.py]\n1 \"\"\"\n2 Dispatch classes handle the raw network connections and the implementation\n3 details of making the HTTP request and receiving the response.\n4 \"\"\"\n5\n[end of httpx/dispatch/__init__.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/httpx/dispatch/__init__.py b/httpx/dispatch/__init__.py\n--- a/httpx/dispatch/__init__.py\n+++ b/httpx/dispatch/__init__.py\n@@ -2,3 +2,7 @@\n Dispatch classes handle the raw network connections and the implementation\n details of making the HTTP request and receiving the response.\n \"\"\"\n+from .asgi import ASGIDispatch\n+from .wsgi import WSGIDispatch\n+\n+__all__ = [\"ASGIDispatch\", \"WSGIDispatch\"]\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/httpx/dispatch/__init__.py b/httpx/dispatch/__init__.py\\n--- a/httpx/dispatch/__init__.py\\n+++ b/httpx/dispatch/__init__.py\\n@@ -2,3 +2,7 @@\\n Dispatch classes handle the raw network connections and the implementation\\n details of making the HTTP request and receiving the response.\\n \\\"\\\"\\\"\\n+from .asgi import ASGIDispatch\\n+from .wsgi import WSGIDispatch\\n+\\n+__all__ = [\\\"ASGIDispatch\\\", \\\"WSGIDispatch\\\"]\\n\", \"issue\": \"ASGIDispatch and WSGIDispatch should be importable from the top-level httpx package\\nFrom #396:\\r\\n\\r\\n> ``ASGIDispatch`` and ``WSGIDispatch`` are documented as top-level but aren't exposed at the top level. 
This is definitely an issue, I'd recommend the route of making both available top-level.\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"\\nDispatch classes handle the raw network connections and the implementation\\ndetails of making the HTTP request and receiving the response.\\n\\\"\\\"\\\"\\n\", \"path\": \"httpx/dispatch/__init__.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":647,"string":"647"},"num_tokens_diff":{"kind":"number","value":121,"string":"121"}}},{"rowIdx":18175,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_21028"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"techmatters__terraso-backend-141"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nLandscape creation and update\n\r\n\r\n## Description\r\n- Default landscape group should be created when a new landscape is created\r\n- Manager should be assigned at the creation of a landscape\r\n- Only managers can update landscape data\n\n\n\n[start of terraso_backend/apps/core/models/landscapes.py]\n1 import structlog\n2 from django.db import models\n3 \n4 from apps.core import permission_rules as perm_rules\n5 \n6 from .commons import BaseModel, SlugModel\n7 from .groups import Group\n8 from .users import User\n9 \n10 logger = structlog.get_logger(__name__)\n11 \n12 \n13 class Landscape(SlugModel):\n14 \"\"\"\n15 This model represents a Landscape on Terraso platform.\n16 \n17 A Landscape is a socio-ecological system that consists of natural\n18 and/or human-modified ecosystems. Defined by its stakeholds, a\n19 Landscape usually has geographical boundaries. It may correspond to,\n20 or be a combination of, natural boundaries, distinct land features,\n21 socially defined areas such as indigenous territories, and/or\n22 jurisdictional and administrative boundaries. The boundaries of a\n23 Landscape can cross several countries.\n24 \"\"\"\n25 \n26 name = models.CharField(max_length=128, unique=True)\n27 description = models.TextField(max_length=512, blank=True, default=\"\")\n28 website = models.URLField(blank=True, default=\"\")\n29 location = models.CharField(max_length=128, blank=True, default=\"\")\n30 area_polygon = models.JSONField(blank=True, null=True)\n31 \n32 created_by = models.ForeignKey(\n33 User,\n34 blank=True,\n35 null=True,\n36 on_delete=models.PROTECT,\n37 related_name=\"created_landscapes\",\n38 )\n39 groups = models.ManyToManyField(Group, through=\"LandscapeGroup\")\n40 \n41 field_to_slug = \"name\"\n42 \n43 class Meta(SlugModel.Meta):\n44 rules_permissions = {\n45 \"change\": perm_rules.allowed_to_change_landscape,\n46 \"delete\": perm_rules.allowed_to_delete_landscape,\n47 }\n48 \n49 def get_default_group(self):\n50 \"\"\"\n51 A default Group in a Landscape is that Group where any\n52 individual (associated or not with other Groups) is added when\n53 associating directly with a Landscape.\n54 \"\"\"\n55 try:\n56 # associated_groups is the related_name defined on\n57 # LandscapeGroup relationship with Landscape. 
It returns a\n58 # queryset of LandscapeGroup\n59 landscape_group = self.associated_groups.get(is_default_landscape_group=True)\n60 except LandscapeGroup.DoesNotExist:\n61 logger.error(\n62 \"Landscape has no default group, but it must have\", extra={\"landscape_id\": self.pk}\n63 )\n64 return None\n65 \n66 return landscape_group.group\n67 \n68 def __str__(self):\n69 return self.name\n70 \n71 \n72 class LandscapeGroup(BaseModel):\n73 \"\"\"\n74 This model represents the association between a Landscape and a Group on\n75 Terraso platform.\n76 \"\"\"\n77 \n78 landscape = models.ForeignKey(\n79 Landscape, on_delete=models.CASCADE, related_name=\"associated_groups\"\n80 )\n81 group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name=\"associated_landscapes\")\n82 \n83 is_default_landscape_group = models.BooleanField(blank=True, default=False)\n84 \n85 class Meta:\n86 rules_permissions = {\n87 \"add\": perm_rules.allowed_to_add_landscape_group,\n88 \"delete\": perm_rules.allowed_to_delete_landscape_group,\n89 }\n90 constraints = (\n91 models.UniqueConstraint(\n92 fields=(\"group\", \"landscape\"),\n93 condition=models.Q(deleted_at__isnull=True),\n94 name=\"unique_active_landscape_group\",\n95 ),\n96 )\n97\n[end of terraso_backend/apps/core/models/landscapes.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/terraso_backend/apps/core/models/landscapes.py b/terraso_backend/apps/core/models/landscapes.py\n--- a/terraso_backend/apps/core/models/landscapes.py\n+++ b/terraso_backend/apps/core/models/landscapes.py\n@@ -1,5 +1,5 @@\n import structlog\n-from django.db import models\n+from django.db import models, transaction\n \n from apps.core import permission_rules as perm_rules\n \n@@ -46,6 +46,24 @@\n \"delete\": perm_rules.allowed_to_delete_landscape,\n }\n \n+ def save(self, *args, **kwargs):\n+ with transaction.atomic():\n+ creating = not Landscape.objects.filter(pk=self.pk).exists()\n+\n+ super().save(*args, **kwargs)\n+\n+ if creating and self.created_by:\n+ group = Group(\n+ name=\"Group {}\".format(self.slug),\n+ description=\"\",\n+ created_by=self.created_by,\n+ )\n+ group.save()\n+ landscape_group = LandscapeGroup(\n+ group=group, landscape=self, is_default_landscape_group=True\n+ )\n+ landscape_group.save()\n+\n def get_default_group(self):\n \"\"\"\n A default Group in a Landscape is that Group where 
any\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/terraso_backend/apps/core/models/landscapes.py b/terraso_backend/apps/core/models/landscapes.py\\n--- a/terraso_backend/apps/core/models/landscapes.py\\n+++ b/terraso_backend/apps/core/models/landscapes.py\\n@@ -1,5 +1,5 @@\\n import structlog\\n-from django.db import models\\n+from django.db import models, transaction\\n \\n from apps.core import permission_rules as perm_rules\\n \\n@@ -46,6 +46,24 @@\\n \\\"delete\\\": perm_rules.allowed_to_delete_landscape,\\n }\\n \\n+ def save(self, *args, **kwargs):\\n+ with transaction.atomic():\\n+ creating = not Landscape.objects.filter(pk=self.pk).exists()\\n+\\n+ super().save(*args, **kwargs)\\n+\\n+ if creating and self.created_by:\\n+ group = Group(\\n+ name=\\\"Group {}\\\".format(self.slug),\\n+ description=\\\"\\\",\\n+ created_by=self.created_by,\\n+ )\\n+ group.save()\\n+ landscape_group = LandscapeGroup(\\n+ group=group, landscape=self, is_default_landscape_group=True\\n+ )\\n+ landscape_group.save()\\n+\\n def get_default_group(self):\\n \\\"\\\"\\\"\\n A default Group in a Landscape is that Group where any\\n\", \"issue\": \"Landscape creation and update\\n\\r\\n\\r\\n## Description\\r\\n- Default landscape group should be created when a new landscape is created\\r\\n- Manager should be assigned at the creation of a landscape\\r\\n- Only managers can update landscape data\\n\", \"before_files\": [{\"content\": \"import structlog\\nfrom django.db import models\\n\\nfrom apps.core import permission_rules as perm_rules\\n\\nfrom .commons import BaseModel, SlugModel\\nfrom .groups import Group\\nfrom .users import User\\n\\nlogger = structlog.get_logger(__name__)\\n\\n\\nclass Landscape(SlugModel):\\n \\\"\\\"\\\"\\n This model represents a Landscape on Terraso platform.\\n\\n A Landscape is a socio-ecological system that consists of natural\\n and/or human-modified ecosystems. Defined by its stakeholds, a\\n Landscape usually has geographical boundaries. It may correspond to,\\n or be a combination of, natural boundaries, distinct land features,\\n socially defined areas such as indigenous territories, and/or\\n jurisdictional and administrative boundaries. The boundaries of a\\n Landscape can cross several countries.\\n \\\"\\\"\\\"\\n\\n name = models.CharField(max_length=128, unique=True)\\n description = models.TextField(max_length=512, blank=True, default=\\\"\\\")\\n website = models.URLField(blank=True, default=\\\"\\\")\\n location = models.CharField(max_length=128, blank=True, default=\\\"\\\")\\n area_polygon = models.JSONField(blank=True, null=True)\\n\\n created_by = models.ForeignKey(\\n User,\\n blank=True,\\n null=True,\\n on_delete=models.PROTECT,\\n related_name=\\\"created_landscapes\\\",\\n )\\n groups = models.ManyToManyField(Group, through=\\\"LandscapeGroup\\\")\\n\\n field_to_slug = \\\"name\\\"\\n\\n class Meta(SlugModel.Meta):\\n rules_permissions = {\\n \\\"change\\\": perm_rules.allowed_to_change_landscape,\\n \\\"delete\\\": perm_rules.allowed_to_delete_landscape,\\n }\\n\\n def get_default_group(self):\\n \\\"\\\"\\\"\\n A default Group in a Landscape is that Group where any\\n individual (associated or not with other Groups) is added when\\n associating directly with a Landscape.\\n \\\"\\\"\\\"\\n try:\\n # associated_groups is the related_name defined on\\n # LandscapeGroup relationship with Landscape. 
It returns a\\n # queryset of LandscapeGroup\\n landscape_group = self.associated_groups.get(is_default_landscape_group=True)\\n except LandscapeGroup.DoesNotExist:\\n logger.error(\\n \\\"Landscape has no default group, but it must have\\\", extra={\\\"landscape_id\\\": self.pk}\\n )\\n return None\\n\\n return landscape_group.group\\n\\n def __str__(self):\\n return self.name\\n\\n\\nclass LandscapeGroup(BaseModel):\\n \\\"\\\"\\\"\\n This model represents the association between a Landscape and a Group on\\n Terraso platform.\\n \\\"\\\"\\\"\\n\\n landscape = models.ForeignKey(\\n Landscape, on_delete=models.CASCADE, related_name=\\\"associated_groups\\\"\\n )\\n group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name=\\\"associated_landscapes\\\")\\n\\n is_default_landscape_group = models.BooleanField(blank=True, default=False)\\n\\n class Meta:\\n rules_permissions = {\\n \\\"add\\\": perm_rules.allowed_to_add_landscape_group,\\n \\\"delete\\\": perm_rules.allowed_to_delete_landscape_group,\\n }\\n constraints = (\\n models.UniqueConstraint(\\n fields=(\\\"group\\\", \\\"landscape\\\"),\\n condition=models.Q(deleted_at__isnull=True),\\n name=\\\"unique_active_landscape_group\\\",\\n ),\\n )\\n\", \"path\": \"terraso_backend/apps/core/models/landscapes.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1476,"string":"1,476"},"num_tokens_diff":{"kind":"number","value":278,"string":"278"}}},{"rowIdx":18176,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_31833"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"Flexget__Flexget-2858"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\ntorrentday pluging winth uid secret gets error expecting string\n### Expected behaviour:\r\n\r\nSetting uid from secrets file should work and update config\r\n\r\n### Actual behaviour:\r\n\r\nWhen setting the uid from a secret file, since uid is a number, it's converted to integer on the config, this makes a error in the config compilation of:\r\n\r\n/tasks/xTV_TorrentDay/discover/from/0/torrentday/uid] Got `1234567`, expected: string\r\n\r\n### Steps to reproduce:\r\n- Step 1: Add config and run flexget check\r\n\r\n#### Config:\r\n```yaml\r\n TV_TorrentDay:\r\n discover:\r\n what:\r\n - next_series_episodes: yes\r\n from:\r\n - torrentday:\r\n uid: \"{? torrentday.uid ?}\"\r\n passkey: '{? torrentday.passkey ?}'\r\n cfduid: '{? torrentday.cfduid ?}'\r\n rss_key: '{? torrentday.rss_key ?}'\r\n category: 'tvBRD'\r\n```\r\nsecrets.yaml\r\n```yaml\r\ntorrentday:\r\n uid: \"1234567\"\r\n passkey: \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\r\n cfduid: \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\r\n rss_key: \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\r\n```\r\n \r\n#### Log:\r\n
<details>\r\n <summary>\r\n (click to expand)\r\n </summary>\r\n\r\n```\r\n2021-02-16 01:29:54 CRITICAL manager [/tasks/xTV_TorrentDay/discover/from/0/torrentday/uid] Got `1234567`, expected: string\r\n2021-02-16 01:29:54 CRITICAL manager Failed to load config file: Did not pass schema validation.\r\n\r\n```\r\n</details>
\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.1.102\r\n- Python version: 3.7.3\r\n- Installation method: venv\r\n- Using daemon (yes/no): yes\r\n- OS and version: debian 10\n\n
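The traceback and config above point at a type mismatch: the secrets interpolation yields `uid` as an integer while the plugin schema only allows a string. A minimal sketch of the kind of change that resolves it is below; the schema fragment and the `str()` cast mirror the patch shown later in this row, but the surrounding function is illustrative rather than FlexGet's actual plugin code.

```python
# Sketch only: accept uid as either an integer or a string, then normalize.
SCHEMA_UID = {'oneOf': [{'type': 'integer'}, {'type': 'string'}]}

def build_cookies(config):
    # str() makes "{? torrentday.uid ?}" work whether the secrets file
    # yields 1234567 or "1234567".
    return {
        'uid': str(config['uid']),
        'pass': config['passkey'],
        '__cfduid': config['cfduid'],
    }
```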
\n\n[start of flexget/components/sites/sites/torrentday.py]\n1 import re\n2 \n3 from loguru import logger\n4 from requests.exceptions import RequestException\n5 \n6 from flexget import plugin\n7 from flexget.components.sites.urlrewriting import UrlRewritingError\n8 from flexget.components.sites.utils import normalize_unicode, torrent_availability\n9 from flexget.entry import Entry\n10 from flexget.event import event\n11 from flexget.plugin import PluginError\n12 from flexget.utils import requests\n13 from flexget.utils.soup import get_soup\n14 from flexget.utils.tools import parse_filesize\n15 \n16 logger = logger.bind(name='torrentday')\n17 \n18 CATEGORIES = {\n19 'all': 0,\n20 # Movies\n21 'mov4k': 96,\n22 'mov480p': 25,\n23 'movHD': 11,\n24 'movBD': 5,\n25 'movDVD': 3,\n26 'movMP4': 21,\n27 'movNonEnglish': 22,\n28 'movPACKS': 13,\n29 'movSDx264': 44,\n30 'movX265': 48,\n31 'movXVID': 1,\n32 # TV\n33 'tv480p': 24,\n34 'tvBRD': 32,\n35 'tvDVD': 31,\n36 'tvDVDrip': 33,\n37 'tvMOBILE': 46,\n38 'tvNonEnglish': 82,\n39 'tvPACKS': 14,\n40 'tvSDx264': 26,\n41 'tvHDx264': 7,\n42 'tvX265': 34,\n43 'tvXVID': 2,\n44 }\n45 \n46 \n47 class UrlRewriteTorrentday:\n48 \"\"\"\n49 Torrentday urlrewriter and search plugin.\n50 \n51 torrentday:\n52 uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies\n53 passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. see previous\n54 cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES\n55 rss_key: xxxxxxxxx (required) get this from your profile page\n56 category: xxxxxxxx\n57 \n58 Category can be one of\n59 ID from browsing site OR 'name'\n60 movies:\n61 mov4k, mov480p, movHD, movBD, movDVD,\n62 movMP4, movNonEnglish, movPACKS,\n63 movSDx264, movX265, movXVID\n64 tv:\n65 tv480p, tvBRD, tvDVD, tvDVDrip,\n66 tvMOBILE, tvNonEnglish, tvPACKS,\n67 tvSDx264, tvHDx264, tvX265, tvXVID\n68 \"\"\"\n69 \n70 schema = {\n71 'type': 'object',\n72 'properties': {\n73 'rss_key': {'type': 'string'},\n74 'uid': {'type': 'string'},\n75 'passkey': {'type': 'string'},\n76 'cfduid': {'type': 'string'},\n77 'category': {\n78 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]\n79 },\n80 },\n81 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],\n82 'additionalProperties': False,\n83 }\n84 \n85 # urlrewriter API\n86 def url_rewritable(self, task, entry):\n87 url = entry['url']\n88 if url.find('.torrent'):\n89 return False\n90 if url.startswith('https://www.torrentday.com'):\n91 return True\n92 return False\n93 \n94 # urlrewriter API\n95 def url_rewrite(self, task, entry):\n96 if 'url' not in entry:\n97 logger.error('Didn\\'t actually get a URL...')\n98 else:\n99 logger.debug('Got the URL: {}', entry['url'])\n100 if entry['url'].startswith('https://www.torrentday.com/browse'):\n101 # use search\n102 results = self.search(task, entry)\n103 if not results:\n104 raise UrlRewritingError('No search results found')\n105 entry['url'] = results[0]['url']\n106 \n107 @plugin.internet(logger)\n108 def search(self, task, entry, config=None):\n109 \"\"\"\n110 Search for name from torrentday.\n111 \"\"\"\n112 \n113 categories = config.get('category', 'all')\n114 # Make sure categories is a list\n115 if not isinstance(categories, list):\n116 categories = [categories]\n117 # If there are any text categories, turn them into their id number\n118 categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]\n119 params = {'cata': 'yes', 'clear-new': 1}\n120 params.update({str(c): 1 for c in categories})\n121 \n122 entries = set()\n123 for 
search_string in entry.get('search_strings', [entry['title']]):\n124 \n125 url = 'https://www.torrentday.com/t'\n126 params['q'] = normalize_unicode(search_string).replace(':', '')\n127 cookies = {\n128 'uid': config['uid'],\n129 'pass': config['passkey'],\n130 '__cfduid': config['cfduid'],\n131 }\n132 \n133 try:\n134 page = requests.get(url, params=params, cookies=cookies).content\n135 except RequestException as e:\n136 raise PluginError('Could not connect to torrentday: {}'.format(e))\n137 \n138 # the following should avoid table being None due to a malformed\n139 # html in td search results\n140 soup = get_soup(page).contents[1].contents[1].contents[1].next.nextSibling\n141 table = soup.find('table', {'id': 'torrentTable'})\n142 if table is None:\n143 raise PluginError(\n144 'Search returned by torrentday appears to be empty or malformed.'\n145 )\n146 \n147 # the first row is the header so skip it\n148 for tr in table.find_all('tr')[1:]:\n149 entry = Entry()\n150 # find the torrent names\n151 td = tr.find('td', {'class': 'torrentNameInfo'})\n152 if not td:\n153 logger.warning('Could not find entry torrentNameInfo for {}.', search_string)\n154 continue\n155 title = td.find('a')\n156 if not title:\n157 logger.warning('Could not determine title for {}.', search_string)\n158 continue\n159 entry['title'] = title.contents[0]\n160 logger.debug('title: {}', title.contents[0])\n161 \n162 # find download link\n163 torrent_url = tr.find('td', {'class': 'ac'})\n164 if not torrent_url:\n165 logger.warning('Could not determine download link for {}.', search_string)\n166 continue\n167 torrent_url = torrent_url.find('a').get('href')\n168 \n169 # construct download URL\n170 torrent_url = (\n171 'https://www.torrentday.com/'\n172 + torrent_url\n173 + '?torrent_pass='\n174 + config['rss_key']\n175 )\n176 logger.debug('RSS-ified download link: {}', torrent_url)\n177 entry['url'] = torrent_url\n178 \n179 # us tr object for seeders/leechers\n180 seeders = tr.find('td', {'class': 'ac seedersInfo'})\n181 leechers = tr.find('td', {'class': 'ac leechersInfo'})\n182 entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))\n183 entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))\n184 entry['torrent_availability'] = torrent_availability(\n185 entry['torrent_seeds'], entry['torrent_leeches']\n186 )\n187 \n188 # use tr object for size\n189 size = tr.find('td', text=re.compile(r'([\\.\\d]+) ([TGMKk]?)B')).contents[0]\n190 size = re.search(r'([\\.\\d]+) ([TGMKk]?)B', str(size))\n191 \n192 entry['content_size'] = parse_filesize(size.group(0))\n193 \n194 entries.add(entry)\n195 \n196 return sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))\n197 \n198 \n199 @event('plugin.register')\n200 def register_plugin():\n201 plugin.register(\n202 UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2\n203 )\n204\n[end of flexget/components/sites/sites/torrentday.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/flexget/components/sites/sites/torrentday.py b/flexget/components/sites/sites/torrentday.py\n--- a/flexget/components/sites/sites/torrentday.py\n+++ b/flexget/components/sites/sites/torrentday.py\n@@ -6,6 +6,7 @@\n from flexget import plugin\n from flexget.components.sites.urlrewriting import UrlRewritingError\n from flexget.components.sites.utils import normalize_unicode, torrent_availability\n+from flexget.config_schema import one_or_more\n from flexget.entry import Entry\n from flexget.event import event\n from flexget.plugin import PluginError\n@@ -69,12 +70,12 @@\n 'type': 'object',\n 'properties': {\n 'rss_key': {'type': 'string'},\n- 'uid': {'type': 'string'},\n+ 'uid': {'oneOf': [{'type': 'integer'}, {'type': 'string'}]},\n 'passkey': {'type': 'string'},\n 'cfduid': {'type': 'string'},\n- 'category': {\n- 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]\n- },\n+ 'category': one_or_more(\n+ {'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]}\n+ ),\n },\n 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],\n 'additionalProperties': False,\n@@ -125,7 +126,7 @@\n url = 'https://www.torrentday.com/t'\n params['q'] = normalize_unicode(search_string).replace(':', '')\n cookies = {\n- 'uid': config['uid'],\n+ 'uid': str(config['uid']),\n 'pass': config['passkey'],\n '__cfduid': config['cfduid'],\n }\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/flexget/components/sites/sites/torrentday.py b/flexget/components/sites/sites/torrentday.py\\n--- a/flexget/components/sites/sites/torrentday.py\\n+++ b/flexget/components/sites/sites/torrentday.py\\n@@ -6,6 +6,7 @@\\n from flexget import plugin\\n from flexget.components.sites.urlrewriting import UrlRewritingError\\n from flexget.components.sites.utils import normalize_unicode, torrent_availability\\n+from flexget.config_schema import one_or_more\\n from flexget.entry import Entry\\n from flexget.event import event\\n from flexget.plugin import PluginError\\n@@ -69,12 +70,12 @@\\n 'type': 'object',\\n 'properties': {\\n 'rss_key': {'type': 'string'},\\n- 'uid': {'type': 'string'},\\n+ 'uid': {'oneOf': [{'type': 'integer'}, {'type': 'string'}]},\\n 'passkey': {'type': 'string'},\\n 'cfduid': {'type': 'string'},\\n- 'category': {\\n- 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]\\n- },\\n+ 'category': one_or_more(\\n+ {'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': 
list(CATEGORIES)}]}\\n+ ),\\n },\\n 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],\\n 'additionalProperties': False,\\n@@ -125,7 +126,7 @@\\n url = 'https://www.torrentday.com/t'\\n params['q'] = normalize_unicode(search_string).replace(':', '')\\n cookies = {\\n- 'uid': config['uid'],\\n+ 'uid': str(config['uid']),\\n 'pass': config['passkey'],\\n '__cfduid': config['cfduid'],\\n }\\n\", \"issue\": \"torrentday pluging winth uid secret gets error expecting string\\n### Expected behaviour:\\r\\n\\r\\nSetting uid from secrets file should work and update config\\r\\n\\r\\n### Actual behaviour:\\r\\n\\r\\nWhen setting the uid from a secret file, since uid is a number, it's converted to integer on the config, this makes a error in the config compilation of:\\r\\n\\r\\n/tasks/xTV_TorrentDay/discover/from/0/torrentday/uid] Got `1234567`, expected: string\\r\\n\\r\\n### Steps to reproduce:\\r\\n- Step 1: Add config and run flexget check\\r\\n\\r\\n#### Config:\\r\\n```yaml\\r\\n TV_TorrentDay:\\r\\n discover:\\r\\n what:\\r\\n - next_series_episodes: yes\\r\\n from:\\r\\n - torrentday:\\r\\n uid: \\\"{? torrentday.uid ?}\\\"\\r\\n passkey: '{? torrentday.passkey ?}'\\r\\n cfduid: '{? torrentday.cfduid ?}'\\r\\n rss_key: '{? torrentday.rss_key ?}'\\r\\n category: 'tvBRD'\\r\\n```\\r\\nsecrets.yaml\\r\\n```yaml\\r\\ntorrentday:\\r\\n uid: \\\"1234567\\\"\\r\\n passkey: \\\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\\"\\r\\n cfduid: \\\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\\"\\r\\n rss_key: \\\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\\"\\r\\n```\\r\\n \\r\\n#### Log:\\r\\n
<details>\\r\\n <summary>\\r\\n (click to expand)\\r\\n </summary>\\r\\n\\r\\n```\\r\\n2021-02-16 01:29:54 CRITICAL manager [/tasks/xTV_TorrentDay/discover/from/0/torrentday/uid] Got `1234567`, expected: string\\r\\n2021-02-16 01:29:54 CRITICAL manager Failed to load config file: Did not pass schema validation.\\r\\n\\r\\n```\\r\\n</details>
\\r\\n\\r\\n### Additional information:\\r\\n\\r\\n- FlexGet version: 3.1.102\\r\\n- Python version: 3.7.3\\r\\n- Installation method: venv\\r\\n- Using daemon (yes/no): yes\\r\\n- OS and version: debian 10\\n\", \"before_files\": [{\"content\": \"import re\\n\\nfrom loguru import logger\\nfrom requests.exceptions import RequestException\\n\\nfrom flexget import plugin\\nfrom flexget.components.sites.urlrewriting import UrlRewritingError\\nfrom flexget.components.sites.utils import normalize_unicode, torrent_availability\\nfrom flexget.entry import Entry\\nfrom flexget.event import event\\nfrom flexget.plugin import PluginError\\nfrom flexget.utils import requests\\nfrom flexget.utils.soup import get_soup\\nfrom flexget.utils.tools import parse_filesize\\n\\nlogger = logger.bind(name='torrentday')\\n\\nCATEGORIES = {\\n 'all': 0,\\n # Movies\\n 'mov4k': 96,\\n 'mov480p': 25,\\n 'movHD': 11,\\n 'movBD': 5,\\n 'movDVD': 3,\\n 'movMP4': 21,\\n 'movNonEnglish': 22,\\n 'movPACKS': 13,\\n 'movSDx264': 44,\\n 'movX265': 48,\\n 'movXVID': 1,\\n # TV\\n 'tv480p': 24,\\n 'tvBRD': 32,\\n 'tvDVD': 31,\\n 'tvDVDrip': 33,\\n 'tvMOBILE': 46,\\n 'tvNonEnglish': 82,\\n 'tvPACKS': 14,\\n 'tvSDx264': 26,\\n 'tvHDx264': 7,\\n 'tvX265': 34,\\n 'tvXVID': 2,\\n}\\n\\n\\nclass UrlRewriteTorrentday:\\n \\\"\\\"\\\"\\n Torrentday urlrewriter and search plugin.\\n\\n torrentday:\\n uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies\\n passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. see previous\\n cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES\\n rss_key: xxxxxxxxx (required) get this from your profile page\\n category: xxxxxxxx\\n\\n Category can be one of\\n ID from browsing site OR 'name'\\n movies:\\n mov4k, mov480p, movHD, movBD, movDVD,\\n movMP4, movNonEnglish, movPACKS,\\n movSDx264, movX265, movXVID\\n tv:\\n tv480p, tvBRD, tvDVD, tvDVDrip,\\n tvMOBILE, tvNonEnglish, tvPACKS,\\n tvSDx264, tvHDx264, tvX265, tvXVID\\n \\\"\\\"\\\"\\n\\n schema = {\\n 'type': 'object',\\n 'properties': {\\n 'rss_key': {'type': 'string'},\\n 'uid': {'type': 'string'},\\n 'passkey': {'type': 'string'},\\n 'cfduid': {'type': 'string'},\\n 'category': {\\n 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]\\n },\\n },\\n 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],\\n 'additionalProperties': False,\\n }\\n\\n # urlrewriter API\\n def url_rewritable(self, task, entry):\\n url = entry['url']\\n if url.find('.torrent'):\\n return False\\n if url.startswith('https://www.torrentday.com'):\\n return True\\n return False\\n\\n # urlrewriter API\\n def url_rewrite(self, task, entry):\\n if 'url' not in entry:\\n logger.error('Didn\\\\'t actually get a URL...')\\n else:\\n logger.debug('Got the URL: {}', entry['url'])\\n if entry['url'].startswith('https://www.torrentday.com/browse'):\\n # use search\\n results = self.search(task, entry)\\n if not results:\\n raise UrlRewritingError('No search results found')\\n entry['url'] = results[0]['url']\\n\\n @plugin.internet(logger)\\n def search(self, task, entry, config=None):\\n \\\"\\\"\\\"\\n Search for name from torrentday.\\n \\\"\\\"\\\"\\n\\n categories = config.get('category', 'all')\\n # Make sure categories is a list\\n if not isinstance(categories, list):\\n categories = [categories]\\n # If there are any text categories, turn them into their id number\\n categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]\\n params = {'cata': 'yes', 'clear-new': 1}\\n params.update({str(c): 1 for c in 
categories})\\n\\n entries = set()\\n for search_string in entry.get('search_strings', [entry['title']]):\\n\\n url = 'https://www.torrentday.com/t'\\n params['q'] = normalize_unicode(search_string).replace(':', '')\\n cookies = {\\n 'uid': config['uid'],\\n 'pass': config['passkey'],\\n '__cfduid': config['cfduid'],\\n }\\n\\n try:\\n page = requests.get(url, params=params, cookies=cookies).content\\n except RequestException as e:\\n raise PluginError('Could not connect to torrentday: {}'.format(e))\\n\\n # the following should avoid table being None due to a malformed\\n # html in td search results\\n soup = get_soup(page).contents[1].contents[1].contents[1].next.nextSibling\\n table = soup.find('table', {'id': 'torrentTable'})\\n if table is None:\\n raise PluginError(\\n 'Search returned by torrentday appears to be empty or malformed.'\\n )\\n\\n # the first row is the header so skip it\\n for tr in table.find_all('tr')[1:]:\\n entry = Entry()\\n # find the torrent names\\n td = tr.find('td', {'class': 'torrentNameInfo'})\\n if not td:\\n logger.warning('Could not find entry torrentNameInfo for {}.', search_string)\\n continue\\n title = td.find('a')\\n if not title:\\n logger.warning('Could not determine title for {}.', search_string)\\n continue\\n entry['title'] = title.contents[0]\\n logger.debug('title: {}', title.contents[0])\\n\\n # find download link\\n torrent_url = tr.find('td', {'class': 'ac'})\\n if not torrent_url:\\n logger.warning('Could not determine download link for {}.', search_string)\\n continue\\n torrent_url = torrent_url.find('a').get('href')\\n\\n # construct download URL\\n torrent_url = (\\n 'https://www.torrentday.com/'\\n + torrent_url\\n + '?torrent_pass='\\n + config['rss_key']\\n )\\n logger.debug('RSS-ified download link: {}', torrent_url)\\n entry['url'] = torrent_url\\n\\n # us tr object for seeders/leechers\\n seeders = tr.find('td', {'class': 'ac seedersInfo'})\\n leechers = tr.find('td', {'class': 'ac leechersInfo'})\\n entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))\\n entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))\\n entry['torrent_availability'] = torrent_availability(\\n entry['torrent_seeds'], entry['torrent_leeches']\\n )\\n\\n # use tr object for size\\n size = tr.find('td', text=re.compile(r'([\\\\.\\\\d]+) ([TGMKk]?)B')).contents[0]\\n size = re.search(r'([\\\\.\\\\d]+) ([TGMKk]?)B', str(size))\\n\\n entry['content_size'] = parse_filesize(size.group(0))\\n\\n entries.add(entry)\\n\\n return sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))\\n\\n\\n@event('plugin.register')\\ndef register_plugin():\\n plugin.register(\\n UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2\\n )\\n\", \"path\": \"flexget/components/sites/sites/torrentday.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3267,"string":"3,267"},"num_tokens_diff":{"kind":"number","value":413,"string":"413"}}},{"rowIdx":18177,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_20437"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"graspologic-org__graspologic-488"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nRemove * imports in subgraph, utils\nthe` __init__.py` files in `graspy.subgraph` and `graspy.utils` shouldn't have import *.\n\n\n\n[start of 
graspy/utils/__init__.py]\n1 # Copyright (c) Microsoft Corporation and contributors.\n2 # Licensed under the MIT License.\n3 \n4 from .utils import *\n5 from .ptr import *\n6\n[end of graspy/utils/__init__.py]\n[start of graspy/subgraph/__init__.py]\n1 # Copyright (c) Microsoft Corporation and contributors.\n2 # Licensed under the MIT License.\n3 \n4 from .sg import *\n5\n[end of graspy/subgraph/__init__.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/graspy/subgraph/__init__.py b/graspy/subgraph/__init__.py\n--- a/graspy/subgraph/__init__.py\n+++ b/graspy/subgraph/__init__.py\n@@ -1,4 +1,7 @@\n # Copyright (c) Microsoft Corporation and contributors.\n # Licensed under the MIT License.\n \n-from .sg import *\n+\n+from .sg import SignalSubgraph\n+\n+__all__ = [SignalSubgraph]\ndiff --git a/graspy/utils/__init__.py b/graspy/utils/__init__.py\n--- a/graspy/utils/__init__.py\n+++ b/graspy/utils/__init__.py\n@@ -1,5 +1,42 @@\n # Copyright (c) Microsoft Corporation and contributors.\n # Licensed under the MIT License.\n \n-from .utils import *\n-from .ptr import *\n+from .utils import (\n+ import_graph,\n+ import_edgelist,\n+ is_symmetric,\n+ is_loopless,\n+ is_unweighted,\n+ is_almost_symmetric,\n+ symmetrize,\n+ remove_loops,\n+ to_laplace,\n+ is_fully_connected,\n+ get_lcc,\n+ get_multigraph_union_lcc,\n+ get_multigraph_intersect_lcc,\n+ augment_diagonal,\n+ binarize,\n+ cartprod,\n+)\n+from .ptr import pass_to_ranks\n+\n+__all__ = [\n+ import_graph,\n+ import_edgelist,\n+ is_symmetric,\n+ is_loopless,\n+ is_unweighted,\n+ is_almost_symmetric,\n+ symmetrize,\n+ remove_loops,\n+ to_laplace,\n+ is_fully_connected,\n+ get_lcc,\n+ get_multigraph_union_lcc,\n+ get_multigraph_intersect_lcc,\n+ augment_diagonal,\n+ binarize,\n+ cartprod,\n+ pass_to_ranks,\n+]\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/graspy/subgraph/__init__.py b/graspy/subgraph/__init__.py\\n--- a/graspy/subgraph/__init__.py\\n+++ b/graspy/subgraph/__init__.py\\n@@ -1,4 +1,7 @@\\n # Copyright (c) Microsoft Corporation and contributors.\\n # Licensed under the MIT License.\\n \\n-from .sg import *\\n+\\n+from .sg import SignalSubgraph\\n+\\n+__all__ = [SignalSubgraph]\\ndiff --git a/graspy/utils/__init__.py b/graspy/utils/__init__.py\\n--- a/graspy/utils/__init__.py\\n+++ b/graspy/utils/__init__.py\\n@@ -1,5 +1,42 @@\\n # Copyright (c) Microsoft Corporation 
and contributors.\\n # Licensed under the MIT License.\\n \\n-from .utils import *\\n-from .ptr import *\\n+from .utils import (\\n+ import_graph,\\n+ import_edgelist,\\n+ is_symmetric,\\n+ is_loopless,\\n+ is_unweighted,\\n+ is_almost_symmetric,\\n+ symmetrize,\\n+ remove_loops,\\n+ to_laplace,\\n+ is_fully_connected,\\n+ get_lcc,\\n+ get_multigraph_union_lcc,\\n+ get_multigraph_intersect_lcc,\\n+ augment_diagonal,\\n+ binarize,\\n+ cartprod,\\n+)\\n+from .ptr import pass_to_ranks\\n+\\n+__all__ = [\\n+ import_graph,\\n+ import_edgelist,\\n+ is_symmetric,\\n+ is_loopless,\\n+ is_unweighted,\\n+ is_almost_symmetric,\\n+ symmetrize,\\n+ remove_loops,\\n+ to_laplace,\\n+ is_fully_connected,\\n+ get_lcc,\\n+ get_multigraph_union_lcc,\\n+ get_multigraph_intersect_lcc,\\n+ augment_diagonal,\\n+ binarize,\\n+ cartprod,\\n+ pass_to_ranks,\\n+]\\n\", \"issue\": \"Remove * imports in subgraph, utils\\nthe` __init__.py` files in `graspy.subgraph` and `graspy.utils` shouldn't have import *.\\n\", \"before_files\": [{\"content\": \"# Copyright (c) Microsoft Corporation and contributors.\\n# Licensed under the MIT License.\\n\\nfrom .utils import *\\nfrom .ptr import *\\n\", \"path\": \"graspy/utils/__init__.py\"}, {\"content\": \"# Copyright (c) Microsoft Corporation and contributors.\\n# Licensed under the MIT License.\\n\\nfrom .sg import *\\n\", \"path\": \"graspy/subgraph/__init__.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":662,"string":"662"},"num_tokens_diff":{"kind":"number","value":429,"string":"429"}}},{"rowIdx":18178,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_27136"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"getmoto__moto-923"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nCloudformation doesn`t work with yaml templates\nWhen i try to use moto to mock a call to `create_stack` the following happens:\r\n\r\nTraceback:\r\n```\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\", line 107, in dispatch\r\n return cls()._dispatch(*args, **kwargs)\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\", line 167, in _dispatch\r\n return self.call_action()\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\", line 183, in call_action\r\n response = method()\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/responses.py\", line 51, in create_stack\r\n role_arn=role_arn,\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py\", line 126, in create_stack\r\n role_arn=role_arn,\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py\", line 18, in __init__\r\n self.template_dict = json.loads(self.template)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py\", line 354, in loads\r\n return _default_decoder.decode(s)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py\", line 339, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py\", line 357, in 
raw_decode\r\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\r\njson.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\r\n```\r\n\r\n#### How to reproduce\r\n\r\n```python\r\n# coding: utf-8\r\n\r\nimport unittest\r\nfrom boto.cloudformation import connect_to_region\r\nfrom moto import mock_cloudformation\r\n\r\n\r\nclass TestCase(unittest.TestCase):\r\n\r\n @mock_cloudformation\r\n def test_cloudformation_create_stack(self):\r\n\t connection = connect_to_region('sa-east-1')\r\n\t with open('ec2.yml') as fp:\r\n template = fp.read()\r\n\r\n connection.create_stack('test-stack', template_body=template)\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n```\r\n\r\nTemplate: ec2.yaml\r\n```yaml\r\nResources:\r\n BastionInstance:\r\n Type: \"AWS::EC2::Instance\"\r\n Properties: \r\n AvailabilityZone: sa-east-1a\r\n DisableApiTermination: false\r\n ImageId: ami-37cfad5b\r\n InstanceType: t2.micro\r\n```\r\n\r\nMoto version: 0.4.31\r\nBoto Version: 2.46.1\r\nPython Version: Python 3.6.0rc1\r\nSystem: Darwin Iurys-MacBook-Pro.local 15.5.0 Darwin Kernel Version 15.5.0: Tue Apr 19 18:36:36 PDT 2016; root:xnu-3248.50.21~8/RELEASE_X86_64 x86_64\r\n\n\n\n\n[start of setup.py]\n1 #!/usr/bin/env python\n2 from __future__ import unicode_literals\n3 from setuptools import setup, find_packages\n4 \n5 install_requires = [\n6 \"Jinja2>=2.8\",\n7 \"boto>=2.36.0\",\n8 \"cookies\",\n9 \"requests>=2.0\",\n10 \"xmltodict\",\n11 \"dicttoxml\",\n12 \"six\",\n13 \"werkzeug\",\n14 \"pytz\",\n15 \"python-dateutil\",\n16 ]\n17 \n18 extras_require = {\n19 'server': ['flask'],\n20 }\n21 \n22 setup(\n23 name='moto',\n24 version='0.4.31',\n25 description='A library that allows your python tests to easily'\n26 ' mock out the boto library',\n27 author='Steve Pulec',\n28 author_email='spulec@gmail.com',\n29 url='https://github.com/spulec/moto',\n30 entry_points={\n31 'console_scripts': [\n32 'moto_server = moto.server:main',\n33 ],\n34 },\n35 packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n36 install_requires=install_requires,\n37 extras_require=extras_require,\n38 license=\"Apache\",\n39 test_suite=\"tests\",\n40 classifiers=[\n41 \"Programming Language :: Python :: 2\",\n42 \"Programming Language :: Python :: 2.7\",\n43 \"Programming Language :: Python :: 3\",\n44 \"Programming Language :: Python :: 3.3\",\n45 \"License :: OSI Approved :: Apache Software License\",\n46 \"Topic :: Software Development :: Testing\",\n47 ],\n48 )\n49\n[end of setup.py]\n[start of moto/cloudformation/models.py]\n1 from __future__ import unicode_literals\n2 from datetime import datetime\n3 import json\n4 import uuid\n5 \n6 import boto.cloudformation\n7 from moto.core import BaseBackend, BaseModel\n8 \n9 from .parsing import ResourceMap, OutputMap\n10 from .utils import generate_stack_id\n11 from .exceptions import ValidationError\n12 \n13 \n14 class FakeStack(BaseModel):\n15 \n16 def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):\n17 self.stack_id = stack_id\n18 self.name = name\n19 self.template = template\n20 self.template_dict = json.loads(self.template)\n21 self.parameters = parameters\n22 self.region_name = region_name\n23 self.notification_arns = notification_arns if notification_arns else []\n24 self.role_arn = role_arn\n25 self.tags = tags if tags else {}\n26 self.events = []\n27 self._add_stack_event(\"CREATE_IN_PROGRESS\",\n28 resource_status_reason=\"User Initiated\")\n29 \n30 self.description = 
self.template_dict.get('Description')\n31 self.resource_map = self._create_resource_map()\n32 self.output_map = self._create_output_map()\n33 self._add_stack_event(\"CREATE_COMPLETE\")\n34 self.status = 'CREATE_COMPLETE'\n35 \n36 def _create_resource_map(self):\n37 resource_map = ResourceMap(\n38 self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict)\n39 resource_map.create()\n40 return resource_map\n41 \n42 def _create_output_map(self):\n43 output_map = OutputMap(self.resource_map, self.template_dict)\n44 output_map.create()\n45 return output_map\n46 \n47 def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):\n48 self.events.append(FakeEvent(\n49 stack_id=self.stack_id,\n50 stack_name=self.name,\n51 logical_resource_id=self.name,\n52 physical_resource_id=self.stack_id,\n53 resource_type=\"AWS::CloudFormation::Stack\",\n54 resource_status=resource_status,\n55 resource_status_reason=resource_status_reason,\n56 resource_properties=resource_properties,\n57 ))\n58 \n59 def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):\n60 # not used yet... feel free to help yourself\n61 resource = self.resource_map[logical_resource_id]\n62 self.events.append(FakeEvent(\n63 stack_id=self.stack_id,\n64 stack_name=self.name,\n65 logical_resource_id=logical_resource_id,\n66 physical_resource_id=resource.physical_resource_id,\n67 resource_type=resource.type,\n68 resource_status=resource_status,\n69 resource_status_reason=resource_status_reason,\n70 resource_properties=resource_properties,\n71 ))\n72 \n73 @property\n74 def stack_parameters(self):\n75 return self.resource_map.resolved_parameters\n76 \n77 @property\n78 def stack_resources(self):\n79 return self.resource_map.values()\n80 \n81 @property\n82 def stack_outputs(self):\n83 return self.output_map.values()\n84 \n85 def update(self, template, role_arn=None, parameters=None, tags=None):\n86 self._add_stack_event(\"UPDATE_IN_PROGRESS\", resource_status_reason=\"User Initiated\")\n87 self.template = template\n88 self.resource_map.update(json.loads(template), parameters)\n89 self.output_map = self._create_output_map()\n90 self._add_stack_event(\"UPDATE_COMPLETE\")\n91 self.status = \"UPDATE_COMPLETE\"\n92 self.role_arn = role_arn\n93 # only overwrite tags if passed\n94 if tags is not None:\n95 self.tags = tags\n96 # TODO: update tags in the resource map\n97 \n98 def delete(self):\n99 self._add_stack_event(\"DELETE_IN_PROGRESS\",\n100 resource_status_reason=\"User Initiated\")\n101 self.resource_map.delete()\n102 self._add_stack_event(\"DELETE_COMPLETE\")\n103 self.status = \"DELETE_COMPLETE\"\n104 \n105 \n106 class FakeEvent(BaseModel):\n107 \n108 def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):\n109 self.stack_id = stack_id\n110 self.stack_name = stack_name\n111 self.logical_resource_id = logical_resource_id\n112 self.physical_resource_id = physical_resource_id\n113 self.resource_type = resource_type\n114 self.resource_status = resource_status\n115 self.resource_status_reason = resource_status_reason\n116 self.resource_properties = resource_properties\n117 self.timestamp = datetime.utcnow()\n118 self.event_id = uuid.uuid4()\n119 \n120 \n121 class CloudFormationBackend(BaseBackend):\n122 \n123 def __init__(self):\n124 self.stacks = {}\n125 self.deleted_stacks = {}\n126 \n127 def 
create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):\n128 stack_id = generate_stack_id(name)\n129 new_stack = FakeStack(\n130 stack_id=stack_id,\n131 name=name,\n132 template=template,\n133 parameters=parameters,\n134 region_name=region_name,\n135 notification_arns=notification_arns,\n136 tags=tags,\n137 role_arn=role_arn,\n138 )\n139 self.stacks[stack_id] = new_stack\n140 return new_stack\n141 \n142 def describe_stacks(self, name_or_stack_id):\n143 stacks = self.stacks.values()\n144 if name_or_stack_id:\n145 for stack in stacks:\n146 if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:\n147 return [stack]\n148 if self.deleted_stacks:\n149 deleted_stacks = self.deleted_stacks.values()\n150 for stack in deleted_stacks:\n151 if stack.stack_id == name_or_stack_id:\n152 return [stack]\n153 raise ValidationError(name_or_stack_id)\n154 else:\n155 return stacks\n156 \n157 def list_stacks(self):\n158 return self.stacks.values()\n159 \n160 def get_stack(self, name_or_stack_id):\n161 all_stacks = dict(self.deleted_stacks, **self.stacks)\n162 if name_or_stack_id in all_stacks:\n163 # Lookup by stack id - deleted stacks incldued\n164 return all_stacks[name_or_stack_id]\n165 else:\n166 # Lookup by stack name - undeleted stacks only\n167 for stack in self.stacks.values():\n168 if stack.name == name_or_stack_id:\n169 return stack\n170 \n171 def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):\n172 stack = self.get_stack(name)\n173 stack.update(template, role_arn, parameters=parameters, tags=tags)\n174 return stack\n175 \n176 def list_stack_resources(self, stack_name_or_id):\n177 stack = self.get_stack(stack_name_or_id)\n178 return stack.stack_resources\n179 \n180 def delete_stack(self, name_or_stack_id):\n181 if name_or_stack_id in self.stacks:\n182 # Delete by stack id\n183 stack = self.stacks.pop(name_or_stack_id, None)\n184 stack.delete()\n185 self.deleted_stacks[stack.stack_id] = stack\n186 return self.stacks.pop(name_or_stack_id, None)\n187 else:\n188 # Delete by stack name\n189 for stack in list(self.stacks.values()):\n190 if stack.name == name_or_stack_id:\n191 self.delete_stack(stack.stack_id)\n192 \n193 \n194 cloudformation_backends = {}\n195 for region in boto.cloudformation.regions():\n196 cloudformation_backends[region.name] = CloudFormationBackend()\n197\n[end of moto/cloudformation/models.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py\n--- a/moto/cloudformation/models.py\n+++ b/moto/cloudformation/models.py\n@@ -1,6 +1,7 @@\n from __future__ import unicode_literals\n from datetime import datetime\n import json\n+import yaml\n import uuid\n \n import boto.cloudformation\n@@ -17,7 +18,7 @@\n self.stack_id = stack_id\n self.name = name\n self.template = template\n- self.template_dict = json.loads(self.template)\n+ self._parse_template()\n self.parameters = parameters\n self.region_name = region_name\n self.notification_arns = notification_arns if notification_arns else []\n@@ -70,6 +71,12 @@\n resource_properties=resource_properties,\n ))\n \n+ def _parse_template(self):\n+ try:\n+ self.template_dict = yaml.load(self.template)\n+ except yaml.parser.ParserError:\n+ self.template_dict = json.loads(self.template)\n+\n @property\n def stack_parameters(self):\n return self.resource_map.resolved_parameters\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,6 +11,7 @@\n \"dicttoxml\",\n \"six\",\n \"werkzeug\",\n+ \"pyaml\",\n \"pytz\",\n \"python-dateutil\",\n ]\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py\\n--- a/moto/cloudformation/models.py\\n+++ b/moto/cloudformation/models.py\\n@@ -1,6 +1,7 @@\\n from __future__ import unicode_literals\\n from datetime import datetime\\n import json\\n+import yaml\\n import uuid\\n \\n import boto.cloudformation\\n@@ -17,7 +18,7 @@\\n self.stack_id = stack_id\\n self.name = name\\n self.template = template\\n- self.template_dict = json.loads(self.template)\\n+ self._parse_template()\\n self.parameters = parameters\\n self.region_name = region_name\\n self.notification_arns = notification_arns if notification_arns else []\\n@@ -70,6 +71,12 @@\\n resource_properties=resource_properties,\\n ))\\n \\n+ def _parse_template(self):\\n+ try:\\n+ self.template_dict = yaml.load(self.template)\\n+ except yaml.parser.ParserError:\\n+ self.template_dict = json.loads(self.template)\\n+\\n @property\\n def stack_parameters(self):\\n return self.resource_map.resolved_parameters\\ndiff --git a/setup.py b/setup.py\\n--- a/setup.py\\n+++ b/setup.py\\n@@ -11,6 +11,7 @@\\n \\\"dicttoxml\\\",\\n \\\"six\\\",\\n \\\"werkzeug\\\",\\n+ \\\"pyaml\\\",\\n \\\"pytz\\\",\\n \\\"python-dateutil\\\",\\n ]\\n\", \"issue\": \"Cloudformation doesn`t work with yaml templates\\nWhen i try to use 
moto to mock a call to `create_stack` the following happens:\\r\\n\\r\\nTraceback:\\r\\n```\\r\\n File \\\"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\\\", line 107, in dispatch\\r\\n return cls()._dispatch(*args, **kwargs)\\r\\n File \\\"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\\\", line 167, in _dispatch\\r\\n return self.call_action()\\r\\n File \\\"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\\\", line 183, in call_action\\r\\n response = method()\\r\\n File \\\"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/responses.py\\\", line 51, in create_stack\\r\\n role_arn=role_arn,\\r\\n File \\\"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py\\\", line 126, in create_stack\\r\\n role_arn=role_arn,\\r\\n File \\\"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py\\\", line 18, in __init__\\r\\n self.template_dict = json.loads(self.template)\\r\\n File \\\"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py\\\", line 354, in loads\\r\\n return _default_decoder.decode(s)\\r\\n File \\\"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py\\\", line 339, in decode\\r\\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\\r\\n File \\\"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py\\\", line 357, in raw_decode\\r\\n raise JSONDecodeError(\\\"Expecting value\\\", s, err.value) from None\\r\\njson.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\\r\\n```\\r\\n\\r\\n#### How to reproduce\\r\\n\\r\\n```python\\r\\n# coding: utf-8\\r\\n\\r\\nimport unittest\\r\\nfrom boto.cloudformation import connect_to_region\\r\\nfrom moto import mock_cloudformation\\r\\n\\r\\n\\r\\nclass TestCase(unittest.TestCase):\\r\\n\\r\\n @mock_cloudformation\\r\\n def test_cloudformation_create_stack(self):\\r\\n\\t connection = connect_to_region('sa-east-1')\\r\\n\\t with open('ec2.yml') as fp:\\r\\n template = fp.read()\\r\\n\\r\\n connection.create_stack('test-stack', template_body=template)\\r\\n\\r\\n\\r\\nif __name__ == '__main__':\\r\\n unittest.main()\\r\\n```\\r\\n\\r\\nTemplate: ec2.yaml\\r\\n```yaml\\r\\nResources:\\r\\n BastionInstance:\\r\\n Type: \\\"AWS::EC2::Instance\\\"\\r\\n Properties: \\r\\n AvailabilityZone: sa-east-1a\\r\\n DisableApiTermination: false\\r\\n ImageId: ami-37cfad5b\\r\\n InstanceType: t2.micro\\r\\n```\\r\\n\\r\\nMoto version: 0.4.31\\r\\nBoto Version: 2.46.1\\r\\nPython Version: Python 3.6.0rc1\\r\\nSystem: Darwin Iurys-MacBook-Pro.local 15.5.0 Darwin Kernel Version 15.5.0: Tue Apr 19 18:36:36 PDT 2016; root:xnu-3248.50.21~8/RELEASE_X86_64 x86_64\\r\\n\\n\", \"before_files\": [{\"content\": \"#!/usr/bin/env python\\nfrom __future__ import unicode_literals\\nfrom setuptools import setup, find_packages\\n\\ninstall_requires = [\\n \\\"Jinja2>=2.8\\\",\\n \\\"boto>=2.36.0\\\",\\n \\\"cookies\\\",\\n \\\"requests>=2.0\\\",\\n \\\"xmltodict\\\",\\n \\\"dicttoxml\\\",\\n \\\"six\\\",\\n \\\"werkzeug\\\",\\n \\\"pytz\\\",\\n \\\"python-dateutil\\\",\\n]\\n\\nextras_require = {\\n 'server': ['flask'],\\n}\\n\\nsetup(\\n name='moto',\\n version='0.4.31',\\n description='A library that allows your python tests to easily'\\n ' mock out the boto library',\\n author='Steve Pulec',\\n 
author_email='spulec@gmail.com',\\n url='https://github.com/spulec/moto',\\n entry_points={\\n 'console_scripts': [\\n 'moto_server = moto.server:main',\\n ],\\n },\\n packages=find_packages(exclude=(\\\"tests\\\", \\\"tests.*\\\")),\\n install_requires=install_requires,\\n extras_require=extras_require,\\n license=\\\"Apache\\\",\\n test_suite=\\\"tests\\\",\\n classifiers=[\\n \\\"Programming Language :: Python :: 2\\\",\\n \\\"Programming Language :: Python :: 2.7\\\",\\n \\\"Programming Language :: Python :: 3\\\",\\n \\\"Programming Language :: Python :: 3.3\\\",\\n \\\"License :: OSI Approved :: Apache Software License\\\",\\n \\\"Topic :: Software Development :: Testing\\\",\\n ],\\n)\\n\", \"path\": \"setup.py\"}, {\"content\": \"from __future__ import unicode_literals\\nfrom datetime import datetime\\nimport json\\nimport uuid\\n\\nimport boto.cloudformation\\nfrom moto.core import BaseBackend, BaseModel\\n\\nfrom .parsing import ResourceMap, OutputMap\\nfrom .utils import generate_stack_id\\nfrom .exceptions import ValidationError\\n\\n\\nclass FakeStack(BaseModel):\\n\\n def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):\\n self.stack_id = stack_id\\n self.name = name\\n self.template = template\\n self.template_dict = json.loads(self.template)\\n self.parameters = parameters\\n self.region_name = region_name\\n self.notification_arns = notification_arns if notification_arns else []\\n self.role_arn = role_arn\\n self.tags = tags if tags else {}\\n self.events = []\\n self._add_stack_event(\\\"CREATE_IN_PROGRESS\\\",\\n resource_status_reason=\\\"User Initiated\\\")\\n\\n self.description = self.template_dict.get('Description')\\n self.resource_map = self._create_resource_map()\\n self.output_map = self._create_output_map()\\n self._add_stack_event(\\\"CREATE_COMPLETE\\\")\\n self.status = 'CREATE_COMPLETE'\\n\\n def _create_resource_map(self):\\n resource_map = ResourceMap(\\n self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict)\\n resource_map.create()\\n return resource_map\\n\\n def _create_output_map(self):\\n output_map = OutputMap(self.resource_map, self.template_dict)\\n output_map.create()\\n return output_map\\n\\n def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):\\n self.events.append(FakeEvent(\\n stack_id=self.stack_id,\\n stack_name=self.name,\\n logical_resource_id=self.name,\\n physical_resource_id=self.stack_id,\\n resource_type=\\\"AWS::CloudFormation::Stack\\\",\\n resource_status=resource_status,\\n resource_status_reason=resource_status_reason,\\n resource_properties=resource_properties,\\n ))\\n\\n def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):\\n # not used yet... 
feel free to help yourself\\n resource = self.resource_map[logical_resource_id]\\n self.events.append(FakeEvent(\\n stack_id=self.stack_id,\\n stack_name=self.name,\\n logical_resource_id=logical_resource_id,\\n physical_resource_id=resource.physical_resource_id,\\n resource_type=resource.type,\\n resource_status=resource_status,\\n resource_status_reason=resource_status_reason,\\n resource_properties=resource_properties,\\n ))\\n\\n @property\\n def stack_parameters(self):\\n return self.resource_map.resolved_parameters\\n\\n @property\\n def stack_resources(self):\\n return self.resource_map.values()\\n\\n @property\\n def stack_outputs(self):\\n return self.output_map.values()\\n\\n def update(self, template, role_arn=None, parameters=None, tags=None):\\n self._add_stack_event(\\\"UPDATE_IN_PROGRESS\\\", resource_status_reason=\\\"User Initiated\\\")\\n self.template = template\\n self.resource_map.update(json.loads(template), parameters)\\n self.output_map = self._create_output_map()\\n self._add_stack_event(\\\"UPDATE_COMPLETE\\\")\\n self.status = \\\"UPDATE_COMPLETE\\\"\\n self.role_arn = role_arn\\n # only overwrite tags if passed\\n if tags is not None:\\n self.tags = tags\\n # TODO: update tags in the resource map\\n\\n def delete(self):\\n self._add_stack_event(\\\"DELETE_IN_PROGRESS\\\",\\n resource_status_reason=\\\"User Initiated\\\")\\n self.resource_map.delete()\\n self._add_stack_event(\\\"DELETE_COMPLETE\\\")\\n self.status = \\\"DELETE_COMPLETE\\\"\\n\\n\\nclass FakeEvent(BaseModel):\\n\\n def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):\\n self.stack_id = stack_id\\n self.stack_name = stack_name\\n self.logical_resource_id = logical_resource_id\\n self.physical_resource_id = physical_resource_id\\n self.resource_type = resource_type\\n self.resource_status = resource_status\\n self.resource_status_reason = resource_status_reason\\n self.resource_properties = resource_properties\\n self.timestamp = datetime.utcnow()\\n self.event_id = uuid.uuid4()\\n\\n\\nclass CloudFormationBackend(BaseBackend):\\n\\n def __init__(self):\\n self.stacks = {}\\n self.deleted_stacks = {}\\n\\n def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):\\n stack_id = generate_stack_id(name)\\n new_stack = FakeStack(\\n stack_id=stack_id,\\n name=name,\\n template=template,\\n parameters=parameters,\\n region_name=region_name,\\n notification_arns=notification_arns,\\n tags=tags,\\n role_arn=role_arn,\\n )\\n self.stacks[stack_id] = new_stack\\n return new_stack\\n\\n def describe_stacks(self, name_or_stack_id):\\n stacks = self.stacks.values()\\n if name_or_stack_id:\\n for stack in stacks:\\n if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:\\n return [stack]\\n if self.deleted_stacks:\\n deleted_stacks = self.deleted_stacks.values()\\n for stack in deleted_stacks:\\n if stack.stack_id == name_or_stack_id:\\n return [stack]\\n raise ValidationError(name_or_stack_id)\\n else:\\n return stacks\\n\\n def list_stacks(self):\\n return self.stacks.values()\\n\\n def get_stack(self, name_or_stack_id):\\n all_stacks = dict(self.deleted_stacks, **self.stacks)\\n if name_or_stack_id in all_stacks:\\n # Lookup by stack id - deleted stacks incldued\\n return all_stacks[name_or_stack_id]\\n else:\\n # Lookup by stack name - undeleted stacks only\\n for stack in self.stacks.values():\\n if stack.name == 
name_or_stack_id:\\n return stack\\n\\n def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):\\n stack = self.get_stack(name)\\n stack.update(template, role_arn, parameters=parameters, tags=tags)\\n return stack\\n\\n def list_stack_resources(self, stack_name_or_id):\\n stack = self.get_stack(stack_name_or_id)\\n return stack.stack_resources\\n\\n def delete_stack(self, name_or_stack_id):\\n if name_or_stack_id in self.stacks:\\n # Delete by stack id\\n stack = self.stacks.pop(name_or_stack_id, None)\\n stack.delete()\\n self.deleted_stacks[stack.stack_id] = stack\\n return self.stacks.pop(name_or_stack_id, None)\\n else:\\n # Delete by stack name\\n for stack in list(self.stacks.values()):\\n if stack.name == name_or_stack_id:\\n self.delete_stack(stack.stack_id)\\n\\n\\ncloudformation_backends = {}\\nfor region in boto.cloudformation.regions():\\n cloudformation_backends[region.name] = CloudFormationBackend()\\n\", \"path\": \"moto/cloudformation/models.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3759,"string":"3,759"},"num_tokens_diff":{"kind":"number","value":306,"string":"306"}}},{"rowIdx":18179,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_19187"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"mozmeao__snippets-service-769"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nCreate a button to copy to clipboard the preview link on click\nPreview link for ASR Snippets cannot be directly clicked and instead must be copied and pasted to the URL bar. Create a `Copy` button next to the link to make the procedure easier. 
\n\n\n\n[start of snippets/base/admin/adminmodels.py]\n1 import re\n2 \n3 from django.contrib import admin\n4 from django.db.models import TextField, Q\n5 from django.template.loader import get_template\n6 from django.utils.safestring import mark_safe\n7 \n8 from reversion.admin import VersionAdmin\n9 from django_ace import AceWidget\n10 from django_statsd.clients import statsd\n11 from jinja2.meta import find_undeclared_variables\n12 from django_admin_listfilter_dropdown.filters import RelatedDropdownFilter\n13 \n14 from snippets.base import forms, models\n15 from snippets.base.models import JINJA_ENV\n16 from snippets.base.admin.filters import ModifiedFilter, ReleaseFilter\n17 \n18 \n19 MATCH_LOCALE_REGEX = re.compile('(\\w+(?:-\\w+)*)')\n20 RESERVED_VARIABLES = ('_', 'snippet_id')\n21 \n22 \n23 class ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n24 list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n25 'version', 'locale', 'appbuildid', 'build_target',\n26 'channel', 'os_version', 'distribution',\n27 'distribution_version', 'modified')\n28 list_filter = ('name', 'version', 'os_version', 'appbuildid',\n29 'build_target', 'channel', 'distribution', 'locale')\n30 save_on_top = True\n31 search_fields = ('description',)\n32 \n33 \n34 class LogEntryAdmin(admin.ModelAdmin):\n35 list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n36 list_filter = ('user', 'content_type')\n37 \n38 \n39 class SnippetTemplateVariableInline(admin.TabularInline):\n40 model = models.SnippetTemplateVariable\n41 formset = forms.SnippetTemplateVariableInlineFormset\n42 max_num = 0\n43 can_delete = False\n44 readonly_fields = ('name',)\n45 fields = ('name', 'type', 'order', 'description')\n46 \n47 \n48 class SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n49 save_on_top = True\n50 list_display = ('name', 'priority', 'hidden')\n51 list_filter = ('hidden', 'startpage')\n52 inlines = (SnippetTemplateVariableInline,)\n53 formfield_overrides = {\n54 TextField: {'widget': AceWidget(mode='html', theme='github',\n55 width='1200px', height='500px')},\n56 }\n57 \n58 class Media:\n59 css = {\n60 'all': ('css/admin.css',)\n61 }\n62 \n63 def save_related(self, request, form, formsets, change):\n64 \"\"\"\n65 After saving the related objects, remove and add\n66 SnippetTemplateVariables depending on how the template code changed.\n67 \"\"\"\n68 super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n69 change)\n70 \n71 # Parse the template code and find any undefined variables.\n72 ast = JINJA_ENV.env.parse(form.instance.code)\n73 new_vars = find_undeclared_variables(ast)\n74 var_manager = form.instance.variable_set\n75 \n76 # Filter out reserved variable names.\n77 new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n78 \n79 # Delete variables not in the new set.\n80 var_manager.filter(~Q(name__in=new_vars)).delete()\n81 \n82 # Create variables that don't exist.\n83 for i, variable in enumerate(new_vars, start=1):\n84 obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n85 template=form.instance, name=variable)\n86 if obj.order == 0:\n87 obj.order = i * 10\n88 obj.save()\n89 \n90 \n91 class UploadedFileAdmin(admin.ModelAdmin):\n92 readonly_fields = ('url', 'preview', 'snippets')\n93 list_display = ('name', 'url', 'preview', 'modified')\n94 prepopulated_fields = {'name': ('file',)}\n95 form = forms.UploadedFileAdminForm\n96 \n97 def preview(self, obj):\n98 template = get_template('base/uploadedfile_preview.jinja')\n99 
return mark_safe(template.render({'file': obj}))\n100 \n101 def snippets(self, obj):\n102 \"\"\"Snippets using this file.\"\"\"\n103 template = get_template('base/uploadedfile_snippets.jinja')\n104 return mark_safe(template.render({'snippets': obj.snippets}))\n105 \n106 \n107 class AddonAdmin(admin.ModelAdmin):\n108 list_display = ('name', 'guid')\n109 \n110 \n111 class ASRSnippetAdmin(admin.ModelAdmin):\n112 form = forms.ASRSnippetAdminForm\n113 \n114 list_display_links = (\n115 'id',\n116 'name',\n117 )\n118 list_display = (\n119 'id',\n120 'name',\n121 'status',\n122 'modified',\n123 )\n124 list_filter = (\n125 ModifiedFilter,\n126 'status',\n127 ReleaseFilter,\n128 ('template', RelatedDropdownFilter),\n129 )\n130 search_fields = (\n131 'name',\n132 )\n133 autocomplete_fields = (\n134 'campaign',\n135 'target',\n136 )\n137 preserve_filters = True\n138 readonly_fields = (\n139 'created',\n140 'modified',\n141 'uuid',\n142 'creator',\n143 'preview_url',\n144 )\n145 filter_horizontal = ('locales',)\n146 save_on_top = True\n147 save_as = True\n148 view_on_site = False\n149 \n150 fieldsets = (\n151 ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),\n152 ('Content', {\n153 'description': (\n154 '''\n155 Available deep links:
\n156
    \n157
  1. special:accounts to open Firefox Accounts
  2. \n158
  3. special:appMenu to open the hamburger menu
  4. \n159

\n160 Automatically add Snippet ID:
\n161 You can use [[snippet_id]] in any field and it\n162 will be automatically replaced by Snippet ID when served to users.\n163
\n164 Example: This is a &lt;a href=&quot;https://example.com?utm_term=[[snippet_id]]&quot;&gt;link&lt;/a&gt; # noqa\n165
\n166 '''\n167 ),\n168 'fields': ('template', 'data'),\n169 }),\n170 ('Publishing Options', {\n171 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)\n172 }),\n173 ('Other Info', {\n174 'fields': ('uuid', ('created', 'modified')),\n175 'classes': ('collapse',)\n176 }),\n177 )\n178 \n179 class Media:\n180 css = {\n181 'all': ('css/admin/ASRSnippetAdmin.css',)\n182 }\n183 \n184 def save_model(self, request, obj, form, change):\n185 obj.creator = request.user\n186 statsd.incr('save.asrsnippet')\n187 super().save_model(request, obj, form, change)\n188 \n189 def preview_url(self, obj):\n190 return obj.get_preview_url()\n191 \n192 \n193 class CampaignAdmin(admin.ModelAdmin):\n194 readonly_fields = ('created', 'modified', 'creator',)\n195 prepopulated_fields = {'slug': ('name',)}\n196 \n197 fieldsets = (\n198 ('ID', {'fields': ('name', 'slug')}),\n199 ('Other Info', {\n200 'fields': ('creator', ('created', 'modified')),\n201 }),\n202 )\n203 search_fields = (\n204 'name',\n205 )\n206 \n207 def save_model(self, request, obj, form, change):\n208 obj.creator = request.user\n209 statsd.incr('save.campaign')\n210 super().save_model(request, obj, form, change)\n211 \n212 \n213 class TargetAdmin(admin.ModelAdmin):\n214 form = forms.TargetAdminForm\n215 readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')\n216 search_fields = (\n217 'name',\n218 )\n219 fieldsets = (\n220 ('ID', {'fields': ('name',)}),\n221 ('Product channels', {\n222 'description': 'What channels will this snippet be available in?',\n223 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n224 }),\n225 ('Targeting', {\n226 'fields': (\n227 'filtr_is_default_browser',\n228 'filtr_updates_enabled',\n229 'filtr_updates_autodownload_enabled',\n230 'filtr_profile_age_created',\n231 'filtr_firefox_version',\n232 'filtr_previous_session_end',\n233 'filtr_uses_firefox_sync',\n234 'filtr_country',\n235 'filtr_is_developer',\n236 'filtr_current_search_engine',\n237 'filtr_browser_addon',\n238 'filtr_total_bookmarks_count',\n239 )\n240 }),\n241 ('Other Info', {\n242 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n243 }),\n244 )\n245 \n246 def save_model(self, request, obj, form, change):\n247 obj.creator = request.user\n248 statsd.incr('save.target')\n249 super().save_model(request, obj, form, change)\n250\n[end of snippets/base/admin/adminmodels.py]\n
\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py\n--- a/snippets/base/admin/adminmodels.py\n+++ b/snippets/base/admin/adminmodels.py\n@@ -180,6 +180,10 @@\n css = {\n 'all': ('css/admin/ASRSnippetAdmin.css',)\n }\n+ js = (\n+ 'js/admin/clipboard.min.js',\n+ 'js/admin/copy_preview.js',\n+ )\n \n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n@@ -187,7 +191,15 @@\n super().save_model(request, obj, form, change)\n \n def preview_url(self, obj):\n- return obj.get_preview_url()\n+ text = f'''\n+ {obj.get_preview_url()}\n+ \n+ '''\n+ return mark_safe(text)\n \n \n class CampaignAdmin(admin.ModelAdmin):\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py\\n--- a/snippets/base/admin/adminmodels.py\\n+++ b/snippets/base/admin/adminmodels.py\\n@@ -180,6 +180,10 @@\\n css = {\\n 'all': ('css/admin/ASRSnippetAdmin.css',)\\n }\\n+ js = (\\n+ 'js/admin/clipboard.min.js',\\n+ 'js/admin/copy_preview.js',\\n+ )\\n \\n def save_model(self, request, obj, form, change):\\n obj.creator = request.user\\n@@ -187,7 +191,15 @@\\n super().save_model(request, obj, form, change)\\n \\n def preview_url(self, obj):\\n- return obj.get_preview_url()\\n+ text = f'''\\n+ {obj.get_preview_url()}\\n+ \\n+ '''\\n+ return mark_safe(text)\\n \\n \\n class CampaignAdmin(admin.ModelAdmin):\\n\", \"issue\": \"Create a button to copy to clipboard the preview link on click\\nPreview link for ASR Snippets cannot be directly clicked and instead must be copied and pasted to the URL bar. Create a `Copy` button next to the link to make the procedure easier. 
\\n\", \"before_files\": [{\"content\": \"import re\\n\\nfrom django.contrib import admin\\nfrom django.db.models import TextField, Q\\nfrom django.template.loader import get_template\\nfrom django.utils.safestring import mark_safe\\n\\nfrom reversion.admin import VersionAdmin\\nfrom django_ace import AceWidget\\nfrom django_statsd.clients import statsd\\nfrom jinja2.meta import find_undeclared_variables\\nfrom django_admin_listfilter_dropdown.filters import RelatedDropdownFilter\\n\\nfrom snippets.base import forms, models\\nfrom snippets.base.models import JINJA_ENV\\nfrom snippets.base.admin.filters import ModifiedFilter, ReleaseFilter\\n\\n\\nMATCH_LOCALE_REGEX = re.compile('(\\\\w+(?:-\\\\w+)*)')\\nRESERVED_VARIABLES = ('_', 'snippet_id')\\n\\n\\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\\n 'version', 'locale', 'appbuildid', 'build_target',\\n 'channel', 'os_version', 'distribution',\\n 'distribution_version', 'modified')\\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\\n 'build_target', 'channel', 'distribution', 'locale')\\n save_on_top = True\\n search_fields = ('description',)\\n\\n\\nclass LogEntryAdmin(admin.ModelAdmin):\\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\\n list_filter = ('user', 'content_type')\\n\\n\\nclass SnippetTemplateVariableInline(admin.TabularInline):\\n model = models.SnippetTemplateVariable\\n formset = forms.SnippetTemplateVariableInlineFormset\\n max_num = 0\\n can_delete = False\\n readonly_fields = ('name',)\\n fields = ('name', 'type', 'order', 'description')\\n\\n\\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\\n save_on_top = True\\n list_display = ('name', 'priority', 'hidden')\\n list_filter = ('hidden', 'startpage')\\n inlines = (SnippetTemplateVariableInline,)\\n formfield_overrides = {\\n TextField: {'widget': AceWidget(mode='html', theme='github',\\n width='1200px', height='500px')},\\n }\\n\\n class Media:\\n css = {\\n 'all': ('css/admin.css',)\\n }\\n\\n def save_related(self, request, form, formsets, change):\\n \\\"\\\"\\\"\\n After saving the related objects, remove and add\\n SnippetTemplateVariables depending on how the template code changed.\\n \\\"\\\"\\\"\\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\\n change)\\n\\n # Parse the template code and find any undefined variables.\\n ast = JINJA_ENV.env.parse(form.instance.code)\\n new_vars = find_undeclared_variables(ast)\\n var_manager = form.instance.variable_set\\n\\n # Filter out reserved variable names.\\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\\n\\n # Delete variables not in the new set.\\n var_manager.filter(~Q(name__in=new_vars)).delete()\\n\\n # Create variables that don't exist.\\n for i, variable in enumerate(new_vars, start=1):\\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\\n template=form.instance, name=variable)\\n if obj.order == 0:\\n obj.order = i * 10\\n obj.save()\\n\\n\\nclass UploadedFileAdmin(admin.ModelAdmin):\\n readonly_fields = ('url', 'preview', 'snippets')\\n list_display = ('name', 'url', 'preview', 'modified')\\n prepopulated_fields = {'name': ('file',)}\\n form = forms.UploadedFileAdminForm\\n\\n def preview(self, obj):\\n template = get_template('base/uploadedfile_preview.jinja')\\n return mark_safe(template.render({'file': obj}))\\n\\n def snippets(self, obj):\\n \\\"\\\"\\\"Snippets using this 
file.\\\"\\\"\\\"\\n template = get_template('base/uploadedfile_snippets.jinja')\\n return mark_safe(template.render({'snippets': obj.snippets}))\\n\\n\\nclass AddonAdmin(admin.ModelAdmin):\\n list_display = ('name', 'guid')\\n\\n\\nclass ASRSnippetAdmin(admin.ModelAdmin):\\n form = forms.ASRSnippetAdminForm\\n\\n list_display_links = (\\n 'id',\\n 'name',\\n )\\n list_display = (\\n 'id',\\n 'name',\\n 'status',\\n 'modified',\\n )\\n list_filter = (\\n ModifiedFilter,\\n 'status',\\n ReleaseFilter,\\n ('template', RelatedDropdownFilter),\\n )\\n search_fields = (\\n 'name',\\n )\\n autocomplete_fields = (\\n 'campaign',\\n 'target',\\n )\\n preserve_filters = True\\n readonly_fields = (\\n 'created',\\n 'modified',\\n 'uuid',\\n 'creator',\\n 'preview_url',\\n )\\n filter_horizontal = ('locales',)\\n save_on_top = True\\n save_as = True\\n view_on_site = False\\n\\n fieldsets = (\\n ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),\\n ('Content', {\\n 'description': (\\n '''\\n Available deep links:
\\n
    \\n
  1. special:accounts to open Firefox Accounts
  2. \\n
  3. special:appMenu to open the hamburger menu
  4. \\n

\\n Automatically add Snippet ID:
\\n You can use [[snippet_id]] in any field and it\\n will be automatically replaced by Snippet ID when served to users.\\n
\\n Example: This is a &lt;a href=&quot;https://example.com?utm_term=[[snippet_id]]&quot;&gt;link&lt;/a&gt; # noqa\\n
\\n '''\\n ),\\n 'fields': ('template', 'data'),\\n }),\\n ('Publishing Options', {\\n 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)\\n }),\\n ('Other Info', {\\n 'fields': ('uuid', ('created', 'modified')),\\n 'classes': ('collapse',)\\n }),\\n )\\n\\n class Media:\\n css = {\\n 'all': ('css/admin/ASRSnippetAdmin.css',)\\n }\\n\\n def save_model(self, request, obj, form, change):\\n obj.creator = request.user\\n statsd.incr('save.asrsnippet')\\n super().save_model(request, obj, form, change)\\n\\n def preview_url(self, obj):\\n return obj.get_preview_url()\\n\\n\\nclass CampaignAdmin(admin.ModelAdmin):\\n readonly_fields = ('created', 'modified', 'creator',)\\n prepopulated_fields = {'slug': ('name',)}\\n\\n fieldsets = (\\n ('ID', {'fields': ('name', 'slug')}),\\n ('Other Info', {\\n 'fields': ('creator', ('created', 'modified')),\\n }),\\n )\\n search_fields = (\\n 'name',\\n )\\n\\n def save_model(self, request, obj, form, change):\\n obj.creator = request.user\\n statsd.incr('save.campaign')\\n super().save_model(request, obj, form, change)\\n\\n\\nclass TargetAdmin(admin.ModelAdmin):\\n form = forms.TargetAdminForm\\n readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')\\n search_fields = (\\n 'name',\\n )\\n fieldsets = (\\n ('ID', {'fields': ('name',)}),\\n ('Product channels', {\\n 'description': 'What channels will this snippet be available in?',\\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\\n }),\\n ('Targeting', {\\n 'fields': (\\n 'filtr_is_default_browser',\\n 'filtr_updates_enabled',\\n 'filtr_updates_autodownload_enabled',\\n 'filtr_profile_age_created',\\n 'filtr_firefox_version',\\n 'filtr_previous_session_end',\\n 'filtr_uses_firefox_sync',\\n 'filtr_country',\\n 'filtr_is_developer',\\n 'filtr_current_search_engine',\\n 'filtr_browser_addon',\\n 'filtr_total_bookmarks_count',\\n )\\n }),\\n ('Other Info', {\\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\\n }),\\n )\\n\\n def save_model(self, request, obj, form, change):\\n obj.creator = request.user\\n statsd.incr('save.target')\\n super().save_model(request, obj, form, change)\\n\", \"path\": \"snippets/base/admin/adminmodels.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3087,"string":"3,087"},"num_tokens_diff":{"kind":"number","value":260,"string":"260"}}},{"rowIdx":18180,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_1928"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"goauthentik__authentik-3299"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nGet username from mailcow source\n**Is your feature request related to a problem? Please describe.**\r\nI like to get a username from mailcow. 
With username the enrollment for new users is more simple.\r\n\r\n**Describe the solution you'd like**\r\nSet username to full_name provided by mailcow oauths source.\r\n\r\n**Additional context**\r\nFor other sources the username is also set redundant to another attribute if there is no special source attribute:\r\nazure_ad.py:\r\n```\r\n \"username\": info.get(\"displayName\"),\r\n \"name\": info.get(\"displayName\"),\r\n```\r\n\r\ndiscord.py:\r\n```\r\n \"username\": info.get(\"username\"),\r\n \"name\": info.get(\"username\"),\r\n```\r\n\r\nfacebook.py:\r\n```\r\n \"username\": info.get(\"name\"),\r\n \"name\": info.get(\"name\"),\r\n```\r\n\r\nreddit.py\r\n```\r\n \"username\": info.get(\"name\"),\r\n \"name\": info.get(\"name\"),\r\n```\r\n\n\n\n\n[start of authentik/sources/oauth/types/mailcow.py]\n1 \"\"\"Mailcow OAuth Views\"\"\"\n2 from typing import Any, Optional\n3 \n4 from requests.exceptions import RequestException\n5 from structlog.stdlib import get_logger\n6 \n7 from authentik.sources.oauth.clients.oauth2 import OAuth2Client\n8 from authentik.sources.oauth.types.manager import MANAGER, SourceType\n9 from authentik.sources.oauth.views.callback import OAuthCallback\n10 from authentik.sources.oauth.views.redirect import OAuthRedirect\n11 \n12 LOGGER = get_logger()\n13 \n14 \n15 class MailcowOAuthRedirect(OAuthRedirect):\n16 \"\"\"Mailcow OAuth2 Redirect\"\"\"\n17 \n18 def get_additional_parameters(self, source): # pragma: no cover\n19 return {\n20 \"scope\": [\"profile\"],\n21 }\n22 \n23 \n24 class MailcowOAuth2Client(OAuth2Client):\n25 \"\"\"MailcowOAuth2Client, for some reason, mailcow does not like the default headers\"\"\"\n26 \n27 def get_profile_info(self, token: dict[str, str]) -> Optional[dict[str, Any]]:\n28 \"Fetch user profile information.\"\n29 profile_url = self.source.type.profile_url or \"\"\n30 if self.source.type.urls_customizable and self.source.profile_url:\n31 profile_url = self.source.profile_url\n32 try:\n33 response = self.session.request(\n34 \"get\",\n35 f\"{profile_url}?access_token={token['access_token']}\",\n36 )\n37 response.raise_for_status()\n38 except RequestException as exc:\n39 LOGGER.warning(\"Unable to fetch user profile\", exc=exc, body=response.text)\n40 return None\n41 else:\n42 return response.json()\n43 \n44 \n45 class MailcowOAuth2Callback(OAuthCallback):\n46 \"\"\"Mailcow OAuth2 Callback\"\"\"\n47 \n48 client_class = MailcowOAuth2Client\n49 \n50 def get_user_enroll_context(\n51 self,\n52 info: dict[str, Any],\n53 ) -> dict[str, Any]:\n54 return {\n55 \"email\": info.get(\"email\"),\n56 \"name\": info.get(\"full_name\"),\n57 }\n58 \n59 \n60 @MANAGER.type()\n61 class MailcowType(SourceType):\n62 \"\"\"Mailcow Type definition\"\"\"\n63 \n64 callback_view = MailcowOAuth2Callback\n65 redirect_view = MailcowOAuthRedirect\n66 name = \"Mailcow\"\n67 slug = \"mailcow\"\n68 \n69 urls_customizable = True\n70\n[end of authentik/sources/oauth/types/mailcow.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/authentik/sources/oauth/types/mailcow.py b/authentik/sources/oauth/types/mailcow.py\n--- a/authentik/sources/oauth/types/mailcow.py\n+++ b/authentik/sources/oauth/types/mailcow.py\n@@ -52,6 +52,7 @@\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n+ \"username\": info.get(\"full_name\"),\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"full_name\"),\n }\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/authentik/sources/oauth/types/mailcow.py b/authentik/sources/oauth/types/mailcow.py\\n--- a/authentik/sources/oauth/types/mailcow.py\\n+++ b/authentik/sources/oauth/types/mailcow.py\\n@@ -52,6 +52,7 @@\\n info: dict[str, Any],\\n ) -> dict[str, Any]:\\n return {\\n+ \\\"username\\\": info.get(\\\"full_name\\\"),\\n \\\"email\\\": info.get(\\\"email\\\"),\\n \\\"name\\\": info.get(\\\"full_name\\\"),\\n }\\n\", \"issue\": \"Get username from mailcow source\\n**Is your feature request related to a problem? Please describe.**\\r\\nI like to get a username from mailcow. 
With username the enrollment for new users is more simple.\\r\\n\\r\\n**Describe the solution you'd like**\\r\\nSet username to full_name provided by mailcow oauths source.\\r\\n\\r\\n**Additional context**\\r\\nFor other sources the username is also set redundant to another attribute if there is no special source attribute:\\r\\nazure_ad.py:\\r\\n```\\r\\n \\\"username\\\": info.get(\\\"displayName\\\"),\\r\\n \\\"name\\\": info.get(\\\"displayName\\\"),\\r\\n```\\r\\n\\r\\ndiscord.py:\\r\\n```\\r\\n \\\"username\\\": info.get(\\\"username\\\"),\\r\\n \\\"name\\\": info.get(\\\"username\\\"),\\r\\n```\\r\\n\\r\\nfacebook.py:\\r\\n```\\r\\n \\\"username\\\": info.get(\\\"name\\\"),\\r\\n \\\"name\\\": info.get(\\\"name\\\"),\\r\\n```\\r\\n\\r\\nreddit.py\\r\\n```\\r\\n \\\"username\\\": info.get(\\\"name\\\"),\\r\\n \\\"name\\\": info.get(\\\"name\\\"),\\r\\n```\\r\\n\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"Mailcow OAuth Views\\\"\\\"\\\"\\nfrom typing import Any, Optional\\n\\nfrom requests.exceptions import RequestException\\nfrom structlog.stdlib import get_logger\\n\\nfrom authentik.sources.oauth.clients.oauth2 import OAuth2Client\\nfrom authentik.sources.oauth.types.manager import MANAGER, SourceType\\nfrom authentik.sources.oauth.views.callback import OAuthCallback\\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\\n\\nLOGGER = get_logger()\\n\\n\\nclass MailcowOAuthRedirect(OAuthRedirect):\\n \\\"\\\"\\\"Mailcow OAuth2 Redirect\\\"\\\"\\\"\\n\\n def get_additional_parameters(self, source): # pragma: no cover\\n return {\\n \\\"scope\\\": [\\\"profile\\\"],\\n }\\n\\n\\nclass MailcowOAuth2Client(OAuth2Client):\\n \\\"\\\"\\\"MailcowOAuth2Client, for some reason, mailcow does not like the default headers\\\"\\\"\\\"\\n\\n def get_profile_info(self, token: dict[str, str]) -> Optional[dict[str, Any]]:\\n \\\"Fetch user profile information.\\\"\\n profile_url = self.source.type.profile_url or \\\"\\\"\\n if self.source.type.urls_customizable and self.source.profile_url:\\n profile_url = self.source.profile_url\\n try:\\n response = self.session.request(\\n \\\"get\\\",\\n f\\\"{profile_url}?access_token={token['access_token']}\\\",\\n )\\n response.raise_for_status()\\n except RequestException as exc:\\n LOGGER.warning(\\\"Unable to fetch user profile\\\", exc=exc, body=response.text)\\n return None\\n else:\\n return response.json()\\n\\n\\nclass MailcowOAuth2Callback(OAuthCallback):\\n \\\"\\\"\\\"Mailcow OAuth2 Callback\\\"\\\"\\\"\\n\\n client_class = MailcowOAuth2Client\\n\\n def get_user_enroll_context(\\n self,\\n info: dict[str, Any],\\n ) -> dict[str, Any]:\\n return {\\n \\\"email\\\": info.get(\\\"email\\\"),\\n \\\"name\\\": info.get(\\\"full_name\\\"),\\n }\\n\\n\\n@MANAGER.type()\\nclass MailcowType(SourceType):\\n \\\"\\\"\\\"Mailcow Type definition\\\"\\\"\\\"\\n\\n callback_view = MailcowOAuth2Callback\\n redirect_view = MailcowOAuthRedirect\\n name = \\\"Mailcow\\\"\\n slug = \\\"mailcow\\\"\\n\\n urls_customizable = True\\n\", \"path\": \"authentik/sources/oauth/types/mailcow.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1319,"string":"1,319"},"num_tokens_diff":{"kind":"number","value":111,"string":"111"}}},{"rowIdx":18181,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_26693"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"benoitc__gunicorn-1581"},"prompt":{"kind":"string","value":"You will be provided with a 
partial code base and an issue statement explaining a problem to resolve.\n\n--config doesn't work in GUNICORN_CMD_ARGS\nSpecifying `--config` in the `GUNICORN_CMD_ARGS` environment variable quietly fails as config file loading only happens when it is passed as an argument:\r\nhttps://github.com/benoitc/gunicorn/blob/328e509260ae70de6c04c5ba885ee17960b3ced5/gunicorn/app/base.py#L137-L175\n\n\n\n[start of gunicorn/app/base.py]\n1 # -*- coding: utf-8 -\n2 #\n3 # This file is part of gunicorn released under the MIT license.\n4 # See the NOTICE for more information.\n5 from __future__ import print_function\n6 \n7 import os\n8 import sys\n9 import traceback\n10 \n11 from gunicorn._compat import execfile_\n12 from gunicorn import util\n13 from gunicorn.arbiter import Arbiter\n14 from gunicorn.config import Config, get_default_config_file\n15 from gunicorn import debug\n16 \n17 class BaseApplication(object):\n18 \"\"\"\n19 An application interface for configuring and loading\n20 the various necessities for any given web framework.\n21 \"\"\"\n22 def __init__(self, usage=None, prog=None):\n23 self.usage = usage\n24 self.cfg = None\n25 self.callable = None\n26 self.prog = prog\n27 self.logger = None\n28 self.do_load_config()\n29 \n30 def do_load_config(self):\n31 \"\"\"\n32 Loads the configuration\n33 \"\"\"\n34 try:\n35 self.load_default_config()\n36 self.load_config()\n37 except Exception as e:\n38 print(\"\\nError: %s\" % str(e), file=sys.stderr)\n39 sys.stderr.flush()\n40 sys.exit(1)\n41 \n42 def load_default_config(self):\n43 # init configuration\n44 self.cfg = Config(self.usage, prog=self.prog)\n45 \n46 def init(self, parser, opts, args):\n47 raise NotImplementedError\n48 \n49 def load(self):\n50 raise NotImplementedError\n51 \n52 def load_config(self):\n53 \"\"\"\n54 This method is used to load the configuration from one or several input(s).\n55 Custom Command line, configuration file.\n56 You have to override this method in your class.\n57 \"\"\"\n58 raise NotImplementedError\n59 \n60 def reload(self):\n61 self.do_load_config()\n62 if self.cfg.spew:\n63 debug.spew()\n64 \n65 def wsgi(self):\n66 if self.callable is None:\n67 self.callable = self.load()\n68 return self.callable\n69 \n70 def run(self):\n71 try:\n72 Arbiter(self).run()\n73 except RuntimeError as e:\n74 print(\"\\nError: %s\\n\" % e, file=sys.stderr)\n75 sys.stderr.flush()\n76 sys.exit(1)\n77 \n78 class Application(BaseApplication):\n79 \n80 def get_config_from_filename(self, filename):\n81 \n82 if not os.path.exists(filename):\n83 raise RuntimeError(\"%r doesn't exist\" % filename)\n84 \n85 cfg = {\n86 \"__builtins__\": __builtins__,\n87 \"__name__\": \"__config__\",\n88 \"__file__\": filename,\n89 \"__doc__\": None,\n90 \"__package__\": None\n91 }\n92 try:\n93 execfile_(filename, cfg, cfg)\n94 except Exception:\n95 print(\"Failed to read config file: %s\" % filename, file=sys.stderr)\n96 traceback.print_exc()\n97 sys.stderr.flush()\n98 sys.exit(1)\n99 \n100 return cfg\n101 \n102 def get_config_from_module_name(self, module_name):\n103 return vars(util.import_module(module_name))\n104 \n105 def load_config_from_module_name_or_filename(self, location):\n106 \"\"\"\n107 Loads the configuration file: the file is a python file, otherwise raise an RuntimeError\n108 Exception or stop the process if the configuration file contains a syntax error.\n109 \"\"\"\n110 \n111 if location.startswith(\"python:\"):\n112 module_name = location[len(\"python:\"):]\n113 cfg = self.get_config_from_module_name(module_name)\n114 else:\n115 if 
location.startswith(\"file:\"):\n116 filename = location[len(\"file:\"):]\n117 else:\n118 filename = location\n119 cfg = self.get_config_from_filename(filename)\n120 \n121 for k, v in cfg.items():\n122 # Ignore unknown names\n123 if k not in self.cfg.settings:\n124 continue\n125 try:\n126 self.cfg.set(k.lower(), v)\n127 except:\n128 print(\"Invalid value for %s: %s\\n\" % (k, v), file=sys.stderr)\n129 sys.stderr.flush()\n130 raise\n131 \n132 return cfg\n133 \n134 def load_config_from_file(self, filename):\n135 return self.load_config_from_module_name_or_filename(location=filename)\n136 \n137 def load_config(self):\n138 # parse console args\n139 parser = self.cfg.parser()\n140 args = parser.parse_args()\n141 \n142 # optional settings from apps\n143 cfg = self.init(parser, args, args.args)\n144 \n145 # Load up the any app specific configuration\n146 if cfg:\n147 for k, v in cfg.items():\n148 self.cfg.set(k.lower(), v)\n149 \n150 if args.config:\n151 self.load_config_from_file(args.config)\n152 else:\n153 default_config = get_default_config_file()\n154 if default_config is not None:\n155 self.load_config_from_file(default_config)\n156 \n157 # Load up environment configuration\n158 env_vars = self.cfg.get_cmd_args_from_env()\n159 if env_vars:\n160 env_args = parser.parse_args(env_vars)\n161 for k, v in vars(env_args).items():\n162 if v is None:\n163 continue\n164 if k == \"args\":\n165 continue\n166 self.cfg.set(k.lower(), v)\n167 \n168 # Lastly, update the configuration with any command line\n169 # settings.\n170 for k, v in vars(args).items():\n171 if v is None:\n172 continue\n173 if k == \"args\":\n174 continue\n175 self.cfg.set(k.lower(), v)\n176 \n177 def run(self):\n178 if self.cfg.check_config:\n179 try:\n180 self.load()\n181 except:\n182 msg = \"\\nError while loading the application:\\n\"\n183 print(msg, file=sys.stderr)\n184 traceback.print_exc()\n185 sys.stderr.flush()\n186 sys.exit(1)\n187 sys.exit(0)\n188 \n189 if self.cfg.spew:\n190 debug.spew()\n191 \n192 if self.cfg.daemon:\n193 util.daemonize(self.cfg.enable_stdio_inheritance)\n194 \n195 # set python paths\n196 if self.cfg.pythonpath:\n197 paths = self.cfg.pythonpath.split(\",\")\n198 for path in paths:\n199 pythonpath = os.path.abspath(path)\n200 if pythonpath not in sys.path:\n201 sys.path.insert(0, pythonpath)\n202 \n203 super(Application, self).run()\n204\n[end of gunicorn/app/base.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/gunicorn/app/base.py b/gunicorn/app/base.py\n--- a/gunicorn/app/base.py\n+++ b/gunicorn/app/base.py\n@@ -147,26 +147,26 @@\n for k, v in cfg.items():\n self.cfg.set(k.lower(), v)\n \n+ env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())\n+\n if args.config:\n self.load_config_from_file(args.config)\n+ elif env_args.config:\n+ self.load_config_from_file(env_args.config)\n else:\n default_config = get_default_config_file()\n if default_config is not None:\n self.load_config_from_file(default_config)\n \n # Load up environment configuration\n- env_vars = self.cfg.get_cmd_args_from_env()\n- if env_vars:\n- env_args = parser.parse_args(env_vars)\n- for k, v in vars(env_args).items():\n- if v is None:\n- continue\n- if k == \"args\":\n- continue\n- self.cfg.set(k.lower(), v)\n+ for k, v in vars(env_args).items():\n+ if v is None:\n+ continue\n+ if k == \"args\":\n+ continue\n+ self.cfg.set(k.lower(), v)\n \n- # Lastly, update the configuration with any command line\n- # settings.\n+ # Lastly, update the configuration with any command line settings.\n for k, v in vars(args).items():\n if v is None:\n continue\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/gunicorn/app/base.py b/gunicorn/app/base.py\\n--- a/gunicorn/app/base.py\\n+++ b/gunicorn/app/base.py\\n@@ -147,26 +147,26 @@\\n for k, v in cfg.items():\\n self.cfg.set(k.lower(), v)\\n \\n+ env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())\\n+\\n if args.config:\\n self.load_config_from_file(args.config)\\n+ elif env_args.config:\\n+ self.load_config_from_file(env_args.config)\\n else:\\n default_config = get_default_config_file()\\n if default_config is not None:\\n self.load_config_from_file(default_config)\\n \\n # Load up environment configuration\\n- env_vars = self.cfg.get_cmd_args_from_env()\\n- if env_vars:\\n- env_args = parser.parse_args(env_vars)\\n- for k, v in vars(env_args).items():\\n- if v is None:\\n- continue\\n- if k == \\\"args\\\":\\n- continue\\n- self.cfg.set(k.lower(), v)\\n+ for k, v in vars(env_args).items():\\n+ if v is None:\\n+ continue\\n+ if k == \\\"args\\\":\\n+ continue\\n+ self.cfg.set(k.lower(), v)\\n \\n- # Lastly, update the configuration with any command line\\n- # settings.\\n+ # Lastly, update the configuration with any command line settings.\\n for k, v in vars(args).items():\\n if v is None:\\n continue\\n\", \"issue\": \"--config doesn't work in GUNICORN_CMD_ARGS\\nSpecifying `--config` in the 
`GUNICORN_CMD_ARGS` environment variable quietly fails as config file loading only happens when it is passed as an argument:\\r\\nhttps://github.com/benoitc/gunicorn/blob/328e509260ae70de6c04c5ba885ee17960b3ced5/gunicorn/app/base.py#L137-L175\\n\", \"before_files\": [{\"content\": \"# -*- coding: utf-8 -\\n#\\n# This file is part of gunicorn released under the MIT license.\\n# See the NOTICE for more information.\\nfrom __future__ import print_function\\n\\nimport os\\nimport sys\\nimport traceback\\n\\nfrom gunicorn._compat import execfile_\\nfrom gunicorn import util\\nfrom gunicorn.arbiter import Arbiter\\nfrom gunicorn.config import Config, get_default_config_file\\nfrom gunicorn import debug\\n\\nclass BaseApplication(object):\\n \\\"\\\"\\\"\\n An application interface for configuring and loading\\n the various necessities for any given web framework.\\n \\\"\\\"\\\"\\n def __init__(self, usage=None, prog=None):\\n self.usage = usage\\n self.cfg = None\\n self.callable = None\\n self.prog = prog\\n self.logger = None\\n self.do_load_config()\\n\\n def do_load_config(self):\\n \\\"\\\"\\\"\\n Loads the configuration\\n \\\"\\\"\\\"\\n try:\\n self.load_default_config()\\n self.load_config()\\n except Exception as e:\\n print(\\\"\\\\nError: %s\\\" % str(e), file=sys.stderr)\\n sys.stderr.flush()\\n sys.exit(1)\\n\\n def load_default_config(self):\\n # init configuration\\n self.cfg = Config(self.usage, prog=self.prog)\\n\\n def init(self, parser, opts, args):\\n raise NotImplementedError\\n\\n def load(self):\\n raise NotImplementedError\\n\\n def load_config(self):\\n \\\"\\\"\\\"\\n This method is used to load the configuration from one or several input(s).\\n Custom Command line, configuration file.\\n You have to override this method in your class.\\n \\\"\\\"\\\"\\n raise NotImplementedError\\n\\n def reload(self):\\n self.do_load_config()\\n if self.cfg.spew:\\n debug.spew()\\n\\n def wsgi(self):\\n if self.callable is None:\\n self.callable = self.load()\\n return self.callable\\n\\n def run(self):\\n try:\\n Arbiter(self).run()\\n except RuntimeError as e:\\n print(\\\"\\\\nError: %s\\\\n\\\" % e, file=sys.stderr)\\n sys.stderr.flush()\\n sys.exit(1)\\n\\nclass Application(BaseApplication):\\n\\n def get_config_from_filename(self, filename):\\n\\n if not os.path.exists(filename):\\n raise RuntimeError(\\\"%r doesn't exist\\\" % filename)\\n\\n cfg = {\\n \\\"__builtins__\\\": __builtins__,\\n \\\"__name__\\\": \\\"__config__\\\",\\n \\\"__file__\\\": filename,\\n \\\"__doc__\\\": None,\\n \\\"__package__\\\": None\\n }\\n try:\\n execfile_(filename, cfg, cfg)\\n except Exception:\\n print(\\\"Failed to read config file: %s\\\" % filename, file=sys.stderr)\\n traceback.print_exc()\\n sys.stderr.flush()\\n sys.exit(1)\\n\\n return cfg\\n\\n def get_config_from_module_name(self, module_name):\\n return vars(util.import_module(module_name))\\n\\n def load_config_from_module_name_or_filename(self, location):\\n \\\"\\\"\\\"\\n Loads the configuration file: the file is a python file, otherwise raise an RuntimeError\\n Exception or stop the process if the configuration file contains a syntax error.\\n \\\"\\\"\\\"\\n\\n if location.startswith(\\\"python:\\\"):\\n module_name = location[len(\\\"python:\\\"):]\\n cfg = self.get_config_from_module_name(module_name)\\n else:\\n if location.startswith(\\\"file:\\\"):\\n filename = location[len(\\\"file:\\\"):]\\n else:\\n filename = location\\n cfg = self.get_config_from_filename(filename)\\n\\n for k, v in cfg.items():\\n # Ignore 
unknown names\\n if k not in self.cfg.settings:\\n continue\\n try:\\n self.cfg.set(k.lower(), v)\\n except:\\n print(\\\"Invalid value for %s: %s\\\\n\\\" % (k, v), file=sys.stderr)\\n sys.stderr.flush()\\n raise\\n\\n return cfg\\n\\n def load_config_from_file(self, filename):\\n return self.load_config_from_module_name_or_filename(location=filename)\\n\\n def load_config(self):\\n # parse console args\\n parser = self.cfg.parser()\\n args = parser.parse_args()\\n\\n # optional settings from apps\\n cfg = self.init(parser, args, args.args)\\n\\n # Load up the any app specific configuration\\n if cfg:\\n for k, v in cfg.items():\\n self.cfg.set(k.lower(), v)\\n\\n if args.config:\\n self.load_config_from_file(args.config)\\n else:\\n default_config = get_default_config_file()\\n if default_config is not None:\\n self.load_config_from_file(default_config)\\n\\n # Load up environment configuration\\n env_vars = self.cfg.get_cmd_args_from_env()\\n if env_vars:\\n env_args = parser.parse_args(env_vars)\\n for k, v in vars(env_args).items():\\n if v is None:\\n continue\\n if k == \\\"args\\\":\\n continue\\n self.cfg.set(k.lower(), v)\\n\\n # Lastly, update the configuration with any command line\\n # settings.\\n for k, v in vars(args).items():\\n if v is None:\\n continue\\n if k == \\\"args\\\":\\n continue\\n self.cfg.set(k.lower(), v)\\n\\n def run(self):\\n if self.cfg.check_config:\\n try:\\n self.load()\\n except:\\n msg = \\\"\\\\nError while loading the application:\\\\n\\\"\\n print(msg, file=sys.stderr)\\n traceback.print_exc()\\n sys.stderr.flush()\\n sys.exit(1)\\n sys.exit(0)\\n\\n if self.cfg.spew:\\n debug.spew()\\n\\n if self.cfg.daemon:\\n util.daemonize(self.cfg.enable_stdio_inheritance)\\n\\n # set python paths\\n if self.cfg.pythonpath:\\n paths = self.cfg.pythonpath.split(\\\",\\\")\\n for path in paths:\\n pythonpath = os.path.abspath(path)\\n if pythonpath not in sys.path:\\n sys.path.insert(0, pythonpath)\\n\\n super(Application, self).run()\\n\", \"path\": \"gunicorn/app/base.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2398,"string":"2,398"},"num_tokens_diff":{"kind":"number","value":328,"string":"328"}}},{"rowIdx":18182,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_6594"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"pytorch__ignite-1192"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nDistributedProxySampler RuntimeError when indices are padded\n## 🐛 Bug description\r\n\r\nThe RuntimeError that occurs in the `DistributedProxySampler` on [line 241](https://github.com/pytorch/ignite/blob/master/ignite/distributed/auto.py#L241) shouldn't be there since the indices are padded with the full sample which was updated because of [this comment](https://github.com/pytorch/pytorch/issues/23430#issuecomment-597191137).\r\n\r\n## Environment\r\n\r\n - PyTorch Version (e.g., 1.4):\r\n - Ignite Version (e.g., 0.3.0):\r\n - OS (e.g., Linux):\r\n - How you installed Ignite (`conda`, `pip`, source):\r\n - Python version:\r\n - Any other relevant information:\r\n\n\n\n\n[start of ignite/distributed/auto.py]\n1 import warnings\n2 \n3 import torch\n4 import torch.nn as nn\n5 from torch.optim.optimizer import Optimizer\n6 from torch.utils.data import DataLoader, Dataset\n7 from torch.utils.data.distributed import DistributedSampler\n8 from 
torch.utils.data.sampler import Sampler\n9 \n10 from ignite.distributed import utils as idist\n11 from ignite.distributed.comp_models import native as idist_native\n12 from ignite.distributed.comp_models import xla as idist_xla\n13 from ignite.utils import setup_logger\n14 \n15 __all__ = [\"auto_dataloader\", \"auto_model\", \"auto_optim\", \"DistributedProxySampler\"]\n16 \n17 \n18 def auto_dataloader(dataset, **kwargs):\n19 \"\"\"Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting\n20 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n21 \n22 Internally, we create a dataloader with provided kwargs while applying the following updates:\n23 \n24 - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.\n25 - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.\n26 - if no sampler provided by user, `torch DistributedSampler` is setup.\n27 - if a sampler is provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.\n28 - if the default device is 'cuda', `pin_memory` is automatically set to `True`.\n29 \n30 .. warning::\n31 \n32 Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch\n33 sampler is compatible with distributed configuration.\n34 \n35 Examples:\n36 \n37 .. code-block:: python\n38 \n39 import ignite.distribted as idist\n40 \n41 train_loader = idist.auto_dataloader(\n42 train_dataset,\n43 batch_size=32,\n44 num_workers=4,\n45 shuffle=True,\n46 pin_memory=\"cuda\" in idist.device().type,\n47 drop_last=True,\n48 )\n49 \n50 Args:\n51 dataset (Dataset): input torch dataset\n52 **kwargs: keyword arguments for `torch DataLoader`_.\n53 \n54 Returns:\n55 `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices\n56 \n57 .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\n58 .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178\n59 .. _torch DistributedSampler:\n60 https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler\n61 \"\"\"\n62 rank = idist.get_rank()\n63 world_size = idist.get_world_size()\n64 \n65 logger = setup_logger(__name__ + \".auto_dataloader\")\n66 if world_size > 1:\n67 if \"batch_size\" in kwargs and kwargs[\"batch_size\"] >= world_size:\n68 kwargs[\"batch_size\"] //= world_size\n69 \n70 nproc = idist.get_nproc_per_node()\n71 if \"num_workers\" in kwargs and kwargs[\"num_workers\"] >= nproc:\n72 kwargs[\"num_workers\"] = (kwargs[\"num_workers\"] + nproc - 1) // nproc\n73 \n74 if \"batch_sampler\" not in kwargs:\n75 if kwargs.get(\"sampler\", None) is not None:\n76 sampler = DistributedProxySampler(kwargs[\"sampler\"], num_replicas=world_size, rank=rank)\n77 else:\n78 sampler = DistributedSampler(\n79 dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get(\"shuffle\", True)\n80 )\n81 # we need to remove \"shuffle\" from kwargs if sampler is used\n82 if \"shuffle\" in kwargs:\n83 del kwargs[\"shuffle\"]\n84 \n85 kwargs[\"sampler\"] = sampler\n86 else:\n87 warnings.warn(\n88 \"Found batch_sampler in provided kwargs. 
Please, make sure that it is compatible \"\n89 \"with distributed configuration\"\n90 )\n91 \n92 if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get(\"pin_memory\", False):\n93 # TODO: How about XLA GPU ?\n94 warnings.warn(\n95 \"Found incompatible options: xla support and pin_memory args equal True. \"\n96 \"Argument `pin_memory=False` will be used to construct data loader.\"\n97 )\n98 kwargs[\"pin_memory\"] = False\n99 else:\n100 kwargs[\"pin_memory\"] = kwargs.get(\"pin_memory\", \"cuda\" in idist.device().type)\n101 \n102 logger.info(\"Use data loader kwargs for dataset '{}': \\n\\t{}\".format(repr(dataset)[:20].strip(), kwargs))\n103 dataloader = DataLoader(dataset, **kwargs)\n104 \n105 if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:\n106 \n107 logger.info(\"DataLoader is wrapped by `MpDeviceLoader` on XLA\")\n108 \n109 mp_device_loader_cls = _MpDeviceLoader\n110 try:\n111 from torch_xla.distributed.parallel_loader import MpDeviceLoader\n112 \n113 mp_device_loader_cls = MpDeviceLoader\n114 except ImportError:\n115 pass\n116 \n117 sampler = dataloader.sampler\n118 dataloader = mp_device_loader_cls(dataloader, idist.device())\n119 dataloader.sampler = sampler\n120 \n121 return dataloader\n122 \n123 \n124 def auto_model(model: nn.Module) -> nn.Module:\n125 \"\"\"Helper method to adapt provided model for non-distributed and distributed configurations (supporting\n126 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n127 \n128 Internally, we perform to following:\n129 \n130 - send model to current :meth:`~ignite.distributed.utils.device()`.\n131 - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1\n132 - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.\n133 \n134 Examples:\n135 \n136 .. code-block:: python\n137 \n138 import ignite.distribted as idist\n139 \n140 model = idist.auto_model(model)\n141 \n142 Args:\n143 model (torch.nn.Module): model to adapt.\n144 \n145 Returns:\n146 torch.nn.Module\n147 \n148 .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel\n149 .. 
_torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel\n150 \"\"\"\n151 logger = setup_logger(__name__ + \".auto_model\")\n152 \n153 model.to(idist.device())\n154 \n155 # distributed data parallel model\n156 if idist.get_world_size() > 1:\n157 if idist.backend() == idist_native.NCCL:\n158 lrank = idist.get_local_rank()\n159 logger.info(\"Apply torch DistributedDataParallel on model, device id: {}\".format(lrank))\n160 model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])\n161 elif idist.backend() == idist_native.GLOO:\n162 logger.info(\"Apply torch DistributedDataParallel on model\")\n163 model = torch.nn.parallel.DistributedDataParallel(model)\n164 \n165 # not distributed but multiple GPUs reachable so data parallel model\n166 elif torch.cuda.device_count() > 1 and \"cuda\" in idist.device().type:\n167 logger.info(\"Apply torch DataParallel on model\")\n168 model = torch.nn.parallel.DataParallel(model)\n169 \n170 return model\n171 \n172 \n173 def auto_optim(optimizer: Optimizer) -> Optimizer:\n174 \"\"\"Helper method to adapt optimizer for non-distributed and distributed configurations (supporting\n175 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n176 \n177 Internally, this method is no-op for non-distributed and torch native distributed configuration.\n178 For XLA distributed configuration, we create a new class that inherits from provided optimizer.\n179 The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.\n180 \n181 Examples:\n182 \n183 .. code-block:: python\n184 \n185 import ignite.distribted as idist\n186 \n187 optimizer = idist.auto_optim(optimizer)\n188 \n189 \n190 Args:\n191 optimizer (Optimizer): input torch optimizer\n192 \n193 Returns:\n194 Optimizer\n195 \n196 .. _xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step\n197 \n198 \"\"\"\n199 if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):\n200 return optimizer\n201 \n202 cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))\n203 return cls(optimizer)\n204 \n205 \n206 class DistributedProxySampler(DistributedSampler):\n207 \"\"\"Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.\n208 \n209 Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407\n210 \n211 \n212 .. 
note::\n213 Input sampler is assumed to have a constant size.\n214 \n215 Args:\n216 sampler (Sampler): Input torch data sampler.\n217 num_replicas (int, optional): Number of processes participating in distributed training.\n218 rank (int, optional): Rank of the current process within ``num_replicas``.\n219 \n220 \"\"\"\n221 \n222 def __init__(self, sampler: Sampler, num_replicas=None, rank=None):\n223 \n224 if not isinstance(sampler, Sampler):\n225 raise TypeError(\"Argument sampler should be instance of torch Sampler, but given: {}\".format(type(sampler)))\n226 \n227 if not hasattr(sampler, \"__len__\"):\n228 raise TypeError(\"Argument sampler should have length\")\n229 \n230 super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)\n231 self.sampler = sampler\n232 \n233 def __iter__(self):\n234 # deterministically shuffle based on epoch\n235 torch.manual_seed(self.epoch)\n236 \n237 indices = []\n238 while len(indices) < self.total_size:\n239 indices += list(self.sampler)\n240 \n241 if len(indices) != self.total_size:\n242 raise RuntimeError(\"{} vs {}\".format(len(indices), self.total_size))\n243 \n244 # subsample\n245 indices = indices[self.rank : self.total_size : self.num_replicas]\n246 if len(indices) != self.num_samples:\n247 raise RuntimeError(\"{} vs {}\".format(len(indices), self.num_samples))\n248 \n249 return iter(indices)\n250 \n251 \n252 if idist.has_xla_support:\n253 \n254 import torch_xla.core.xla_model as xm\n255 from torch_xla.distributed.parallel_loader import ParallelLoader\n256 \n257 class _MpDeviceLoader:\n258 # https://github.com/pytorch/xla/pull/2117\n259 # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available\n260 def __init__(self, loader, device, **kwargs):\n261 self._loader = loader\n262 self._device = device\n263 self._parallel_loader_kwargs = kwargs\n264 \n265 def __iter__(self):\n266 parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)\n267 return parallel_loader.per_device_loader(self._device)\n268 \n269 def __len__(self):\n270 return len(self._loader)\n271 \n272 class _XLADistributedOptimizer(Optimizer):\n273 def __init__(self, optimizer):\n274 super(self.__class__, self).__init__(optimizer.param_groups)\n275 self.wrapped_optimizer = optimizer\n276 \n277 def step(self, closure=None):\n278 xm.optimizer_step(self.wrapped_optimizer, barrier=True)\n279\n[end of ignite/distributed/auto.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py\n--- a/ignite/distributed/auto.py\n+++ b/ignite/distributed/auto.py\n@@ -238,8 +238,8 @@\n while len(indices) < self.total_size:\n indices += list(self.sampler)\n \n- if len(indices) != self.total_size:\n- raise RuntimeError(\"{} vs {}\".format(len(indices), self.total_size))\n+ if len(indices) > self.total_size:\n+ indices = indices[: self.total_size]\n \n # subsample\n indices = indices[self.rank : self.total_size : self.num_replicas]\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py\\n--- a/ignite/distributed/auto.py\\n+++ b/ignite/distributed/auto.py\\n@@ -238,8 +238,8 @@\\n while len(indices) < self.total_size:\\n indices += list(self.sampler)\\n \\n- if len(indices) != self.total_size:\\n- raise RuntimeError(\\\"{} vs {}\\\".format(len(indices), self.total_size))\\n+ if len(indices) > self.total_size:\\n+ indices = indices[: self.total_size]\\n \\n # subsample\\n indices = indices[self.rank : self.total_size : self.num_replicas]\\n\", \"issue\": \"DistributedProxySampler RuntimeError when indices are padded\\n## \\ud83d\\udc1b Bug description\\r\\n\\r\\nThe RuntimeError that occurs in the `DistributedProxySampler` on [line 241](https://github.com/pytorch/ignite/blob/master/ignite/distributed/auto.py#L241) shouldn't be there since the indices are padded with the full sample which was updated because of [this comment](https://github.com/pytorch/pytorch/issues/23430#issuecomment-597191137).\\r\\n\\r\\n## Environment\\r\\n\\r\\n - PyTorch Version (e.g., 1.4):\\r\\n - Ignite Version (e.g., 0.3.0):\\r\\n - OS (e.g., Linux):\\r\\n - How you installed Ignite (`conda`, `pip`, source):\\r\\n - Python version:\\r\\n - Any other relevant information:\\r\\n\\n\", \"before_files\": [{\"content\": \"import warnings\\n\\nimport torch\\nimport torch.nn as nn\\nfrom torch.optim.optimizer import Optimizer\\nfrom torch.utils.data import DataLoader, Dataset\\nfrom torch.utils.data.distributed import DistributedSampler\\nfrom torch.utils.data.sampler import Sampler\\n\\nfrom ignite.distributed import utils as idist\\nfrom ignite.distributed.comp_models import native as idist_native\\nfrom ignite.distributed.comp_models import xla as idist_xla\\nfrom ignite.utils import setup_logger\\n\\n__all__ = [\\\"auto_dataloader\\\", \\\"auto_model\\\", \\\"auto_optim\\\", \\\"DistributedProxySampler\\\"]\\n\\n\\ndef 
auto_dataloader(dataset, **kwargs):\\n \\\"\\\"\\\"Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting\\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\\n\\n Internally, we create a dataloader with provided kwargs while applying the following updates:\\n\\n - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.\\n - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.\\n - if no sampler provided by user, `torch DistributedSampler` is setup.\\n - if a sampler is provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.\\n - if the default device is 'cuda', `pin_memory` is automatically set to `True`.\\n\\n .. warning::\\n\\n Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch\\n sampler is compatible with distributed configuration.\\n\\n Examples:\\n\\n .. code-block:: python\\n\\n import ignite.distribted as idist\\n\\n train_loader = idist.auto_dataloader(\\n train_dataset,\\n batch_size=32,\\n num_workers=4,\\n shuffle=True,\\n pin_memory=\\\"cuda\\\" in idist.device().type,\\n drop_last=True,\\n )\\n\\n Args:\\n dataset (Dataset): input torch dataset\\n **kwargs: keyword arguments for `torch DataLoader`_.\\n\\n Returns:\\n `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices\\n\\n .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\\n .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178\\n .. _torch DistributedSampler:\\n https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler\\n \\\"\\\"\\\"\\n rank = idist.get_rank()\\n world_size = idist.get_world_size()\\n\\n logger = setup_logger(__name__ + \\\".auto_dataloader\\\")\\n if world_size > 1:\\n if \\\"batch_size\\\" in kwargs and kwargs[\\\"batch_size\\\"] >= world_size:\\n kwargs[\\\"batch_size\\\"] //= world_size\\n\\n nproc = idist.get_nproc_per_node()\\n if \\\"num_workers\\\" in kwargs and kwargs[\\\"num_workers\\\"] >= nproc:\\n kwargs[\\\"num_workers\\\"] = (kwargs[\\\"num_workers\\\"] + nproc - 1) // nproc\\n\\n if \\\"batch_sampler\\\" not in kwargs:\\n if kwargs.get(\\\"sampler\\\", None) is not None:\\n sampler = DistributedProxySampler(kwargs[\\\"sampler\\\"], num_replicas=world_size, rank=rank)\\n else:\\n sampler = DistributedSampler(\\n dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get(\\\"shuffle\\\", True)\\n )\\n # we need to remove \\\"shuffle\\\" from kwargs if sampler is used\\n if \\\"shuffle\\\" in kwargs:\\n del kwargs[\\\"shuffle\\\"]\\n\\n kwargs[\\\"sampler\\\"] = sampler\\n else:\\n warnings.warn(\\n \\\"Found batch_sampler in provided kwargs. Please, make sure that it is compatible \\\"\\n \\\"with distributed configuration\\\"\\n )\\n\\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get(\\\"pin_memory\\\", False):\\n # TODO: How about XLA GPU ?\\n warnings.warn(\\n \\\"Found incompatible options: xla support and pin_memory args equal True. 
\\\"\\n \\\"Argument `pin_memory=False` will be used to construct data loader.\\\"\\n )\\n kwargs[\\\"pin_memory\\\"] = False\\n else:\\n kwargs[\\\"pin_memory\\\"] = kwargs.get(\\\"pin_memory\\\", \\\"cuda\\\" in idist.device().type)\\n\\n logger.info(\\\"Use data loader kwargs for dataset '{}': \\\\n\\\\t{}\\\".format(repr(dataset)[:20].strip(), kwargs))\\n dataloader = DataLoader(dataset, **kwargs)\\n\\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:\\n\\n logger.info(\\\"DataLoader is wrapped by `MpDeviceLoader` on XLA\\\")\\n\\n mp_device_loader_cls = _MpDeviceLoader\\n try:\\n from torch_xla.distributed.parallel_loader import MpDeviceLoader\\n\\n mp_device_loader_cls = MpDeviceLoader\\n except ImportError:\\n pass\\n\\n sampler = dataloader.sampler\\n dataloader = mp_device_loader_cls(dataloader, idist.device())\\n dataloader.sampler = sampler\\n\\n return dataloader\\n\\n\\ndef auto_model(model: nn.Module) -> nn.Module:\\n \\\"\\\"\\\"Helper method to adapt provided model for non-distributed and distributed configurations (supporting\\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\\n\\n Internally, we perform to following:\\n\\n - send model to current :meth:`~ignite.distributed.utils.device()`.\\n - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1\\n - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.\\n\\n Examples:\\n\\n .. code-block:: python\\n\\n import ignite.distribted as idist\\n\\n model = idist.auto_model(model)\\n\\n Args:\\n model (torch.nn.Module): model to adapt.\\n\\n Returns:\\n torch.nn.Module\\n\\n .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel\\n .. _torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel\\n \\\"\\\"\\\"\\n logger = setup_logger(__name__ + \\\".auto_model\\\")\\n\\n model.to(idist.device())\\n\\n # distributed data parallel model\\n if idist.get_world_size() > 1:\\n if idist.backend() == idist_native.NCCL:\\n lrank = idist.get_local_rank()\\n logger.info(\\\"Apply torch DistributedDataParallel on model, device id: {}\\\".format(lrank))\\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])\\n elif idist.backend() == idist_native.GLOO:\\n logger.info(\\\"Apply torch DistributedDataParallel on model\\\")\\n model = torch.nn.parallel.DistributedDataParallel(model)\\n\\n # not distributed but multiple GPUs reachable so data parallel model\\n elif torch.cuda.device_count() > 1 and \\\"cuda\\\" in idist.device().type:\\n logger.info(\\\"Apply torch DataParallel on model\\\")\\n model = torch.nn.parallel.DataParallel(model)\\n\\n return model\\n\\n\\ndef auto_optim(optimizer: Optimizer) -> Optimizer:\\n \\\"\\\"\\\"Helper method to adapt optimizer for non-distributed and distributed configurations (supporting\\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\\n\\n Internally, this method is no-op for non-distributed and torch native distributed configuration.\\n For XLA distributed configuration, we create a new class that inherits from provided optimizer.\\n The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.\\n\\n Examples:\\n\\n .. 
code-block:: python\\n\\n import ignite.distribted as idist\\n\\n optimizer = idist.auto_optim(optimizer)\\n\\n\\n Args:\\n optimizer (Optimizer): input torch optimizer\\n\\n Returns:\\n Optimizer\\n\\n .. _xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step\\n\\n \\\"\\\"\\\"\\n if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):\\n return optimizer\\n\\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))\\n return cls(optimizer)\\n\\n\\nclass DistributedProxySampler(DistributedSampler):\\n \\\"\\\"\\\"Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.\\n\\n Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407\\n\\n\\n .. note::\\n Input sampler is assumed to have a constant size.\\n\\n Args:\\n sampler (Sampler): Input torch data sampler.\\n num_replicas (int, optional): Number of processes participating in distributed training.\\n rank (int, optional): Rank of the current process within ``num_replicas``.\\n\\n \\\"\\\"\\\"\\n\\n def __init__(self, sampler: Sampler, num_replicas=None, rank=None):\\n\\n if not isinstance(sampler, Sampler):\\n raise TypeError(\\\"Argument sampler should be instance of torch Sampler, but given: {}\\\".format(type(sampler)))\\n\\n if not hasattr(sampler, \\\"__len__\\\"):\\n raise TypeError(\\\"Argument sampler should have length\\\")\\n\\n super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)\\n self.sampler = sampler\\n\\n def __iter__(self):\\n # deterministically shuffle based on epoch\\n torch.manual_seed(self.epoch)\\n\\n indices = []\\n while len(indices) < self.total_size:\\n indices += list(self.sampler)\\n\\n if len(indices) != self.total_size:\\n raise RuntimeError(\\\"{} vs {}\\\".format(len(indices), self.total_size))\\n\\n # subsample\\n indices = indices[self.rank : self.total_size : self.num_replicas]\\n if len(indices) != self.num_samples:\\n raise RuntimeError(\\\"{} vs {}\\\".format(len(indices), self.num_samples))\\n\\n return iter(indices)\\n\\n\\nif idist.has_xla_support:\\n\\n import torch_xla.core.xla_model as xm\\n from torch_xla.distributed.parallel_loader import ParallelLoader\\n\\n class _MpDeviceLoader:\\n # https://github.com/pytorch/xla/pull/2117\\n # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available\\n def __init__(self, loader, device, **kwargs):\\n self._loader = loader\\n self._device = device\\n self._parallel_loader_kwargs = kwargs\\n\\n def __iter__(self):\\n parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)\\n return parallel_loader.per_device_loader(self._device)\\n\\n def __len__(self):\\n return len(self._loader)\\n\\n class _XLADistributedOptimizer(Optimizer):\\n def __init__(self, optimizer):\\n super(self.__class__, self).__init__(optimizer.param_groups)\\n self.wrapped_optimizer = optimizer\\n\\n def step(self, closure=None):\\n xm.optimizer_step(self.wrapped_optimizer, barrier=True)\\n\", \"path\": 
\"ignite/distributed/auto.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3884,"string":"3,884"},"num_tokens_diff":{"kind":"number","value":138,"string":"138"}}},{"rowIdx":18183,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_3662"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"scikit-hep__awkward-2169"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nrun cpp tests in CI \n### Version of Awkward Array\n\n2.0.6\n\n### Description and code to reproduce\n\n@agoose77 and @jpivarski - I think, we need to have at least one node to run the cpp tests.\n\n\n\n[start of dev/copy-cpp-headers.py]\n1 \"\"\"Copy the header-only cpp headers into the various package directories that they are required\"\"\"\n2 import pathlib\n3 import shutil\n4 \n5 root_path = pathlib.Path(__file__).absolute().parents[1]\n6 source_path = root_path / \"header-only\"\n7 dest_paths = (\n8 root_path / \"awkward-cpp\" / \"header-only\",\n9 root_path / \"src\" / \"awkward\" / \"_connect\" / \"header-only\",\n10 )\n11 \n12 if __name__ == \"__main__\":\n13 for path in dest_paths:\n14 if path.exists():\n15 shutil.rmtree(path)\n16 shutil.copytree(source_path, path)\n17\n[end of dev/copy-cpp-headers.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/dev/copy-cpp-headers.py b/dev/copy-cpp-headers.py\n--- a/dev/copy-cpp-headers.py\n+++ b/dev/copy-cpp-headers.py\n@@ -3,7 +3,7 @@\n import shutil\n \n root_path = pathlib.Path(__file__).absolute().parents[1]\n-source_path = root_path / \"header-only\"\n+source_path = root_path / \"header-only\" / \"include\"\n dest_paths = (\n root_path / \"awkward-cpp\" / \"header-only\",\n root_path / \"src\" / \"awkward\" / \"_connect\" / \"header-only\",\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/dev/copy-cpp-headers.py b/dev/copy-cpp-headers.py\\n--- a/dev/copy-cpp-headers.py\\n+++ b/dev/copy-cpp-headers.py\\n@@ -3,7 +3,7 @@\\n import shutil\\n \\n root_path = pathlib.Path(__file__).absolute().parents[1]\\n-source_path = root_path / \\\"header-only\\\"\\n+source_path = root_path / \\\"header-only\\\" / \\\"include\\\"\\n dest_paths = (\\n root_path / \\\"awkward-cpp\\\" / \\\"header-only\\\",\\n root_path / 
\\\"src\\\" / \\\"awkward\\\" / \\\"_connect\\\" / \\\"header-only\\\",\\n\", \"issue\": \"run cpp tests in CI \\n### Version of Awkward Array\\n\\n2.0.6\\n\\n### Description and code to reproduce\\n\\n@agoose77 and @jpivarski - I think, we need to have at least one node to run the cpp tests.\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"Copy the header-only cpp headers into the various package directories that they are required\\\"\\\"\\\"\\nimport pathlib\\nimport shutil\\n\\nroot_path = pathlib.Path(__file__).absolute().parents[1]\\nsource_path = root_path / \\\"header-only\\\"\\ndest_paths = (\\n root_path / \\\"awkward-cpp\\\" / \\\"header-only\\\",\\n root_path / \\\"src\\\" / \\\"awkward\\\" / \\\"_connect\\\" / \\\"header-only\\\",\\n)\\n\\nif __name__ == \\\"__main__\\\":\\n for path in dest_paths:\\n if path.exists():\\n shutil.rmtree(path)\\n shutil.copytree(source_path, path)\\n\", \"path\": \"dev/copy-cpp-headers.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":746,"string":"746"},"num_tokens_diff":{"kind":"number","value":136,"string":"136"}}},{"rowIdx":18184,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_23142"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"netbox-community__netbox-12244"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nHide WIFI password\n### NetBox version\n\nv3.4.3\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nIs it possible to hide the WIFI password as well? As now when you add the password it is added as plain-text.\r\nCan this be a hidden password with \"*********\" and icon on the side to make it visible?\r\n\r\nOr does it needs to be moved to \"secret(store) plugin?\r\n\r\n\n\n### Use case\n\nWell i think passwords dont need to be visible until you want it to be. \r\nThe best part is that you ofcourse need to be logged in to see this. \r\nBut some think this needs to be secure and im not sure if this is also plain text in the database itself? 
\n\n### Database changes\n\nProbally yes encrypted and hidden\n\n### External dependencies\n\nMaybe the secret plugin.\n\n\n\n[start of netbox/wireless/forms/model_forms.py]\n1 from django.utils.translation import gettext as _\n2 from dcim.models import Device, Interface, Location, Region, Site, SiteGroup\n3 from ipam.models import VLAN, VLANGroup\n4 from netbox.forms import NetBoxModelForm\n5 from tenancy.forms import TenancyForm\n6 from utilities.forms import CommentField, DynamicModelChoiceField, SlugField, StaticSelect\n7 from wireless.models import *\n8 \n9 __all__ = (\n10 'WirelessLANForm',\n11 'WirelessLANGroupForm',\n12 'WirelessLinkForm',\n13 )\n14 \n15 \n16 class WirelessLANGroupForm(NetBoxModelForm):\n17 parent = DynamicModelChoiceField(\n18 queryset=WirelessLANGroup.objects.all(),\n19 required=False\n20 )\n21 slug = SlugField()\n22 \n23 fieldsets = (\n24 ('Wireless LAN Group', (\n25 'parent', 'name', 'slug', 'description', 'tags',\n26 )),\n27 )\n28 \n29 class Meta:\n30 model = WirelessLANGroup\n31 fields = [\n32 'parent', 'name', 'slug', 'description', 'tags',\n33 ]\n34 \n35 \n36 class WirelessLANForm(TenancyForm, NetBoxModelForm):\n37 group = DynamicModelChoiceField(\n38 queryset=WirelessLANGroup.objects.all(),\n39 required=False\n40 )\n41 region = DynamicModelChoiceField(\n42 queryset=Region.objects.all(),\n43 required=False,\n44 initial_params={\n45 'sites': '$site'\n46 }\n47 )\n48 site_group = DynamicModelChoiceField(\n49 queryset=SiteGroup.objects.all(),\n50 required=False,\n51 initial_params={\n52 'sites': '$site'\n53 }\n54 )\n55 site = DynamicModelChoiceField(\n56 queryset=Site.objects.all(),\n57 required=False,\n58 null_option='None',\n59 query_params={\n60 'region_id': '$region',\n61 'group_id': '$site_group',\n62 }\n63 )\n64 vlan_group = DynamicModelChoiceField(\n65 queryset=VLANGroup.objects.all(),\n66 required=False,\n67 label=_('VLAN group'),\n68 null_option='None',\n69 query_params={\n70 'site': '$site'\n71 },\n72 initial_params={\n73 'vlans': '$vlan'\n74 }\n75 )\n76 vlan = DynamicModelChoiceField(\n77 queryset=VLAN.objects.all(),\n78 required=False,\n79 label=_('VLAN'),\n80 query_params={\n81 'site_id': '$site',\n82 'group_id': '$vlan_group',\n83 }\n84 )\n85 comments = CommentField()\n86 \n87 fieldsets = (\n88 ('Wireless LAN', ('ssid', 'group', 'status', 'description', 'tags')),\n89 ('VLAN', ('region', 'site_group', 'site', 'vlan_group', 'vlan',)),\n90 ('Tenancy', ('tenant_group', 'tenant')),\n91 ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),\n92 )\n93 \n94 class Meta:\n95 model = WirelessLAN\n96 fields = [\n97 'ssid', 'group', 'region', 'site_group', 'site', 'status', 'vlan_group', 'vlan', 'tenant_group', 'tenant',\n98 'auth_type', 'auth_cipher', 'auth_psk', 'description', 'comments', 'tags',\n99 ]\n100 widgets = {\n101 'status': StaticSelect,\n102 'auth_type': StaticSelect,\n103 'auth_cipher': StaticSelect,\n104 }\n105 \n106 \n107 class WirelessLinkForm(TenancyForm, NetBoxModelForm):\n108 site_a = DynamicModelChoiceField(\n109 queryset=Site.objects.all(),\n110 required=False,\n111 label=_('Site'),\n112 initial_params={\n113 'devices': '$device_a',\n114 }\n115 )\n116 location_a = DynamicModelChoiceField(\n117 queryset=Location.objects.all(),\n118 query_params={\n119 'site_id': '$site_a',\n120 },\n121 required=False,\n122 label=_('Location'),\n123 initial_params={\n124 'devices': '$device_a',\n125 }\n126 )\n127 device_a = DynamicModelChoiceField(\n128 queryset=Device.objects.all(),\n129 query_params={\n130 'site_id': '$site_a',\n131 'location_id': 
'$location_a',\n132 },\n133 required=False,\n134 label=_('Device'),\n135 initial_params={\n136 'interfaces': '$interface_a'\n137 }\n138 )\n139 interface_a = DynamicModelChoiceField(\n140 queryset=Interface.objects.all(),\n141 query_params={\n142 'kind': 'wireless',\n143 'device_id': '$device_a',\n144 },\n145 disabled_indicator='_occupied',\n146 label=_('Interface')\n147 )\n148 site_b = DynamicModelChoiceField(\n149 queryset=Site.objects.all(),\n150 required=False,\n151 label=_('Site'),\n152 initial_params={\n153 'devices': '$device_b',\n154 }\n155 )\n156 location_b = DynamicModelChoiceField(\n157 queryset=Location.objects.all(),\n158 query_params={\n159 'site_id': '$site_b',\n160 },\n161 required=False,\n162 label=_('Location'),\n163 initial_params={\n164 'devices': '$device_b',\n165 }\n166 )\n167 device_b = DynamicModelChoiceField(\n168 queryset=Device.objects.all(),\n169 query_params={\n170 'site_id': '$site_b',\n171 'location_id': '$location_b',\n172 },\n173 required=False,\n174 label=_('Device'),\n175 initial_params={\n176 'interfaces': '$interface_b'\n177 }\n178 )\n179 interface_b = DynamicModelChoiceField(\n180 queryset=Interface.objects.all(),\n181 query_params={\n182 'kind': 'wireless',\n183 'device_id': '$device_b',\n184 },\n185 disabled_indicator='_occupied',\n186 label=_('Interface')\n187 )\n188 comments = CommentField()\n189 \n190 fieldsets = (\n191 ('Side A', ('site_a', 'location_a', 'device_a', 'interface_a')),\n192 ('Side B', ('site_b', 'location_b', 'device_b', 'interface_b')),\n193 ('Link', ('status', 'ssid', 'description', 'tags')),\n194 ('Tenancy', ('tenant_group', 'tenant')),\n195 ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),\n196 )\n197 \n198 class Meta:\n199 model = WirelessLink\n200 fields = [\n201 'site_a', 'location_a', 'device_a', 'interface_a', 'site_b', 'location_b', 'device_b', 'interface_b',\n202 'status', 'ssid', 'tenant_group', 'tenant', 'auth_type', 'auth_cipher', 'auth_psk', 'description',\n203 'comments', 'tags',\n204 ]\n205 widgets = {\n206 'status': StaticSelect,\n207 'auth_type': StaticSelect,\n208 'auth_cipher': StaticSelect,\n209 }\n210 labels = {\n211 'auth_type': 'Type',\n212 'auth_cipher': 'Cipher',\n213 }\n214\n[end of netbox/wireless/forms/model_forms.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/netbox/wireless/forms/model_forms.py b/netbox/wireless/forms/model_forms.py\n--- a/netbox/wireless/forms/model_forms.py\n+++ b/netbox/wireless/forms/model_forms.py\n@@ -1,3 +1,4 @@\n+from django.forms import PasswordInput\n from django.utils.translation import gettext as _\n from dcim.models import Device, Interface, Location, Region, Site, SiteGroup\n from ipam.models import VLAN, VLANGroup\n@@ -101,6 +102,10 @@\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n+ 'auth_psk': PasswordInput(\n+ render_value=True,\n+ attrs={'data-toggle': 'password'}\n+ ),\n }\n \n \n@@ -206,6 +211,10 @@\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n+ 'auth_psk': PasswordInput(\n+ render_value=True,\n+ attrs={'data-toggle': 'password'}\n+ ),\n }\n labels = {\n 'auth_type': 'Type',\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/netbox/wireless/forms/model_forms.py b/netbox/wireless/forms/model_forms.py\\n--- a/netbox/wireless/forms/model_forms.py\\n+++ b/netbox/wireless/forms/model_forms.py\\n@@ -1,3 +1,4 @@\\n+from django.forms import PasswordInput\\n from django.utils.translation import gettext as _\\n from dcim.models import Device, Interface, Location, Region, Site, SiteGroup\\n from ipam.models import VLAN, VLANGroup\\n@@ -101,6 +102,10 @@\\n 'status': StaticSelect,\\n 'auth_type': StaticSelect,\\n 'auth_cipher': StaticSelect,\\n+ 'auth_psk': PasswordInput(\\n+ render_value=True,\\n+ attrs={'data-toggle': 'password'}\\n+ ),\\n }\\n \\n \\n@@ -206,6 +211,10 @@\\n 'status': StaticSelect,\\n 'auth_type': StaticSelect,\\n 'auth_cipher': StaticSelect,\\n+ 'auth_psk': PasswordInput(\\n+ render_value=True,\\n+ attrs={'data-toggle': 'password'}\\n+ ),\\n }\\n labels = {\\n 'auth_type': 'Type',\\n\", \"issue\": \"Hide WIFI password\\n### NetBox version\\n\\nv3.4.3\\n\\n### Feature type\\n\\nChange to existing functionality\\n\\n### Proposed functionality\\n\\nIs it possible to hide the WIFI password as well? As now when you add the password it is added as plain-text.\\r\\nCan this be a hidden password with \\\"*********\\\" and icon on the side to make it visible?\\r\\n\\r\\nOr does it needs to be moved to \\\"secret(store) plugin?\\r\\n\\r\\n\\n\\n### Use case\\n\\nWell i think passwords dont need to be visible until you want it to be. \\r\\nThe best part is that you ofcourse need to be logged in to see this. 
\\r\\nBut some think this needs to be secure and im not sure if this is also plain text in the database itself? \\n\\n### Database changes\\n\\nProbally yes encrypted and hidden\\n\\n### External dependencies\\n\\nMaybe the secret plugin.\\n\", \"before_files\": [{\"content\": \"from django.utils.translation import gettext as _\\nfrom dcim.models import Device, Interface, Location, Region, Site, SiteGroup\\nfrom ipam.models import VLAN, VLANGroup\\nfrom netbox.forms import NetBoxModelForm\\nfrom tenancy.forms import TenancyForm\\nfrom utilities.forms import CommentField, DynamicModelChoiceField, SlugField, StaticSelect\\nfrom wireless.models import *\\n\\n__all__ = (\\n 'WirelessLANForm',\\n 'WirelessLANGroupForm',\\n 'WirelessLinkForm',\\n)\\n\\n\\nclass WirelessLANGroupForm(NetBoxModelForm):\\n parent = DynamicModelChoiceField(\\n queryset=WirelessLANGroup.objects.all(),\\n required=False\\n )\\n slug = SlugField()\\n\\n fieldsets = (\\n ('Wireless LAN Group', (\\n 'parent', 'name', 'slug', 'description', 'tags',\\n )),\\n )\\n\\n class Meta:\\n model = WirelessLANGroup\\n fields = [\\n 'parent', 'name', 'slug', 'description', 'tags',\\n ]\\n\\n\\nclass WirelessLANForm(TenancyForm, NetBoxModelForm):\\n group = DynamicModelChoiceField(\\n queryset=WirelessLANGroup.objects.all(),\\n required=False\\n )\\n region = DynamicModelChoiceField(\\n queryset=Region.objects.all(),\\n required=False,\\n initial_params={\\n 'sites': '$site'\\n }\\n )\\n site_group = DynamicModelChoiceField(\\n queryset=SiteGroup.objects.all(),\\n required=False,\\n initial_params={\\n 'sites': '$site'\\n }\\n )\\n site = DynamicModelChoiceField(\\n queryset=Site.objects.all(),\\n required=False,\\n null_option='None',\\n query_params={\\n 'region_id': '$region',\\n 'group_id': '$site_group',\\n }\\n )\\n vlan_group = DynamicModelChoiceField(\\n queryset=VLANGroup.objects.all(),\\n required=False,\\n label=_('VLAN group'),\\n null_option='None',\\n query_params={\\n 'site': '$site'\\n },\\n initial_params={\\n 'vlans': '$vlan'\\n }\\n )\\n vlan = DynamicModelChoiceField(\\n queryset=VLAN.objects.all(),\\n required=False,\\n label=_('VLAN'),\\n query_params={\\n 'site_id': '$site',\\n 'group_id': '$vlan_group',\\n }\\n )\\n comments = CommentField()\\n\\n fieldsets = (\\n ('Wireless LAN', ('ssid', 'group', 'status', 'description', 'tags')),\\n ('VLAN', ('region', 'site_group', 'site', 'vlan_group', 'vlan',)),\\n ('Tenancy', ('tenant_group', 'tenant')),\\n ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),\\n )\\n\\n class Meta:\\n model = WirelessLAN\\n fields = [\\n 'ssid', 'group', 'region', 'site_group', 'site', 'status', 'vlan_group', 'vlan', 'tenant_group', 'tenant',\\n 'auth_type', 'auth_cipher', 'auth_psk', 'description', 'comments', 'tags',\\n ]\\n widgets = {\\n 'status': StaticSelect,\\n 'auth_type': StaticSelect,\\n 'auth_cipher': StaticSelect,\\n }\\n\\n\\nclass WirelessLinkForm(TenancyForm, NetBoxModelForm):\\n site_a = DynamicModelChoiceField(\\n queryset=Site.objects.all(),\\n required=False,\\n label=_('Site'),\\n initial_params={\\n 'devices': '$device_a',\\n }\\n )\\n location_a = DynamicModelChoiceField(\\n queryset=Location.objects.all(),\\n query_params={\\n 'site_id': '$site_a',\\n },\\n required=False,\\n label=_('Location'),\\n initial_params={\\n 'devices': '$device_a',\\n }\\n )\\n device_a = DynamicModelChoiceField(\\n queryset=Device.objects.all(),\\n query_params={\\n 'site_id': '$site_a',\\n 'location_id': '$location_a',\\n },\\n required=False,\\n label=_('Device'),\\n 
initial_params={\\n 'interfaces': '$interface_a'\\n }\\n )\\n interface_a = DynamicModelChoiceField(\\n queryset=Interface.objects.all(),\\n query_params={\\n 'kind': 'wireless',\\n 'device_id': '$device_a',\\n },\\n disabled_indicator='_occupied',\\n label=_('Interface')\\n )\\n site_b = DynamicModelChoiceField(\\n queryset=Site.objects.all(),\\n required=False,\\n label=_('Site'),\\n initial_params={\\n 'devices': '$device_b',\\n }\\n )\\n location_b = DynamicModelChoiceField(\\n queryset=Location.objects.all(),\\n query_params={\\n 'site_id': '$site_b',\\n },\\n required=False,\\n label=_('Location'),\\n initial_params={\\n 'devices': '$device_b',\\n }\\n )\\n device_b = DynamicModelChoiceField(\\n queryset=Device.objects.all(),\\n query_params={\\n 'site_id': '$site_b',\\n 'location_id': '$location_b',\\n },\\n required=False,\\n label=_('Device'),\\n initial_params={\\n 'interfaces': '$interface_b'\\n }\\n )\\n interface_b = DynamicModelChoiceField(\\n queryset=Interface.objects.all(),\\n query_params={\\n 'kind': 'wireless',\\n 'device_id': '$device_b',\\n },\\n disabled_indicator='_occupied',\\n label=_('Interface')\\n )\\n comments = CommentField()\\n\\n fieldsets = (\\n ('Side A', ('site_a', 'location_a', 'device_a', 'interface_a')),\\n ('Side B', ('site_b', 'location_b', 'device_b', 'interface_b')),\\n ('Link', ('status', 'ssid', 'description', 'tags')),\\n ('Tenancy', ('tenant_group', 'tenant')),\\n ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),\\n )\\n\\n class Meta:\\n model = WirelessLink\\n fields = [\\n 'site_a', 'location_a', 'device_a', 'interface_a', 'site_b', 'location_b', 'device_b', 'interface_b',\\n 'status', 'ssid', 'tenant_group', 'tenant', 'auth_type', 'auth_cipher', 'auth_psk', 'description',\\n 'comments', 'tags',\\n ]\\n widgets = {\\n 'status': StaticSelect,\\n 'auth_type': StaticSelect,\\n 'auth_cipher': StaticSelect,\\n }\\n labels = {\\n 'auth_type': 'Type',\\n 'auth_cipher': 'Cipher',\\n }\\n\", \"path\": \"netbox/wireless/forms/model_forms.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2617,"string":"2,617"},"num_tokens_diff":{"kind":"number","value":253,"string":"253"}}},{"rowIdx":18185,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_28272"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"googleapis__python-bigquery-89"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nRemove test_utils directory\nNow that test utils are available in a GitHub repo, a local `test_utils` copy is not needed anymore and should be removed.\n\n\n\n[start of noxfile.py]\n1 # Copyright 2016 Google LLC\n2 #\n3 # Licensed under the Apache License, Version 2.0 (the \"License\");\n4 # you may not use this file except in compliance with the License.\n5 # You may obtain a copy of the License at\n6 #\n7 # http://www.apache.org/licenses/LICENSE-2.0\n8 #\n9 # Unless required by applicable law or agreed to in writing, software\n10 # distributed under the License is distributed on an \"AS IS\" BASIS,\n11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n12 # See the License for the specific language governing permissions and\n13 # limitations under the License.\n14 \n15 from __future__ import absolute_import\n16 \n17 import os\n18 import shutil\n19 \n20 import nox\n21 \n22 \n23 BLACK_PATHS = (\"docs\", \"google\", \"samples\", 
\"tests\", \"noxfile.py\", \"setup.py\")\n24 \n25 \n26 def default(session):\n27 \"\"\"Default unit test session.\n28 \n29 This is intended to be run **without** an interpreter set, so\n30 that the current ``python`` (on the ``PATH``) or the version of\n31 Python corresponding to the ``nox`` binary the ``PATH`` can\n32 run the tests.\n33 \"\"\"\n34 # Install all test dependencies, then install local packages in-place.\n35 session.install(\"mock\", \"pytest\", \"pytest-cov\", \"freezegun\")\n36 session.install(\"grpcio\")\n37 session.install(\"-e\", \"test_utils\")\n38 \n39 # fastparquet is not included in .[all] because, in general, it's redundant\n40 # with pyarrow. We still want to run some unit tests with fastparquet\n41 # serialization, though.\n42 session.install(\"-e\", \".[all,fastparquet]\")\n43 \n44 # IPython does not support Python 2 after version 5.x\n45 if session.python == \"2.7\":\n46 session.install(\"ipython==5.5\")\n47 else:\n48 session.install(\"ipython\")\n49 \n50 # Run py.test against the unit tests.\n51 session.run(\n52 \"py.test\",\n53 \"--quiet\",\n54 \"--cov=google.cloud.bigquery\",\n55 \"--cov=tests.unit\",\n56 \"--cov-append\",\n57 \"--cov-config=.coveragerc\",\n58 \"--cov-report=\",\n59 \"--cov-fail-under=0\",\n60 os.path.join(\"tests\", \"unit\"),\n61 *session.posargs,\n62 )\n63 \n64 \n65 @nox.session(python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\"])\n66 def unit(session):\n67 \"\"\"Run the unit test suite.\"\"\"\n68 default(session)\n69 \n70 \n71 @nox.session(python=[\"2.7\", \"3.8\"])\n72 def system(session):\n73 \"\"\"Run the system test suite.\"\"\"\n74 \n75 # Sanity check: Only run system tests if the environment variable is set.\n76 if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n77 session.skip(\"Credentials must be set via environment variable.\")\n78 \n79 # Use pre-release gRPC for system tests.\n80 session.install(\"--pre\", \"grpcio\")\n81 \n82 # Install all test dependencies, then install local packages in place.\n83 session.install(\"mock\", \"pytest\", \"psutil\")\n84 session.install(\"google-cloud-storage\")\n85 session.install(\"fastavro\")\n86 session.install(\"-e\", \"test_utils\")\n87 session.install(\"-e\", \".[all]\")\n88 \n89 # IPython does not support Python 2 after version 5.x\n90 if session.python == \"2.7\":\n91 session.install(\"ipython==5.5\")\n92 else:\n93 session.install(\"ipython\")\n94 \n95 # Run py.test against the system tests.\n96 session.run(\n97 \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n98 )\n99 \n100 \n101 @nox.session(python=[\"2.7\", \"3.8\"])\n102 def snippets(session):\n103 \"\"\"Run the snippets test suite.\"\"\"\n104 \n105 # Sanity check: Only run snippets tests if the environment variable is set.\n106 if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n107 session.skip(\"Credentials must be set via environment variable.\")\n108 \n109 # Install all test dependencies, then install local packages in place.\n110 session.install(\"mock\", \"pytest\")\n111 session.install(\"google-cloud-storage\")\n112 session.install(\"grpcio\")\n113 session.install(\"-e\", \"test_utils\")\n114 session.install(\"-e\", \".[all]\")\n115 \n116 # Run py.test against the snippets tests.\n117 session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n118 session.run(\"py.test\", \"samples\", *session.posargs)\n119 \n120 \n121 @nox.session(python=\"3.8\")\n122 def cover(session):\n123 \"\"\"Run the final coverage report.\n124 \n125 This outputs 
the coverage report aggregating coverage from the unit\n126 test runs (not system test runs), and then erases coverage data.\n127 \"\"\"\n128 session.install(\"coverage\", \"pytest-cov\")\n129 session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n130 session.run(\"coverage\", \"erase\")\n131 \n132 \n133 @nox.session(python=\"3.8\")\n134 def lint(session):\n135 \"\"\"Run linters.\n136 \n137 Returns a failure if the linters find linting errors or sufficiently\n138 serious code quality issues.\n139 \"\"\"\n140 \n141 session.install(\"black\", \"flake8\")\n142 session.install(\"-e\", \".\")\n143 session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n144 session.run(\"flake8\", \"tests\")\n145 session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n146 session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n147 session.run(\"black\", \"--check\", *BLACK_PATHS)\n148 \n149 \n150 @nox.session(python=\"3.8\")\n151 def lint_setup_py(session):\n152 \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n153 \n154 session.install(\"docutils\", \"Pygments\")\n155 session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n156 \n157 \n158 @nox.session(python=\"3.6\")\n159 def blacken(session):\n160 \"\"\"Run black.\n161 Format code to uniform standard.\n162 \n163 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n164 That run uses an image that doesn't have 3.6 installed. Before updating this\n165 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n166 \"\"\"\n167 session.install(\"black\")\n168 session.run(\"black\", *BLACK_PATHS)\n169 \n170 \n171 @nox.session(python=\"3.8\")\n172 def docs(session):\n173 \"\"\"Build the docs.\"\"\"\n174 \n175 session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n176 session.install(\"google-cloud-storage\")\n177 session.install(\"-e\", \".[all]\")\n178 \n179 shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n180 session.run(\n181 \"sphinx-build\",\n182 \"-W\", # warnings as errors\n183 \"-T\", # show full traceback on exception\n184 \"-N\", # no colors\n185 \"-b\",\n186 \"html\",\n187 \"-d\",\n188 os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n189 os.path.join(\"docs\", \"\"),\n190 os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n191 )\n192\n[end of noxfile.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -34,7 +34,7 @@\n # Install all test dependencies, then install local packages in-place.\n session.install(\"mock\", \"pytest\", \"pytest-cov\", \"freezegun\")\n session.install(\"grpcio\")\n- session.install(\"-e\", \"test_utils\")\n+ session.install(\"git+https://github.com/googleapis/python-test-utils\")\n \n # fastparquet is not included in .[all] because, in general, it's redundant\n # with pyarrow. We still want to run some unit tests with fastparquet\n@@ -83,7 +83,7 @@\n session.install(\"mock\", \"pytest\", \"psutil\")\n session.install(\"google-cloud-storage\")\n session.install(\"fastavro\")\n- session.install(\"-e\", \"test_utils\")\n+ session.install(\"git+https://github.com/googleapis/python-test-utils\")\n session.install(\"-e\", \".[all]\")\n \n # IPython does not support Python 2 after version 5.x\n@@ -110,7 +110,7 @@\n session.install(\"mock\", \"pytest\")\n session.install(\"google-cloud-storage\")\n session.install(\"grpcio\")\n- session.install(\"-e\", \"test_utils\")\n+ session.install(\"git+https://github.com/googleapis/python-test-utils\")\n session.install(\"-e\", \".[all]\")\n \n # Run py.test against the snippets tests.\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/noxfile.py b/noxfile.py\\n--- a/noxfile.py\\n+++ b/noxfile.py\\n@@ -34,7 +34,7 @@\\n # Install all test dependencies, then install local packages in-place.\\n session.install(\\\"mock\\\", \\\"pytest\\\", \\\"pytest-cov\\\", \\\"freezegun\\\")\\n session.install(\\\"grpcio\\\")\\n- session.install(\\\"-e\\\", \\\"test_utils\\\")\\n+ session.install(\\\"git+https://github.com/googleapis/python-test-utils\\\")\\n \\n # fastparquet is not included in .[all] because, in general, it's redundant\\n # with pyarrow. 
We still want to run some unit tests with fastparquet\\n@@ -83,7 +83,7 @@\\n session.install(\\\"mock\\\", \\\"pytest\\\", \\\"psutil\\\")\\n session.install(\\\"google-cloud-storage\\\")\\n session.install(\\\"fastavro\\\")\\n- session.install(\\\"-e\\\", \\\"test_utils\\\")\\n+ session.install(\\\"git+https://github.com/googleapis/python-test-utils\\\")\\n session.install(\\\"-e\\\", \\\".[all]\\\")\\n \\n # IPython does not support Python 2 after version 5.x\\n@@ -110,7 +110,7 @@\\n session.install(\\\"mock\\\", \\\"pytest\\\")\\n session.install(\\\"google-cloud-storage\\\")\\n session.install(\\\"grpcio\\\")\\n- session.install(\\\"-e\\\", \\\"test_utils\\\")\\n+ session.install(\\\"git+https://github.com/googleapis/python-test-utils\\\")\\n session.install(\\\"-e\\\", \\\".[all]\\\")\\n \\n # Run py.test against the snippets tests.\\n\", \"issue\": \"Remove test_utils directory\\nNow that test utils are available in a GitHub repo, a local `test_utils` copy is not needed anymore and should be removed.\\n\", \"before_files\": [{\"content\": \"# Copyright 2016 Google LLC\\n#\\n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n#\\n# http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\\n\\nfrom __future__ import absolute_import\\n\\nimport os\\nimport shutil\\n\\nimport nox\\n\\n\\nBLACK_PATHS = (\\\"docs\\\", \\\"google\\\", \\\"samples\\\", \\\"tests\\\", \\\"noxfile.py\\\", \\\"setup.py\\\")\\n\\n\\ndef default(session):\\n \\\"\\\"\\\"Default unit test session.\\n\\n This is intended to be run **without** an interpreter set, so\\n that the current ``python`` (on the ``PATH``) or the version of\\n Python corresponding to the ``nox`` binary the ``PATH`` can\\n run the tests.\\n \\\"\\\"\\\"\\n # Install all test dependencies, then install local packages in-place.\\n session.install(\\\"mock\\\", \\\"pytest\\\", \\\"pytest-cov\\\", \\\"freezegun\\\")\\n session.install(\\\"grpcio\\\")\\n session.install(\\\"-e\\\", \\\"test_utils\\\")\\n\\n # fastparquet is not included in .[all] because, in general, it's redundant\\n # with pyarrow. 
We still want to run some unit tests with fastparquet\\n # serialization, though.\\n session.install(\\\"-e\\\", \\\".[all,fastparquet]\\\")\\n\\n # IPython does not support Python 2 after version 5.x\\n if session.python == \\\"2.7\\\":\\n session.install(\\\"ipython==5.5\\\")\\n else:\\n session.install(\\\"ipython\\\")\\n\\n # Run py.test against the unit tests.\\n session.run(\\n \\\"py.test\\\",\\n \\\"--quiet\\\",\\n \\\"--cov=google.cloud.bigquery\\\",\\n \\\"--cov=tests.unit\\\",\\n \\\"--cov-append\\\",\\n \\\"--cov-config=.coveragerc\\\",\\n \\\"--cov-report=\\\",\\n \\\"--cov-fail-under=0\\\",\\n os.path.join(\\\"tests\\\", \\\"unit\\\"),\\n *session.posargs,\\n )\\n\\n\\n@nox.session(python=[\\\"2.7\\\", \\\"3.5\\\", \\\"3.6\\\", \\\"3.7\\\", \\\"3.8\\\"])\\ndef unit(session):\\n \\\"\\\"\\\"Run the unit test suite.\\\"\\\"\\\"\\n default(session)\\n\\n\\n@nox.session(python=[\\\"2.7\\\", \\\"3.8\\\"])\\ndef system(session):\\n \\\"\\\"\\\"Run the system test suite.\\\"\\\"\\\"\\n\\n # Sanity check: Only run system tests if the environment variable is set.\\n if not os.environ.get(\\\"GOOGLE_APPLICATION_CREDENTIALS\\\", \\\"\\\"):\\n session.skip(\\\"Credentials must be set via environment variable.\\\")\\n\\n # Use pre-release gRPC for system tests.\\n session.install(\\\"--pre\\\", \\\"grpcio\\\")\\n\\n # Install all test dependencies, then install local packages in place.\\n session.install(\\\"mock\\\", \\\"pytest\\\", \\\"psutil\\\")\\n session.install(\\\"google-cloud-storage\\\")\\n session.install(\\\"fastavro\\\")\\n session.install(\\\"-e\\\", \\\"test_utils\\\")\\n session.install(\\\"-e\\\", \\\".[all]\\\")\\n\\n # IPython does not support Python 2 after version 5.x\\n if session.python == \\\"2.7\\\":\\n session.install(\\\"ipython==5.5\\\")\\n else:\\n session.install(\\\"ipython\\\")\\n\\n # Run py.test against the system tests.\\n session.run(\\n \\\"py.test\\\", \\\"--quiet\\\", os.path.join(\\\"tests\\\", \\\"system.py\\\"), *session.posargs\\n )\\n\\n\\n@nox.session(python=[\\\"2.7\\\", \\\"3.8\\\"])\\ndef snippets(session):\\n \\\"\\\"\\\"Run the snippets test suite.\\\"\\\"\\\"\\n\\n # Sanity check: Only run snippets tests if the environment variable is set.\\n if not os.environ.get(\\\"GOOGLE_APPLICATION_CREDENTIALS\\\", \\\"\\\"):\\n session.skip(\\\"Credentials must be set via environment variable.\\\")\\n\\n # Install all test dependencies, then install local packages in place.\\n session.install(\\\"mock\\\", \\\"pytest\\\")\\n session.install(\\\"google-cloud-storage\\\")\\n session.install(\\\"grpcio\\\")\\n session.install(\\\"-e\\\", \\\"test_utils\\\")\\n session.install(\\\"-e\\\", \\\".[all]\\\")\\n\\n # Run py.test against the snippets tests.\\n session.run(\\\"py.test\\\", os.path.join(\\\"docs\\\", \\\"snippets.py\\\"), *session.posargs)\\n session.run(\\\"py.test\\\", \\\"samples\\\", *session.posargs)\\n\\n\\n@nox.session(python=\\\"3.8\\\")\\ndef cover(session):\\n \\\"\\\"\\\"Run the final coverage report.\\n\\n This outputs the coverage report aggregating coverage from the unit\\n test runs (not system test runs), and then erases coverage data.\\n \\\"\\\"\\\"\\n session.install(\\\"coverage\\\", \\\"pytest-cov\\\")\\n session.run(\\\"coverage\\\", \\\"report\\\", \\\"--show-missing\\\", \\\"--fail-under=100\\\")\\n session.run(\\\"coverage\\\", \\\"erase\\\")\\n\\n\\n@nox.session(python=\\\"3.8\\\")\\ndef lint(session):\\n \\\"\\\"\\\"Run linters.\\n\\n Returns a failure if the linters find linting errors or sufficiently\\n serious code 
quality issues.\\n \\\"\\\"\\\"\\n\\n session.install(\\\"black\\\", \\\"flake8\\\")\\n session.install(\\\"-e\\\", \\\".\\\")\\n session.run(\\\"flake8\\\", os.path.join(\\\"google\\\", \\\"cloud\\\", \\\"bigquery\\\"))\\n session.run(\\\"flake8\\\", \\\"tests\\\")\\n session.run(\\\"flake8\\\", os.path.join(\\\"docs\\\", \\\"samples\\\"))\\n session.run(\\\"flake8\\\", os.path.join(\\\"docs\\\", \\\"snippets.py\\\"))\\n session.run(\\\"black\\\", \\\"--check\\\", *BLACK_PATHS)\\n\\n\\n@nox.session(python=\\\"3.8\\\")\\ndef lint_setup_py(session):\\n \\\"\\\"\\\"Verify that setup.py is valid (including RST check).\\\"\\\"\\\"\\n\\n session.install(\\\"docutils\\\", \\\"Pygments\\\")\\n session.run(\\\"python\\\", \\\"setup.py\\\", \\\"check\\\", \\\"--restructuredtext\\\", \\\"--strict\\\")\\n\\n\\n@nox.session(python=\\\"3.6\\\")\\ndef blacken(session):\\n \\\"\\\"\\\"Run black.\\n Format code to uniform standard.\\n\\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\\n That run uses an image that doesn't have 3.6 installed. Before updating this\\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\\n \\\"\\\"\\\"\\n session.install(\\\"black\\\")\\n session.run(\\\"black\\\", *BLACK_PATHS)\\n\\n\\n@nox.session(python=\\\"3.8\\\")\\ndef docs(session):\\n \\\"\\\"\\\"Build the docs.\\\"\\\"\\\"\\n\\n session.install(\\\"ipython\\\", \\\"recommonmark\\\", \\\"sphinx\\\", \\\"sphinx_rtd_theme\\\")\\n session.install(\\\"google-cloud-storage\\\")\\n session.install(\\\"-e\\\", \\\".[all]\\\")\\n\\n shutil.rmtree(os.path.join(\\\"docs\\\", \\\"_build\\\"), ignore_errors=True)\\n session.run(\\n \\\"sphinx-build\\\",\\n \\\"-W\\\", # warnings as errors\\n \\\"-T\\\", # show full traceback on exception\\n \\\"-N\\\", # no colors\\n \\\"-b\\\",\\n \\\"html\\\",\\n \\\"-d\\\",\\n os.path.join(\\\"docs\\\", \\\"_build\\\", \\\"doctrees\\\", \\\"\\\"),\\n os.path.join(\\\"docs\\\", \\\"\\\"),\\n os.path.join(\\\"docs\\\", \\\"_build\\\", \\\"html\\\", \\\"\\\"),\\n )\\n\", \"path\": \"noxfile.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2599,"string":"2,599"},"num_tokens_diff":{"kind":"number","value":334,"string":"334"}}},{"rowIdx":18186,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_42985"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"pytorch__vision-914"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nSTL-10 Testing Protocol\nClass STL10 does not support recommended testing protocol. \r\nSee STL-10 official page: https://cs.stanford.edu/~acoates/stl10/\n\n\n\n[start of torchvision/datasets/stl10.py]\n1 from __future__ import print_function\n2 from PIL import Image\n3 import os\n4 import os.path\n5 import numpy as np\n6 from .cifar import CIFAR10\n7 \n8 \n9 class STL10(CIFAR10):\n10 \"\"\"`STL10 `_ Dataset.\n11 \n12 Args:\n13 root (string): Root directory of dataset where directory\n14 ``stl10_binary`` exists.\n15 split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\n16 Accordingly dataset is selected.\n17 transform (callable, optional): A function/transform that takes in an PIL image\n18 and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n19 target_transform (callable, optional): A function/transform that takes in the\n20 target and transforms it.\n21 download (bool, optional): If true, downloads the dataset from the internet and\n22 puts it in root directory. If dataset is already downloaded, it is not\n23 downloaded again.\n24 \n25 \"\"\"\n26 base_folder = 'stl10_binary'\n27 url = \"http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz\"\n28 filename = \"stl10_binary.tar.gz\"\n29 tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\n30 class_names_file = 'class_names.txt'\n31 train_list = [\n32 ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\n33 ['train_y.bin', '5a34089d4802c674881badbb80307741'],\n34 ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']\n35 ]\n36 \n37 test_list = [\n38 ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'],\n39 ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']\n40 ]\n41 splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\n42 \n43 def __init__(self, root, split='train',\n44 transform=None, target_transform=None, download=False):\n45 if split not in self.splits:\n46 raise ValueError('Split \"{}\" not found. Valid splits are: {}'.format(\n47 split, ', '.join(self.splits),\n48 ))\n49 self.root = os.path.expanduser(root)\n50 self.transform = transform\n51 self.target_transform = target_transform\n52 self.split = split # train/test/unlabeled set\n53 \n54 if download:\n55 self.download()\n56 \n57 if not self._check_integrity():\n58 raise RuntimeError(\n59 'Dataset not found or corrupted. '\n60 'You can use download=True to download it')\n61 \n62 # now load the picked numpy arrays\n63 if self.split == 'train':\n64 self.data, self.labels = self.__loadfile(\n65 self.train_list[0][0], self.train_list[1][0])\n66 elif self.split == 'train+unlabeled':\n67 self.data, self.labels = self.__loadfile(\n68 self.train_list[0][0], self.train_list[1][0])\n69 unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\n70 self.data = np.concatenate((self.data, unlabeled_data))\n71 self.labels = np.concatenate(\n72 (self.labels, np.asarray([-1] * unlabeled_data.shape[0])))\n73 \n74 elif self.split == 'unlabeled':\n75 self.data, _ = self.__loadfile(self.train_list[2][0])\n76 self.labels = np.asarray([-1] * self.data.shape[0])\n77 else: # self.split == 'test':\n78 self.data, self.labels = self.__loadfile(\n79 self.test_list[0][0], self.test_list[1][0])\n80 \n81 class_file = os.path.join(\n82 self.root, self.base_folder, self.class_names_file)\n83 if os.path.isfile(class_file):\n84 with open(class_file) as f:\n85 self.classes = f.read().splitlines()\n86 \n87 def __getitem__(self, index):\n88 \"\"\"\n89 Args:\n90 index (int): Index\n91 \n92 Returns:\n93 tuple: (image, target) where target is index of the target class.\n94 \"\"\"\n95 if self.labels is not None:\n96 img, target = self.data[index], int(self.labels[index])\n97 else:\n98 img, target = self.data[index], None\n99 \n100 # doing this so that it is consistent with all other datasets\n101 # to return a PIL Image\n102 img = Image.fromarray(np.transpose(img, (1, 2, 0)))\n103 \n104 if self.transform is not None:\n105 img = self.transform(img)\n106 \n107 if self.target_transform is not None:\n108 target = self.target_transform(target)\n109 \n110 return img, target\n111 \n112 def __len__(self):\n113 return self.data.shape[0]\n114 \n115 def __loadfile(self, data_file, labels_file=None):\n116 labels = None\n117 if labels_file:\n118 path_to_labels = os.path.join(\n119 self.root, self.base_folder, labels_file)\n120 with 
open(path_to_labels, 'rb') as f:\n121 labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based\n122 \n123 path_to_data = os.path.join(self.root, self.base_folder, data_file)\n124 with open(path_to_data, 'rb') as f:\n125 # read whole file in uint8 chunks\n126 everything = np.fromfile(f, dtype=np.uint8)\n127 images = np.reshape(everything, (-1, 3, 96, 96))\n128 images = np.transpose(images, (0, 1, 3, 2))\n129 \n130 return images, labels\n131 \n132 def extra_repr(self):\n133 return \"Split: {split}\".format(**self.__dict__)\n134\n[end of torchvision/datasets/stl10.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/torchvision/datasets/stl10.py b/torchvision/datasets/stl10.py\n--- a/torchvision/datasets/stl10.py\n+++ b/torchvision/datasets/stl10.py\n@@ -14,6 +14,9 @@\n ``stl10_binary`` exists.\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\n Accordingly dataset is selected.\n+ folds (int, optional): One of {0-9} or None.\n+ For training, loads one of the 10 pre-defined folds of 1k samples for the\n+ standard evaluation procedure. If no value is passed, loads the 5k samples.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n@@ -28,6 +31,7 @@\n filename = \"stl10_binary.tar.gz\"\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\n class_names_file = 'class_names.txt'\n+ folds_list_file = 'fold_indices.txt'\n train_list = [\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\n@@ -40,7 +44,7 @@\n ]\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\n \n- def __init__(self, root, split='train',\n+ def __init__(self, root, split='train', folds=None,\n transform=None, target_transform=None, download=False):\n if split not in self.splits:\n raise ValueError('Split \"{}\" not found. 
Valid splits are: {}'.format(\n@@ -50,6 +54,7 @@\n self.transform = transform\n self.target_transform = target_transform\n self.split = split # train/test/unlabeled set\n+ self.folds = folds # one of the 10 pre-defined folds or the full dataset\n \n if download:\n self.download()\n@@ -63,9 +68,12 @@\n if self.split == 'train':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n+ self.__load_folds(folds)\n+\n elif self.split == 'train+unlabeled':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n+ self.__load_folds(folds)\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\n self.data = np.concatenate((self.data, unlabeled_data))\n self.labels = np.concatenate(\n@@ -131,3 +139,16 @@\n \n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n+\n+ def __load_folds(self, folds):\n+ # loads one of the folds if specified\n+ if isinstance(folds, int):\n+ if folds >= 0 and folds < 10:\n+ path_to_folds = os.path.join(\n+ self.root, self.base_folder, self.folds_list_file)\n+ with open(path_to_folds, 'r') as f:\n+ str_idx = f.read().splitlines()[folds]\n+ list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ')\n+ self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]\n+ else:\n+ raise ValueError('Folds \"{}\" not found. Valid splits are: 0-9.'.format(folds))\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/torchvision/datasets/stl10.py b/torchvision/datasets/stl10.py\\n--- a/torchvision/datasets/stl10.py\\n+++ b/torchvision/datasets/stl10.py\\n@@ -14,6 +14,9 @@\\n ``stl10_binary`` exists.\\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\\n Accordingly dataset is selected.\\n+ folds (int, optional): One of {0-9} or None.\\n+ For training, loads one of the 10 pre-defined folds of 1k samples for the\\n+ standard evaluation procedure. If no value is passed, loads the 5k samples.\\n transform (callable, optional): A function/transform that takes in an PIL image\\n and returns a transformed version. E.g, ``transforms.RandomCrop``\\n target_transform (callable, optional): A function/transform that takes in the\\n@@ -28,6 +31,7 @@\\n filename = \\\"stl10_binary.tar.gz\\\"\\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\\n class_names_file = 'class_names.txt'\\n+ folds_list_file = 'fold_indices.txt'\\n train_list = [\\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\\n@@ -40,7 +44,7 @@\\n ]\\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\\n \\n- def __init__(self, root, split='train',\\n+ def __init__(self, root, split='train', folds=None,\\n transform=None, target_transform=None, download=False):\\n if split not in self.splits:\\n raise ValueError('Split \\\"{}\\\" not found. 
Valid splits are: {}'.format(\\n@@ -50,6 +54,7 @@\\n self.transform = transform\\n self.target_transform = target_transform\\n self.split = split # train/test/unlabeled set\\n+ self.folds = folds # one of the 10 pre-defined folds or the full dataset\\n \\n if download:\\n self.download()\\n@@ -63,9 +68,12 @@\\n if self.split == 'train':\\n self.data, self.labels = self.__loadfile(\\n self.train_list[0][0], self.train_list[1][0])\\n+ self.__load_folds(folds)\\n+\\n elif self.split == 'train+unlabeled':\\n self.data, self.labels = self.__loadfile(\\n self.train_list[0][0], self.train_list[1][0])\\n+ self.__load_folds(folds)\\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\\n self.data = np.concatenate((self.data, unlabeled_data))\\n self.labels = np.concatenate(\\n@@ -131,3 +139,16 @@\\n \\n def extra_repr(self):\\n return \\\"Split: {split}\\\".format(**self.__dict__)\\n+\\n+ def __load_folds(self, folds):\\n+ # loads one of the folds if specified\\n+ if isinstance(folds, int):\\n+ if folds >= 0 and folds < 10:\\n+ path_to_folds = os.path.join(\\n+ self.root, self.base_folder, self.folds_list_file)\\n+ with open(path_to_folds, 'r') as f:\\n+ str_idx = f.read().splitlines()[folds]\\n+ list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ')\\n+ self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]\\n+ else:\\n+ raise ValueError('Folds \\\"{}\\\" not found. Valid splits are: 0-9.'.format(folds))\\n\", \"issue\": \"STL-10 Testing Protocol\\nClass STL10 does not support recommended testing protocol. \\r\\nSee STL-10 official page: https://cs.stanford.edu/~acoates/stl10/\\n\", \"before_files\": [{\"content\": \"from __future__ import print_function\\nfrom PIL import Image\\nimport os\\nimport os.path\\nimport numpy as np\\nfrom .cifar import CIFAR10\\n\\n\\nclass STL10(CIFAR10):\\n \\\"\\\"\\\"`STL10 `_ Dataset.\\n\\n Args:\\n root (string): Root directory of dataset where directory\\n ``stl10_binary`` exists.\\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\\n Accordingly dataset is selected.\\n transform (callable, optional): A function/transform that takes in an PIL image\\n and returns a transformed version. E.g, ``transforms.RandomCrop``\\n target_transform (callable, optional): A function/transform that takes in the\\n target and transforms it.\\n download (bool, optional): If true, downloads the dataset from the internet and\\n puts it in root directory. If dataset is already downloaded, it is not\\n downloaded again.\\n\\n \\\"\\\"\\\"\\n base_folder = 'stl10_binary'\\n url = \\\"http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz\\\"\\n filename = \\\"stl10_binary.tar.gz\\\"\\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\\n class_names_file = 'class_names.txt'\\n train_list = [\\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\\n ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']\\n ]\\n\\n test_list = [\\n ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'],\\n ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']\\n ]\\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\\n\\n def __init__(self, root, split='train',\\n transform=None, target_transform=None, download=False):\\n if split not in self.splits:\\n raise ValueError('Split \\\"{}\\\" not found. 
Valid splits are: {}'.format(\\n split, ', '.join(self.splits),\\n ))\\n self.root = os.path.expanduser(root)\\n self.transform = transform\\n self.target_transform = target_transform\\n self.split = split # train/test/unlabeled set\\n\\n if download:\\n self.download()\\n\\n if not self._check_integrity():\\n raise RuntimeError(\\n 'Dataset not found or corrupted. '\\n 'You can use download=True to download it')\\n\\n # now load the picked numpy arrays\\n if self.split == 'train':\\n self.data, self.labels = self.__loadfile(\\n self.train_list[0][0], self.train_list[1][0])\\n elif self.split == 'train+unlabeled':\\n self.data, self.labels = self.__loadfile(\\n self.train_list[0][0], self.train_list[1][0])\\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\\n self.data = np.concatenate((self.data, unlabeled_data))\\n self.labels = np.concatenate(\\n (self.labels, np.asarray([-1] * unlabeled_data.shape[0])))\\n\\n elif self.split == 'unlabeled':\\n self.data, _ = self.__loadfile(self.train_list[2][0])\\n self.labels = np.asarray([-1] * self.data.shape[0])\\n else: # self.split == 'test':\\n self.data, self.labels = self.__loadfile(\\n self.test_list[0][0], self.test_list[1][0])\\n\\n class_file = os.path.join(\\n self.root, self.base_folder, self.class_names_file)\\n if os.path.isfile(class_file):\\n with open(class_file) as f:\\n self.classes = f.read().splitlines()\\n\\n def __getitem__(self, index):\\n \\\"\\\"\\\"\\n Args:\\n index (int): Index\\n\\n Returns:\\n tuple: (image, target) where target is index of the target class.\\n \\\"\\\"\\\"\\n if self.labels is not None:\\n img, target = self.data[index], int(self.labels[index])\\n else:\\n img, target = self.data[index], None\\n\\n # doing this so that it is consistent with all other datasets\\n # to return a PIL Image\\n img = Image.fromarray(np.transpose(img, (1, 2, 0)))\\n\\n if self.transform is not None:\\n img = self.transform(img)\\n\\n if self.target_transform is not None:\\n target = self.target_transform(target)\\n\\n return img, target\\n\\n def __len__(self):\\n return self.data.shape[0]\\n\\n def __loadfile(self, data_file, labels_file=None):\\n labels = None\\n if labels_file:\\n path_to_labels = os.path.join(\\n self.root, self.base_folder, labels_file)\\n with open(path_to_labels, 'rb') as f:\\n labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based\\n\\n path_to_data = os.path.join(self.root, self.base_folder, data_file)\\n with open(path_to_data, 'rb') as f:\\n # read whole file in uint8 chunks\\n everything = np.fromfile(f, dtype=np.uint8)\\n images = np.reshape(everything, (-1, 3, 96, 96))\\n images = np.transpose(images, (0, 1, 3, 2))\\n\\n return images, labels\\n\\n def extra_repr(self):\\n return \\\"Split: {split}\\\".format(**self.__dict__)\\n\", \"path\": \"torchvision/datasets/stl10.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2229,"string":"2,229"},"num_tokens_diff":{"kind":"number","value":910,"string":"910"}}},{"rowIdx":18187,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_15580"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"Azure__azure-cli-extensions-590"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nHelp documentation returns error for \"min_profile\" and \"max_profile\"\n### Extension name (the extension in question)\r\nstorage-preview\r\n\r\n### Description of issue 
(in as much detail as possible)\r\nReturns the following error when prompting for help via `az storage account -h`\r\n\r\n```\r\nHelp entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\cli.py\", line 206, in invoke\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\commands\\__init__.py\", line 276, in execute\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1734, in parse_args\r\n args, argv = self.parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1766, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1954, in _parse_known_args\r\n positionals_end_index = consume_positionals(start_index)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1931, in consume_positionals\r\n take_action(action, args)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1840, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1137, in __call__\r\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1766, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1954, in _parse_known_args\r\n positionals_end_index = consume_positionals(start_index)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1931, in consume_positionals\r\n take_action(action, args)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1840, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1137, in __call__\r\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1766, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1972, in _parse_known_args\r\n start_index = consume_optional(start_index)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1912, in consume_optional\r\n take_action(action, args, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1840, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1024, in __call__\r\n parser.print_help()\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 2366, in print_help\r\n self._print_message(self.format_help(), file)\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\parser.py\", 
line 154, in format_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\parser.py\", line 225, in format_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 146, in show_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 664, in show_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 219, in __init__\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 240, in load\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help_loaders.py\", line 152, in versioned_load\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 178, in load\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 183, in _load_from_file\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 234, in _load_from_data\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 201, in _should_include_example\r\nknack.help.HelpAuthoringException: Help entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'.\r\n```\r\n-----\r\n\r\n\n\n\n\n[start of src/storage-preview/azext_storage_preview/_help.py]\n1 # coding=utf-8\n2 # --------------------------------------------------------------------------------------------\n3 # Copyright (c) Microsoft Corporation. All rights reserved.\n4 # Licensed under the MIT License. 
See License.txt in the project root for license information.\n5 # --------------------------------------------------------------------------------------------\n6 \n7 from knack.help_files import helps\n8 \n9 # pylint: disable=line-too-long, too-many-lines\n10 \n11 helps['storage account create'] = \"\"\"\n12 type: command\n13 short-summary: Create a storage account.\n14 long-summary: >\n15 The SKU of the storage account defaults to 'Standard_RAGRS'.\n16 examples:\n17 - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n18 text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS\n19 min_profile: latest\n20 - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n21 text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS\n22 max_profile: 2017-03-09-profile\n23 \"\"\"\n24 \n25 helps['storage account update'] = \"\"\"\n26 type: command\n27 short-summary: Update the properties of a storage account.\n28 \"\"\"\n29 \n30 helps['storage blob service-properties'] = \"\"\"\n31 type: group\n32 short-summary: Manage storage blob service properties.\n33 \"\"\"\n34 \n35 helps['storage blob service-properties update'] = \"\"\"\n36 type: command\n37 short-summary: Update storage blob service properties.\n38 \"\"\"\n39 \n40 helps['storage account management-policy'] = \"\"\"\n41 type: group\n42 short-summary: Manage storage account management policies.\n43 \"\"\"\n44 \n45 helps['storage account management-policy create'] = \"\"\"\n46 type: command\n47 short-summary: Creates the data policy rules associated with the specified storage account.\n48 \"\"\"\n49 \n50 helps['storage account management-policy update'] = \"\"\"\n51 type: command\n52 short-summary: Updates the data policy rules associated with the specified storage account.\n53 \"\"\"\n54 \n55 helps['storage azcopy'] = \"\"\"\n56 type: group\n57 short-summary: |\n58 [EXPERIMENTAL] Manage storage operations utilizing AzCopy.\n59 long-summary: |\n60 Open issues here: https://github.com/Azure/azure-storage-azcopy\n61 \"\"\"\n62 \n63 helps['storage azcopy blob'] = \"\"\"\n64 type: group\n65 short-summary: Manage object storage for unstructured data (blobs) using AzCopy.\n66 \"\"\"\n67 \n68 helps['storage azcopy blob upload'] = \"\"\"\n69 type: command\n70 short-summary: Upload blobs to a storage blob container using AzCopy.\n71 examples:\n72 - name: Upload a single blob to a container.\n73 text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/file\" -d NewBlob\n74 - name: Upload a directory to a container.\n75 text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/directory\" --recursive\n76 - name: Upload the contents of a directory to a container.\n77 text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/directory/*\" --recursive\n78 \"\"\"\n79 \n80 helps['storage azcopy blob download'] = \"\"\"\n81 type: command\n82 short-summary: Download blobs from a storage blob container using AzCopy.\n83 examples:\n84 - name: Download a single blob from a container.\n85 text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s \"path/to/blob\" -d \"path/to/file\"\n86 - name: Download a virtual directory from a container.\n87 text: storage azcopy blob 
download -c MyContainer --account-name MyStorageAccount -s \"path/to/virtual_directory\" -d \"download/path\" --recursive\n88 - name: Download the contents of a container onto a local file system.\n89 text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s * -d \"download/path\" --recursive\n90 \"\"\"\n91 \n92 helps['storage azcopy blob delete'] = \"\"\"\n93 type: command\n94 short-summary: Delete blobs from a storage blob container using AzCopy.\n95 examples:\n96 - name: Delete a single blob from a container.\n97 text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t TargetBlob\n98 - name: Delete all blobs from a container.\n99 text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount --recursive\n100 - name: Delete all blobs in a virtual directory.\n101 text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t \"path/to/virtual_directory\" --recursive\n102 \"\"\"\n103 \n104 helps['storage azcopy run-command'] = \"\"\"\n105 type: command\n106 short-summary: Run a command directly using the AzCopy CLI. Please use SAS tokens for authentication.\n107 \"\"\"\n108\n[end of src/storage-preview/azext_storage_preview/_help.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/src/storage-preview/azext_storage_preview/_help.py b/src/storage-preview/azext_storage_preview/_help.py\n--- a/src/storage-preview/azext_storage_preview/_help.py\n+++ b/src/storage-preview/azext_storage_preview/_help.py\n@@ -16,10 +16,6 @@\n examples:\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS\n- min_profile: latest\n- - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n- text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS\n- max_profile: 2017-03-09-profile\n \"\"\"\n \n helps['storage account update'] = \"\"\"\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/src/storage-preview/azext_storage_preview/_help.py b/src/storage-preview/azext_storage_preview/_help.py\\n--- 
a/src/storage-preview/azext_storage_preview/_help.py\\n+++ b/src/storage-preview/azext_storage_preview/_help.py\\n@@ -16,10 +16,6 @@\\n examples:\\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS\\n- min_profile: latest\\n- - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\\n- text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS\\n- max_profile: 2017-03-09-profile\\n \\\"\\\"\\\"\\n \\n helps['storage account update'] = \\\"\\\"\\\"\\n\", \"issue\": \"Help documentation returns error for \\\"min_profile\\\" and \\\"max_profile\\\"\\n### Extension name (the extension in question)\\r\\nstorage-preview\\r\\n\\r\\n### Description of issue (in as much detail as possible)\\r\\nReturns the following error when prompting for help via `az storage account -h`\\r\\n\\r\\n```\\r\\nHelp entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'.\\r\\nTraceback (most recent call last):\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\knack\\\\knack\\\\cli.py\\\", line 206, in invoke\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\azure-cli-core\\\\azure\\\\cli\\\\core\\\\commands\\\\__init__.py\\\", line 276, in execute\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1734, in parse_args\\r\\n args, argv = self.parse_known_args(args, namespace)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1766, in parse_known_args\\r\\n namespace, args = self._parse_known_args(args, namespace)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1954, in _parse_known_args\\r\\n positionals_end_index = consume_positionals(start_index)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1931, in consume_positionals\\r\\n take_action(action, args)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1840, in take_action\\r\\n action(self, namespace, argument_values, option_string)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1137, in __call__\\r\\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1766, in parse_known_args\\r\\n namespace, args = self._parse_known_args(args, namespace)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1954, in _parse_known_args\\r\\n positionals_end_index = consume_positionals(start_index)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1931, in consume_positionals\\r\\n take_action(action, args)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1840, in take_action\\r\\n action(self, namespace, argument_values, option_string)\\r\\n File 
\\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1137, in __call__\\r\\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1766, in parse_known_args\\r\\n namespace, args = self._parse_known_args(args, namespace)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1972, in _parse_known_args\\r\\n start_index = consume_optional(start_index)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1912, in consume_optional\\r\\n take_action(action, args, option_string)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1840, in take_action\\r\\n action(self, namespace, argument_values, option_string)\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 1024, in __call__\\r\\n parser.print_help()\\r\\n File \\\"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\CLI2\\\\lib\\\\argparse.py\\\", line 2366, in print_help\\r\\n self._print_message(self.format_help(), file)\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\azure-cli-core\\\\azure\\\\cli\\\\core\\\\parser.py\\\", line 154, in format_help\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\knack\\\\knack\\\\parser.py\\\", line 225, in format_help\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\azure-cli-core\\\\azure\\\\cli\\\\core\\\\_help.py\\\", line 146, in show_help\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\knack\\\\knack\\\\help.py\\\", line 664, in show_help\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\knack\\\\knack\\\\help.py\\\", line 219, in __init__\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\azure-cli-core\\\\azure\\\\cli\\\\core\\\\_help.py\\\", line 240, in load\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\azure-cli-core\\\\azure\\\\cli\\\\core\\\\_help_loaders.py\\\", line 152, in versioned_load\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\knack\\\\knack\\\\help.py\\\", line 178, in load\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\knack\\\\knack\\\\help.py\\\", line 183, in _load_from_file\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\azure-cli-core\\\\azure\\\\cli\\\\core\\\\_help.py\\\", line 234, in _load_from_data\\r\\n File \\\"C:\\\\Users\\\\VSSADM~1\\\\AppData\\\\Local\\\\Temp\\\\pip-install-moqk8ce9\\\\azure-cli-core\\\\azure\\\\cli\\\\core\\\\_help.py\\\", line 201, in _should_include_example\\r\\nknack.help.HelpAuthoringException: Help entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'.\\r\\n```\\r\\n-----\\r\\n\\r\\n\\n\", \"before_files\": [{\"content\": \"# coding=utf-8\\n# --------------------------------------------------------------------------------------------\\n# Copyright (c) Microsoft Corporation. All rights reserved.\\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\\n# --------------------------------------------------------------------------------------------\\n\\nfrom knack.help_files import helps\\n\\n# pylint: disable=line-too-long, too-many-lines\\n\\nhelps['storage account create'] = \\\"\\\"\\\"\\n type: command\\n short-summary: Create a storage account.\\n long-summary: >\\n The SKU of the storage account defaults to 'Standard_RAGRS'.\\n examples:\\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS\\n min_profile: latest\\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS\\n max_profile: 2017-03-09-profile\\n\\\"\\\"\\\"\\n\\nhelps['storage account update'] = \\\"\\\"\\\"\\n type: command\\n short-summary: Update the properties of a storage account.\\n\\\"\\\"\\\"\\n\\nhelps['storage blob service-properties'] = \\\"\\\"\\\"\\n type: group\\n short-summary: Manage storage blob service properties.\\n\\\"\\\"\\\"\\n\\nhelps['storage blob service-properties update'] = \\\"\\\"\\\"\\n type: command\\n short-summary: Update storage blob service properties.\\n\\\"\\\"\\\"\\n\\nhelps['storage account management-policy'] = \\\"\\\"\\\"\\n type: group\\n short-summary: Manage storage account management policies.\\n\\\"\\\"\\\"\\n\\nhelps['storage account management-policy create'] = \\\"\\\"\\\"\\n type: command\\n short-summary: Creates the data policy rules associated with the specified storage account.\\n\\\"\\\"\\\"\\n\\nhelps['storage account management-policy update'] = \\\"\\\"\\\"\\n type: command\\n short-summary: Updates the data policy rules associated with the specified storage account.\\n\\\"\\\"\\\"\\n\\nhelps['storage azcopy'] = \\\"\\\"\\\"\\n type: group\\n short-summary: |\\n [EXPERIMENTAL] Manage storage operations utilizing AzCopy.\\n long-summary: |\\n Open issues here: https://github.com/Azure/azure-storage-azcopy\\n\\\"\\\"\\\"\\n\\nhelps['storage azcopy blob'] = \\\"\\\"\\\"\\n type: group\\n short-summary: Manage object storage for unstructured data (blobs) using AzCopy.\\n\\\"\\\"\\\"\\n\\nhelps['storage azcopy blob upload'] = \\\"\\\"\\\"\\n type: command\\n short-summary: Upload blobs to a storage blob container using AzCopy.\\n examples:\\n - name: Upload a single blob to a container.\\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \\\"path/to/file\\\" -d NewBlob\\n - name: Upload a directory to a container.\\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \\\"path/to/directory\\\" --recursive\\n - name: Upload the contents of a directory to a container.\\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \\\"path/to/directory/*\\\" --recursive\\n\\\"\\\"\\\"\\n\\nhelps['storage azcopy blob download'] = \\\"\\\"\\\"\\n type: command\\n short-summary: Download blobs from a storage blob container using AzCopy.\\n examples:\\n - name: Download a single blob from a container.\\n text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s \\\"path/to/blob\\\" -d \\\"path/to/file\\\"\\n - name: Download a virtual directory from a container.\\n 
text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s \\\"path/to/virtual_directory\\\" -d \\\"download/path\\\" --recursive\\n - name: Download the contents of a container onto a local file system.\\n text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s * -d \\\"download/path\\\" --recursive\\n\\\"\\\"\\\"\\n\\nhelps['storage azcopy blob delete'] = \\\"\\\"\\\"\\n type: command\\n short-summary: Delete blobs from a storage blob container using AzCopy.\\n examples:\\n - name: Delete a single blob from a container.\\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t TargetBlob\\n - name: Delete all blobs from a container.\\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount --recursive\\n - name: Delete all blobs in a virtual directory.\\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t \\\"path/to/virtual_directory\\\" --recursive\\n\\\"\\\"\\\"\\n\\nhelps['storage azcopy run-command'] = \\\"\\\"\\\"\\n type: command\\n short-summary: Run a command directly using the AzCopy CLI. Please use SAS tokens for authentication.\\n\\\"\\\"\\\"\\n\", \"path\": \"src/storage-preview/azext_storage_preview/_help.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3417,"string":"3,417"},"num_tokens_diff":{"kind":"number","value":226,"string":"226"}}},{"rowIdx":18188,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_11962"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"svthalia__concrexit-2591"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nUpdating promo requests \n**Describe the bug**\r\nIt is not possible to update the promo request within a week of the publish date \r\n\r\n**How to reproduce** \r\nCreate a promo request \r\nTry to update the designer within a week to publish date \r\n\r\n**Expected behavior** \r\nThe request can only be created more than a week before the publishing date, but assigned to, status and drive folder can always be edited. \n\n\n\n[start of website/promotion/forms.py]\n1 from django import forms\n2 from django.utils import timezone\n3 \n4 from promotion.models import PromotionRequest\n5 from thaliawebsite.settings import PROMO_PUBLISH_DATE_TIMEDELTA\n6 \n7 \n8 class PromotionRequestForm(forms.ModelForm):\n9 class Meta:\n10 model = PromotionRequest\n11 fields = [\n12 \"event\",\n13 \"publish_date\",\n14 \"channel\",\n15 \"assigned_to\",\n16 \"status\",\n17 \"drive_folder\",\n18 \"remarks\",\n19 ]\n20 \n21 def clean_publish_date(self):\n22 publish_date = self.cleaned_data.get(\"publish_date\")\n23 create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n24 if timezone.localdate() > create_time_minimum:\n25 raise forms.ValidationError(\n26 \"Publish date cannot be within a week from now.\"\n27 )\n28 if \"publish_date\" in self.changed_data:\n29 create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n30 if timezone.localdate() > create_time_minimum:\n31 raise forms.ValidationError(\n32 \"Publish date cannot be within a week from now.\"\n33 )\n34 return publish_date\n35\n[end of website/promotion/forms.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/website/promotion/forms.py b/website/promotion/forms.py\n--- a/website/promotion/forms.py\n+++ b/website/promotion/forms.py\n@@ -20,11 +20,6 @@\n \n def clean_publish_date(self):\n publish_date = self.cleaned_data.get(\"publish_date\")\n- create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n- if timezone.localdate() > create_time_minimum:\n- raise forms.ValidationError(\n- \"Publish date cannot be within a week from now.\"\n- )\n if \"publish_date\" in self.changed_data:\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/website/promotion/forms.py b/website/promotion/forms.py\\n--- a/website/promotion/forms.py\\n+++ b/website/promotion/forms.py\\n@@ -20,11 +20,6 @@\\n \\n def clean_publish_date(self):\\n publish_date = self.cleaned_data.get(\\\"publish_date\\\")\\n- create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\\n- if timezone.localdate() > create_time_minimum:\\n- raise forms.ValidationError(\\n- \\\"Publish date cannot be within a week from now.\\\"\\n- )\\n if \\\"publish_date\\\" in self.changed_data:\\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\\n if timezone.localdate() > create_time_minimum:\\n\", \"issue\": \"Updating promo requests \\n**Describe the bug**\\r\\nIt is not possible to update the promo request within a week of the publish date \\r\\n\\r\\n**How to reproduce** \\r\\nCreate a promo request \\r\\nTry to update the designer within a week to publish date \\r\\n\\r\\n**Expected behavior** \\r\\nThe request can only be created more than a week before the publishing date, but assigned to, status and drive folder can always be edited. 
\\n\", \"before_files\": [{\"content\": \"from django import forms\\nfrom django.utils import timezone\\n\\nfrom promotion.models import PromotionRequest\\nfrom thaliawebsite.settings import PROMO_PUBLISH_DATE_TIMEDELTA\\n\\n\\nclass PromotionRequestForm(forms.ModelForm):\\n class Meta:\\n model = PromotionRequest\\n fields = [\\n \\\"event\\\",\\n \\\"publish_date\\\",\\n \\\"channel\\\",\\n \\\"assigned_to\\\",\\n \\\"status\\\",\\n \\\"drive_folder\\\",\\n \\\"remarks\\\",\\n ]\\n\\n def clean_publish_date(self):\\n publish_date = self.cleaned_data.get(\\\"publish_date\\\")\\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\\n if timezone.localdate() > create_time_minimum:\\n raise forms.ValidationError(\\n \\\"Publish date cannot be within a week from now.\\\"\\n )\\n if \\\"publish_date\\\" in self.changed_data:\\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\\n if timezone.localdate() > create_time_minimum:\\n raise forms.ValidationError(\\n \\\"Publish date cannot be within a week from now.\\\"\\n )\\n return publish_date\\n\", \"path\": \"website/promotion/forms.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":900,"string":"900"},"num_tokens_diff":{"kind":"number","value":160,"string":"160"}}},{"rowIdx":18189,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_33629"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"pre-commit__pre-commit-372"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nNot-found executable crashes framework\nThis was introduced with the new exe logic in 0.8.0\n\nHere's a simple reproduction:\n\n``` yaml\n- repo: local\n hooks:\n - id: test\n name: test\n language: system\n entry: i-dont-exist-lol\n files: '\\.py$'\n```\n\n```\n$ pre-commit run --all-files\ntest.....................................................................An unexpected error has occurred: OSError: Executable i-dont-exist-lol not found\nCheck the log at ~/.pre-commit/pre-commit.log\n```\n\n\n\n\n[start of pre_commit/util.py]\n1 from __future__ import unicode_literals\n2 \n3 import contextlib\n4 import errno\n5 import functools\n6 import os\n7 import os.path\n8 import shutil\n9 import stat\n10 import subprocess\n11 import tarfile\n12 import tempfile\n13 \n14 import pkg_resources\n15 \n16 from pre_commit import five\n17 from pre_commit import parse_shebang\n18 \n19 \n20 @contextlib.contextmanager\n21 def cwd(path):\n22 original_cwd = os.getcwd()\n23 os.chdir(path)\n24 try:\n25 yield\n26 finally:\n27 os.chdir(original_cwd)\n28 \n29 \n30 def mkdirp(path):\n31 try:\n32 os.makedirs(path)\n33 except OSError:\n34 if not os.path.exists(path):\n35 raise\n36 \n37 \n38 def memoize_by_cwd(func):\n39 \"\"\"Memoize a function call based on os.getcwd().\"\"\"\n40 @functools.wraps(func)\n41 def wrapper(*args):\n42 cwd = os.getcwd()\n43 key = (cwd,) + args\n44 try:\n45 return wrapper._cache[key]\n46 except KeyError:\n47 ret = wrapper._cache[key] = func(*args)\n48 return ret\n49 \n50 wrapper._cache = {}\n51 \n52 return wrapper\n53 \n54 \n55 @contextlib.contextmanager\n56 def clean_path_on_failure(path):\n57 \"\"\"Cleans up the directory on an exceptional failure.\"\"\"\n58 try:\n59 yield\n60 except BaseException:\n61 if os.path.exists(path):\n62 rmtree(path)\n63 raise\n64 \n65 \n66 @contextlib.contextmanager\n67 def noop_context():\n68 yield\n69 \n70 \n71 def 
no_git_env():\n72 # Too many bugs dealing with environment variables and GIT:\n73 # https://github.com/pre-commit/pre-commit/issues/300\n74 # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n75 # pre-commit hooks\n76 # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n77 # while running pre-commit hooks in submodules.\n78 # GIT_DIR: Causes git clone to clone wrong thing\n79 # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n80 return dict(\n81 (k, v) for k, v in os.environ.items() if not k.startswith('GIT_')\n82 )\n83 \n84 \n85 @contextlib.contextmanager\n86 def tarfile_open(*args, **kwargs):\n87 \"\"\"Compatibility layer because python2.6\"\"\"\n88 tf = tarfile.open(*args, **kwargs)\n89 try:\n90 yield tf\n91 finally:\n92 tf.close()\n93 \n94 \n95 @contextlib.contextmanager\n96 def tmpdir():\n97 \"\"\"Contextmanager to create a temporary directory. It will be cleaned up\n98 afterwards.\n99 \"\"\"\n100 tempdir = tempfile.mkdtemp()\n101 try:\n102 yield tempdir\n103 finally:\n104 rmtree(tempdir)\n105 \n106 \n107 def resource_filename(filename):\n108 return pkg_resources.resource_filename(\n109 'pre_commit',\n110 os.path.join('resources', filename),\n111 )\n112 \n113 \n114 def make_executable(filename):\n115 original_mode = os.stat(filename).st_mode\n116 os.chmod(\n117 filename,\n118 original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n119 )\n120 \n121 \n122 class CalledProcessError(RuntimeError):\n123 def __init__(self, returncode, cmd, expected_returncode, output=None):\n124 super(CalledProcessError, self).__init__(\n125 returncode, cmd, expected_returncode, output,\n126 )\n127 self.returncode = returncode\n128 self.cmd = cmd\n129 self.expected_returncode = expected_returncode\n130 self.output = output\n131 \n132 def to_bytes(self):\n133 output = []\n134 for maybe_text in self.output:\n135 if maybe_text:\n136 output.append(\n137 b'\\n ' +\n138 five.to_bytes(maybe_text).replace(b'\\n', b'\\n ')\n139 )\n140 else:\n141 output.append(b'(none)')\n142 \n143 return b''.join((\n144 five.to_bytes(\n145 'Command: {0!r}\\n'\n146 'Return code: {1}\\n'\n147 'Expected return code: {2}\\n'.format(\n148 self.cmd, self.returncode, self.expected_returncode\n149 )\n150 ),\n151 b'Output: ', output[0], b'\\n',\n152 b'Errors: ', output[1], b'\\n',\n153 ))\n154 \n155 def to_text(self):\n156 return self.to_bytes().decode('UTF-8')\n157 \n158 if five.PY3: # pragma: no cover (py3)\n159 __bytes__ = to_bytes\n160 __str__ = to_text\n161 else: # pragma: no cover (py2)\n162 __str__ = to_bytes\n163 __unicode__ = to_text\n164 \n165 \n166 def cmd_output(*cmd, **kwargs):\n167 retcode = kwargs.pop('retcode', 0)\n168 encoding = kwargs.pop('encoding', 'UTF-8')\n169 __popen = kwargs.pop('__popen', subprocess.Popen)\n170 \n171 popen_kwargs = {\n172 'stdin': subprocess.PIPE,\n173 'stdout': subprocess.PIPE,\n174 'stderr': subprocess.PIPE,\n175 }\n176 \n177 # py2/py3 on windows are more strict about the types here\n178 cmd = tuple(five.n(arg) for arg in cmd)\n179 kwargs['env'] = dict(\n180 (five.n(key), five.n(value))\n181 for key, value in kwargs.pop('env', {}).items()\n182 ) or None\n183 \n184 cmd = parse_shebang.normalize_cmd(cmd)\n185 \n186 popen_kwargs.update(kwargs)\n187 proc = __popen(cmd, **popen_kwargs)\n188 stdout, stderr = proc.communicate()\n189 if encoding is not None and stdout is not None:\n190 stdout = stdout.decode(encoding)\n191 if encoding is not None and stderr is not None:\n192 stderr = stderr.decode(encoding)\n193 returncode = proc.returncode\n194 
\n195 if retcode is not None and retcode != returncode:\n196 raise CalledProcessError(\n197 returncode, cmd, retcode, output=(stdout, stderr),\n198 )\n199 \n200 return proc.returncode, stdout, stderr\n201 \n202 \n203 def rmtree(path):\n204 \"\"\"On windows, rmtree fails for readonly dirs.\"\"\"\n205 def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)\n206 excvalue = exc[1]\n207 if (\n208 func in (os.rmdir, os.remove, os.unlink) and\n209 excvalue.errno == errno.EACCES\n210 ):\n211 os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n212 func(path)\n213 else:\n214 raise\n215 shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)\n216\n[end of pre_commit/util.py]\n[start of pre_commit/parse_shebang.py]\n1 from __future__ import absolute_import\n2 from __future__ import unicode_literals\n3 \n4 import io\n5 import os.path\n6 import shlex\n7 import string\n8 \n9 from pre_commit import five\n10 \n11 \n12 printable = frozenset(string.printable)\n13 \n14 \n15 def parse_bytesio(bytesio):\n16 \"\"\"Parse the shebang from a file opened for reading binary.\"\"\"\n17 if bytesio.read(2) != b'#!':\n18 return ()\n19 first_line = bytesio.readline()\n20 try:\n21 first_line = first_line.decode('US-ASCII')\n22 except UnicodeDecodeError:\n23 return ()\n24 \n25 # Require only printable ascii\n26 for c in first_line:\n27 if c not in printable:\n28 return ()\n29 \n30 # shlex.split is horribly broken in py26 on text strings\n31 cmd = tuple(shlex.split(five.n(first_line)))\n32 if cmd[0] == '/usr/bin/env':\n33 cmd = cmd[1:]\n34 return cmd\n35 \n36 \n37 def parse_filename(filename):\n38 \"\"\"Parse the shebang given a filename.\"\"\"\n39 if not os.path.exists(filename) or not os.access(filename, os.X_OK):\n40 return ()\n41 \n42 with io.open(filename, 'rb') as f:\n43 return parse_bytesio(f)\n44 \n45 \n46 def find_executable(exe, _environ=None):\n47 exe = os.path.normpath(exe)\n48 if os.sep in exe:\n49 return exe\n50 \n51 environ = _environ if _environ is not None else os.environ\n52 \n53 if 'PATHEXT' in environ:\n54 possible_exe_names = (exe,) + tuple(\n55 exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep)\n56 )\n57 else:\n58 possible_exe_names = (exe,)\n59 \n60 for path in environ.get('PATH', '').split(os.pathsep):\n61 for possible_exe_name in possible_exe_names:\n62 joined = os.path.join(path, possible_exe_name)\n63 if os.path.isfile(joined) and os.access(joined, os.X_OK):\n64 return joined\n65 else:\n66 return None\n67 \n68 \n69 def normexe(orig_exe):\n70 if os.sep not in orig_exe:\n71 exe = find_executable(orig_exe)\n72 if exe is None:\n73 raise OSError('Executable {0} not found'.format(orig_exe))\n74 return exe\n75 else:\n76 return orig_exe\n77 \n78 \n79 def normalize_cmd(cmd):\n80 \"\"\"Fixes for the following issues on windows\n81 - http://bugs.python.org/issue8557\n82 - windows does not parse shebangs\n83 \n84 This function also makes deep-path shebangs work just fine\n85 \"\"\"\n86 # Use PATH to determine the executable\n87 exe = normexe(cmd[0])\n88 \n89 # Figure out the shebang from the resulting command\n90 cmd = parse_filename(exe) + (exe,) + cmd[1:]\n91 \n92 # This could have given us back another bare executable\n93 exe = normexe(cmd[0])\n94 \n95 return (exe,) + cmd[1:]\n96\n[end of pre_commit/parse_shebang.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/pre_commit/parse_shebang.py b/pre_commit/parse_shebang.py\n--- a/pre_commit/parse_shebang.py\n+++ b/pre_commit/parse_shebang.py\n@@ -12,6 +12,10 @@\n printable = frozenset(string.printable)\n \n \n+class ExecutableNotFoundError(OSError):\n+ pass\n+\n+\n def parse_bytesio(bytesio):\n \"\"\"Parse the shebang from a file opened for reading binary.\"\"\"\n if bytesio.read(2) != b'#!':\n@@ -70,7 +74,9 @@\n if os.sep not in orig_exe:\n exe = find_executable(orig_exe)\n if exe is None:\n- raise OSError('Executable {0} not found'.format(orig_exe))\n+ raise ExecutableNotFoundError(\n+ 'Executable `{0}` not found'.format(orig_exe),\n+ )\n return exe\n else:\n return orig_exe\ndiff --git a/pre_commit/util.py b/pre_commit/util.py\n--- a/pre_commit/util.py\n+++ b/pre_commit/util.py\n@@ -181,23 +181,26 @@\n for key, value in kwargs.pop('env', {}).items()\n ) or None\n \n- cmd = parse_shebang.normalize_cmd(cmd)\n-\n- popen_kwargs.update(kwargs)\n- proc = __popen(cmd, **popen_kwargs)\n- stdout, stderr = proc.communicate()\n- if encoding is not None and stdout is not None:\n- stdout = stdout.decode(encoding)\n- if encoding is not None and stderr is not None:\n- stderr = stderr.decode(encoding)\n- returncode = proc.returncode\n+ try:\n+ cmd = parse_shebang.normalize_cmd(cmd)\n+ except parse_shebang.ExecutableNotFoundError as e:\n+ returncode, stdout, stderr = (-1, e.args[0].encode('UTF-8'), b'')\n+ else:\n+ popen_kwargs.update(kwargs)\n+ proc = __popen(cmd, **popen_kwargs)\n+ stdout, stderr = proc.communicate()\n+ if encoding is not None and stdout is not None:\n+ stdout = stdout.decode(encoding)\n+ if encoding is not None and stderr is not None:\n+ stderr = stderr.decode(encoding)\n+ returncode = proc.returncode\n \n if retcode is not None and retcode != returncode:\n raise CalledProcessError(\n returncode, cmd, retcode, output=(stdout, stderr),\n )\n \n- return proc.returncode, stdout, stderr\n+ return returncode, stdout, stderr\n \n \n def rmtree(path):\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/pre_commit/parse_shebang.py b/pre_commit/parse_shebang.py\\n--- a/pre_commit/parse_shebang.py\\n+++ b/pre_commit/parse_shebang.py\\n@@ -12,6 +12,10 @@\\n printable = frozenset(string.printable)\\n \\n \\n+class ExecutableNotFoundError(OSError):\\n+ pass\\n+\\n+\\n def parse_bytesio(bytesio):\\n \\\"\\\"\\\"Parse the shebang from a file opened for reading binary.\\\"\\\"\\\"\\n if bytesio.read(2) != b'#!':\\n@@ -70,7 +74,9 
@@\\n if os.sep not in orig_exe:\\n exe = find_executable(orig_exe)\\n if exe is None:\\n- raise OSError('Executable {0} not found'.format(orig_exe))\\n+ raise ExecutableNotFoundError(\\n+ 'Executable `{0}` not found'.format(orig_exe),\\n+ )\\n return exe\\n else:\\n return orig_exe\\ndiff --git a/pre_commit/util.py b/pre_commit/util.py\\n--- a/pre_commit/util.py\\n+++ b/pre_commit/util.py\\n@@ -181,23 +181,26 @@\\n for key, value in kwargs.pop('env', {}).items()\\n ) or None\\n \\n- cmd = parse_shebang.normalize_cmd(cmd)\\n-\\n- popen_kwargs.update(kwargs)\\n- proc = __popen(cmd, **popen_kwargs)\\n- stdout, stderr = proc.communicate()\\n- if encoding is not None and stdout is not None:\\n- stdout = stdout.decode(encoding)\\n- if encoding is not None and stderr is not None:\\n- stderr = stderr.decode(encoding)\\n- returncode = proc.returncode\\n+ try:\\n+ cmd = parse_shebang.normalize_cmd(cmd)\\n+ except parse_shebang.ExecutableNotFoundError as e:\\n+ returncode, stdout, stderr = (-1, e.args[0].encode('UTF-8'), b'')\\n+ else:\\n+ popen_kwargs.update(kwargs)\\n+ proc = __popen(cmd, **popen_kwargs)\\n+ stdout, stderr = proc.communicate()\\n+ if encoding is not None and stdout is not None:\\n+ stdout = stdout.decode(encoding)\\n+ if encoding is not None and stderr is not None:\\n+ stderr = stderr.decode(encoding)\\n+ returncode = proc.returncode\\n \\n if retcode is not None and retcode != returncode:\\n raise CalledProcessError(\\n returncode, cmd, retcode, output=(stdout, stderr),\\n )\\n \\n- return proc.returncode, stdout, stderr\\n+ return returncode, stdout, stderr\\n \\n \\n def rmtree(path):\\n\", \"issue\": \"Not-found executable crashes framework\\nThis was introduced with the new exe logic in 0.8.0\\n\\nHere's a simple reproduction:\\n\\n``` yaml\\n- repo: local\\n hooks:\\n - id: test\\n name: test\\n language: system\\n entry: i-dont-exist-lol\\n files: '\\\\.py$'\\n```\\n\\n```\\n$ pre-commit run --all-files\\ntest.....................................................................An unexpected error has occurred: OSError: Executable i-dont-exist-lol not found\\nCheck the log at ~/.pre-commit/pre-commit.log\\n```\\n\\n\", \"before_files\": [{\"content\": \"from __future__ import unicode_literals\\n\\nimport contextlib\\nimport errno\\nimport functools\\nimport os\\nimport os.path\\nimport shutil\\nimport stat\\nimport subprocess\\nimport tarfile\\nimport tempfile\\n\\nimport pkg_resources\\n\\nfrom pre_commit import five\\nfrom pre_commit import parse_shebang\\n\\n\\n@contextlib.contextmanager\\ndef cwd(path):\\n original_cwd = os.getcwd()\\n os.chdir(path)\\n try:\\n yield\\n finally:\\n os.chdir(original_cwd)\\n\\n\\ndef mkdirp(path):\\n try:\\n os.makedirs(path)\\n except OSError:\\n if not os.path.exists(path):\\n raise\\n\\n\\ndef memoize_by_cwd(func):\\n \\\"\\\"\\\"Memoize a function call based on os.getcwd().\\\"\\\"\\\"\\n @functools.wraps(func)\\n def wrapper(*args):\\n cwd = os.getcwd()\\n key = (cwd,) + args\\n try:\\n return wrapper._cache[key]\\n except KeyError:\\n ret = wrapper._cache[key] = func(*args)\\n return ret\\n\\n wrapper._cache = {}\\n\\n return wrapper\\n\\n\\n@contextlib.contextmanager\\ndef clean_path_on_failure(path):\\n \\\"\\\"\\\"Cleans up the directory on an exceptional failure.\\\"\\\"\\\"\\n try:\\n yield\\n except BaseException:\\n if os.path.exists(path):\\n rmtree(path)\\n raise\\n\\n\\n@contextlib.contextmanager\\ndef noop_context():\\n yield\\n\\n\\ndef no_git_env():\\n # Too many bugs dealing with environment variables and GIT:\\n # 
https://github.com/pre-commit/pre-commit/issues/300\\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\\n # pre-commit hooks\\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\\n # while running pre-commit hooks in submodules.\\n # GIT_DIR: Causes git clone to clone wrong thing\\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\\n return dict(\\n (k, v) for k, v in os.environ.items() if not k.startswith('GIT_')\\n )\\n\\n\\n@contextlib.contextmanager\\ndef tarfile_open(*args, **kwargs):\\n \\\"\\\"\\\"Compatibility layer because python2.6\\\"\\\"\\\"\\n tf = tarfile.open(*args, **kwargs)\\n try:\\n yield tf\\n finally:\\n tf.close()\\n\\n\\n@contextlib.contextmanager\\ndef tmpdir():\\n \\\"\\\"\\\"Contextmanager to create a temporary directory. It will be cleaned up\\n afterwards.\\n \\\"\\\"\\\"\\n tempdir = tempfile.mkdtemp()\\n try:\\n yield tempdir\\n finally:\\n rmtree(tempdir)\\n\\n\\ndef resource_filename(filename):\\n return pkg_resources.resource_filename(\\n 'pre_commit',\\n os.path.join('resources', filename),\\n )\\n\\n\\ndef make_executable(filename):\\n original_mode = os.stat(filename).st_mode\\n os.chmod(\\n filename,\\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\\n )\\n\\n\\nclass CalledProcessError(RuntimeError):\\n def __init__(self, returncode, cmd, expected_returncode, output=None):\\n super(CalledProcessError, self).__init__(\\n returncode, cmd, expected_returncode, output,\\n )\\n self.returncode = returncode\\n self.cmd = cmd\\n self.expected_returncode = expected_returncode\\n self.output = output\\n\\n def to_bytes(self):\\n output = []\\n for maybe_text in self.output:\\n if maybe_text:\\n output.append(\\n b'\\\\n ' +\\n five.to_bytes(maybe_text).replace(b'\\\\n', b'\\\\n ')\\n )\\n else:\\n output.append(b'(none)')\\n\\n return b''.join((\\n five.to_bytes(\\n 'Command: {0!r}\\\\n'\\n 'Return code: {1}\\\\n'\\n 'Expected return code: {2}\\\\n'.format(\\n self.cmd, self.returncode, self.expected_returncode\\n )\\n ),\\n b'Output: ', output[0], b'\\\\n',\\n b'Errors: ', output[1], b'\\\\n',\\n ))\\n\\n def to_text(self):\\n return self.to_bytes().decode('UTF-8')\\n\\n if five.PY3: # pragma: no cover (py3)\\n __bytes__ = to_bytes\\n __str__ = to_text\\n else: # pragma: no cover (py2)\\n __str__ = to_bytes\\n __unicode__ = to_text\\n\\n\\ndef cmd_output(*cmd, **kwargs):\\n retcode = kwargs.pop('retcode', 0)\\n encoding = kwargs.pop('encoding', 'UTF-8')\\n __popen = kwargs.pop('__popen', subprocess.Popen)\\n\\n popen_kwargs = {\\n 'stdin': subprocess.PIPE,\\n 'stdout': subprocess.PIPE,\\n 'stderr': subprocess.PIPE,\\n }\\n\\n # py2/py3 on windows are more strict about the types here\\n cmd = tuple(five.n(arg) for arg in cmd)\\n kwargs['env'] = dict(\\n (five.n(key), five.n(value))\\n for key, value in kwargs.pop('env', {}).items()\\n ) or None\\n\\n cmd = parse_shebang.normalize_cmd(cmd)\\n\\n popen_kwargs.update(kwargs)\\n proc = __popen(cmd, **popen_kwargs)\\n stdout, stderr = proc.communicate()\\n if encoding is not None and stdout is not None:\\n stdout = stdout.decode(encoding)\\n if encoding is not None and stderr is not None:\\n stderr = stderr.decode(encoding)\\n returncode = proc.returncode\\n\\n if retcode is not None and retcode != returncode:\\n raise CalledProcessError(\\n returncode, cmd, retcode, output=(stdout, stderr),\\n )\\n\\n return proc.returncode, stdout, stderr\\n\\n\\ndef rmtree(path):\\n \\\"\\\"\\\"On windows, rmtree fails for readonly dirs.\\\"\\\"\\\"\\n def 
handle_remove_readonly(func, path, exc): # pragma: no cover (windows)\\n excvalue = exc[1]\\n if (\\n func in (os.rmdir, os.remove, os.unlink) and\\n excvalue.errno == errno.EACCES\\n ):\\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\\n func(path)\\n else:\\n raise\\n shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)\\n\", \"path\": \"pre_commit/util.py\"}, {\"content\": \"from __future__ import absolute_import\\nfrom __future__ import unicode_literals\\n\\nimport io\\nimport os.path\\nimport shlex\\nimport string\\n\\nfrom pre_commit import five\\n\\n\\nprintable = frozenset(string.printable)\\n\\n\\ndef parse_bytesio(bytesio):\\n \\\"\\\"\\\"Parse the shebang from a file opened for reading binary.\\\"\\\"\\\"\\n if bytesio.read(2) != b'#!':\\n return ()\\n first_line = bytesio.readline()\\n try:\\n first_line = first_line.decode('US-ASCII')\\n except UnicodeDecodeError:\\n return ()\\n\\n # Require only printable ascii\\n for c in first_line:\\n if c not in printable:\\n return ()\\n\\n # shlex.split is horribly broken in py26 on text strings\\n cmd = tuple(shlex.split(five.n(first_line)))\\n if cmd[0] == '/usr/bin/env':\\n cmd = cmd[1:]\\n return cmd\\n\\n\\ndef parse_filename(filename):\\n \\\"\\\"\\\"Parse the shebang given a filename.\\\"\\\"\\\"\\n if not os.path.exists(filename) or not os.access(filename, os.X_OK):\\n return ()\\n\\n with io.open(filename, 'rb') as f:\\n return parse_bytesio(f)\\n\\n\\ndef find_executable(exe, _environ=None):\\n exe = os.path.normpath(exe)\\n if os.sep in exe:\\n return exe\\n\\n environ = _environ if _environ is not None else os.environ\\n\\n if 'PATHEXT' in environ:\\n possible_exe_names = (exe,) + tuple(\\n exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep)\\n )\\n else:\\n possible_exe_names = (exe,)\\n\\n for path in environ.get('PATH', '').split(os.pathsep):\\n for possible_exe_name in possible_exe_names:\\n joined = os.path.join(path, possible_exe_name)\\n if os.path.isfile(joined) and os.access(joined, os.X_OK):\\n return joined\\n else:\\n return None\\n\\n\\ndef normexe(orig_exe):\\n if os.sep not in orig_exe:\\n exe = find_executable(orig_exe)\\n if exe is None:\\n raise OSError('Executable {0} not found'.format(orig_exe))\\n return exe\\n else:\\n return orig_exe\\n\\n\\ndef normalize_cmd(cmd):\\n \\\"\\\"\\\"Fixes for the following issues on windows\\n - http://bugs.python.org/issue8557\\n - windows does not parse shebangs\\n\\n This function also makes deep-path shebangs work just fine\\n \\\"\\\"\\\"\\n # Use PATH to determine the executable\\n exe = normexe(cmd[0])\\n\\n # Figure out the shebang from the resulting command\\n cmd = parse_filename(exe) + (exe,) + cmd[1:]\\n\\n # This could have given us back another bare executable\\n exe = normexe(cmd[0])\\n\\n return (exe,) + cmd[1:]\\n\", \"path\": \"pre_commit/parse_shebang.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3419,"string":"3,419"},"num_tokens_diff":{"kind":"number","value":569,"string":"569"}}},{"rowIdx":18190,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_29010"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"Flexget__Flexget-2284"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\ndescargas2020 plugin broken\n### Expected behaviour:\r\nit downloads torrent properly.\r\n\r\n### Actual 
behaviour:\r\n\r\n### Steps to reproduce:\r\nadd this rss http://feeds.feedburner.com/descargas2020new\r\nflexget execute\r\nyou will get:\r\n2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/\r\n2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None)\r\n2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved.\r\n\r\n#### Config:\r\n\r\n```\r\ntasks:\r\n series_sigo:\r\n rss:\r\n url: http://feeds.feedburner.com/descargas2020new\r\n link: link\r\n all_series: yes\r\n seen: local\r\n regexp:\r\n reject:\r\n - PCDVD\r\n accept_all: yes\r\n thetvdb_lookup: yes\r\n no_entries_ok: yes\r\n set:\r\n filename: \"{{title|pathscrub}}.torrent\"\r\n download: /home/flexget/torrent/\r\n```\r\n \r\n#### Log:\r\n\r\n2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/\r\n2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None)\r\n2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved.\r\n\r\n```\r\npaste log output here\r\n```\r\n\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 2.17.20\r\n- Python version: 2.7.13\r\n- Installation method: i don't remember\r\n- Using daemon (yes/no): no\r\n- OS and version: debian 9 x64\r\n- Link to crash log:\r\n\r\n\r\n\n\n\n\n[start of flexget/plugins/sites/descargas2020.py]\n1 from __future__ import unicode_literals, division, absolute_import\n2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n3 \n4 import logging\n5 import re\n6 \n7 from flexget import plugin\n8 from flexget.event import event\n9 from flexget.plugins.internal.urlrewriting import UrlRewritingError\n10 from flexget.utils.requests import Session, TimedLimiter\n11 from flexget.utils.soup import get_soup\n12 from flexget.utils import requests\n13 \n14 from flexget.entry import Entry\n15 from flexget.utils.search import normalize_unicode\n16 \n17 import unicodedata\n18 \n19 log = logging.getLogger('descargas2020')\n20 \n21 requests = Session()\n22 requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})\n23 requests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))\n24 \n25 DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent'\n26 \n27 class UrlRewriteDescargas2020(object):\n28 \"\"\"Descargas2020 urlrewriter and search.\"\"\"\n29 \n30 schema = {\n31 'type': 'boolean',\n32 'default': False\n33 }\n34 \n35 # urlrewriter API\n36 def url_rewritable(self, task, entry):\n37 url = entry['url']\n38 rewritable_regex = '^http:\\/\\/(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com\\/.*'\n39 return re.match(rewritable_regex, url) and not url.endswith('.torrent')\n40 \n41 # urlrewriter API\n42 def url_rewrite(self, task, entry):\n43 entry['url'] = self.parse_download_page(entry['url'], task)\n44 \n45 @plugin.internet(log)\n46 def parse_download_page(self, url, task):\n47 
log.verbose('Descargas2020 URL: %s', url)\n48 \n49 try:\n50 page = requests.get(url)\n51 except requests.exceptions.RequestException as e:\n52 raise UrlRewritingError(e)\n53 try:\n54 soup = get_soup(page.text)\n55 except Exception as e:\n56 raise UrlRewritingError(e)\n57 \n58 torrent_id = None\n59 url_format = DESCARGAS2020_TORRENT_FORMAT\n60 \n61 torrent_id_prog = re.compile(\"(?:parametros\\s*=\\s*\\n?)\\s*{\\s*\\n(?:\\s*'\\w+'\\s*:.*\\n)+\\s*'(?:torrentID|id)\"\n62 \"'\\s*:\\s*'(\\d+)'\")\n63 torrent_ids = soup.findAll(text=torrent_id_prog)\n64 if torrent_ids:\n65 match = torrent_id_prog.search(torrent_ids[0])\n66 if match:\n67 torrent_id = match.group(1)\n68 if not torrent_id:\n69 log.debug('torrent ID not found, searching openTorrent script')\n70 torrent_id_prog = re.compile('function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\"(.*\\/\\d+_-.*[^\\/])\\/?\\\";')\n71 torrent_ids = soup.findAll(text=torrent_id_prog)\n72 if torrent_ids:\n73 match = torrent_id_prog.search(torrent_ids[0])\n74 if match:\n75 torrent_id = match.group(2)\n76 return torrent_id.replace('descargar-torrent', 'download') + '.torrent'\n77 \n78 if not torrent_id:\n79 raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)\n80 \n81 return url_format.format(torrent_id)\n82 \n83 def search(self, task, entry, config=None):\n84 if not config:\n85 log.debug('Descargas2020 disabled')\n86 return set()\n87 log.debug('Search Descargas2020')\n88 url_search = 'http://descargas2020.com/buscar'\n89 results = set()\n90 for search_string in entry.get('search_strings', [entry['title']]):\n91 query = normalize_unicode(search_string)\n92 query = re.sub(' \\(\\d\\d\\d\\d\\)$', '', query)\n93 log.debug('Searching Descargas2020 %s', query)\n94 query = unicodedata.normalize('NFD', query).encode('ascii', 'ignore')\n95 data = {'q': query}\n96 try:\n97 response = task.requests.post(url_search, data=data)\n98 except requests.RequestException as e:\n99 log.error('Error searching Descargas2020: %s', e)\n100 return results\n101 content = response.content\n102 soup = get_soup(content)\n103 soup2 = soup.find('ul', attrs={'class': 'buscar-list'})\n104 children = soup2.findAll('a', href=True)\n105 for child in children:\n106 entry = Entry()\n107 entry['url'] = child['href']\n108 entry_title = child.find('h2')\n109 if entry_title is None:\n110 log.debug('Ignore empty entry')\n111 continue\n112 entry_title = entry_title.text\n113 if not entry_title:\n114 continue\n115 try:\n116 entry_quality_lan = re.search('.+ \\[([^\\]]+)\\](\\[[^\\]]+\\])+$', entry_title).group(1)\n117 except AttributeError:\n118 log.debug('Quality not found')\n119 continue\n120 entry_title = re.sub(' \\[.+]$', '', entry_title)\n121 entry['title'] = entry_title + ' ' + entry_quality_lan\n122 results.add(entry)\n123 log.debug('Finish search Descargas2020 with %d entries', len(results))\n124 return results\n125 \n126 @event('plugin.register')\n127 def register_plugin():\n128 plugin.register(UrlRewriteDescargas2020, 'descargas2020', interfaces=['urlrewriter', 'search'], api_ver=2)\n129\n[end of flexget/plugins/sites/descargas2020.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/flexget/plugins/sites/descargas2020.py b/flexget/plugins/sites/descargas2020.py\n--- a/flexget/plugins/sites/descargas2020.py\n+++ b/flexget/plugins/sites/descargas2020.py\n@@ -22,7 +22,7 @@\n requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})\n requests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))\n \n-DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent'\n+DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/download/{:0>6}.torrent'\n \n class UrlRewriteDescargas2020(object):\n \"\"\"Descargas2020 urlrewriter and search.\"\"\"\n@@ -67,13 +67,10 @@\n torrent_id = match.group(1)\n if not torrent_id:\n log.debug('torrent ID not found, searching openTorrent script')\n- torrent_id_prog = re.compile('function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\"(.*\\/\\d+_-.*[^\\/])\\/?\\\";')\n- torrent_ids = soup.findAll(text=torrent_id_prog)\n- if torrent_ids:\n- match = torrent_id_prog.search(torrent_ids[0])\n- if match:\n- torrent_id = match.group(2)\n- return torrent_id.replace('descargar-torrent', 'download') + '.torrent'\n+ match = re.search('function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\".*\\/(\\d+.*)\\\";',\n+ page.text, re.MULTILINE)\n+ if match:\n+ torrent_id = match.group(2).rstrip('/')\n \n if not torrent_id:\n raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/flexget/plugins/sites/descargas2020.py b/flexget/plugins/sites/descargas2020.py\\n--- a/flexget/plugins/sites/descargas2020.py\\n+++ b/flexget/plugins/sites/descargas2020.py\\n@@ -22,7 +22,7 @@\\n requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})\\n requests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))\\n \\n-DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent'\\n+DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/download/{:0>6}.torrent'\\n \\n class UrlRewriteDescargas2020(object):\\n \\\"\\\"\\\"Descargas2020 urlrewriter and search.\\\"\\\"\\\"\\n@@ -67,13 +67,10 @@\\n torrent_id = match.group(1)\\n if not torrent_id:\\n log.debug('torrent ID not found, searching openTorrent script')\\n- torrent_id_prog = re.compile('function openTorrent.*\\\\n.*\\\\{.*(\\\\n.*)+window\\\\.location\\\\.href 
=\\\\s*\\\\\\\"(.*\\\\/\\\\d+_-.*[^\\\\/])\\\\/?\\\\\\\";')\\n- torrent_ids = soup.findAll(text=torrent_id_prog)\\n- if torrent_ids:\\n- match = torrent_id_prog.search(torrent_ids[0])\\n- if match:\\n- torrent_id = match.group(2)\\n- return torrent_id.replace('descargar-torrent', 'download') + '.torrent'\\n+ match = re.search('function openTorrent.*\\\\n.*\\\\{.*(\\\\n.*)+window\\\\.location\\\\.href =\\\\s*\\\\\\\".*\\\\/(\\\\d+.*)\\\\\\\";',\\n+ page.text, re.MULTILINE)\\n+ if match:\\n+ torrent_id = match.group(2).rstrip('/')\\n \\n if not torrent_id:\\n raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)\\n\", \"issue\": \"descargas2020 plugin broken\\n### Expected behaviour:\\r\\nit downloads torrent properly.\\r\\n\\r\\n### Actual behaviour:\\r\\n\\r\\n### Steps to reproduce:\\r\\nadd this rss http://feeds.feedburner.com/descargas2020new\\r\\nflexget execute\\r\\nyou will get:\\r\\n2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/\\r\\n2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None)\\r\\n2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved.\\r\\n\\r\\n#### Config:\\r\\n\\r\\n```\\r\\ntasks:\\r\\n series_sigo:\\r\\n rss:\\r\\n url: http://feeds.feedburner.com/descargas2020new\\r\\n link: link\\r\\n all_series: yes\\r\\n seen: local\\r\\n regexp:\\r\\n reject:\\r\\n - PCDVD\\r\\n accept_all: yes\\r\\n thetvdb_lookup: yes\\r\\n no_entries_ok: yes\\r\\n set:\\r\\n filename: \\\"{{title|pathscrub}}.torrent\\\"\\r\\n download: /home/flexget/torrent/\\r\\n```\\r\\n \\r\\n#### Log:\\r\\n\\r\\n2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/\\r\\n2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None)\\r\\n2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved.\\r\\n\\r\\n```\\r\\npaste log output here\\r\\n```\\r\\n\\r\\n\\r\\n### Additional information:\\r\\n\\r\\n- FlexGet version: 2.17.20\\r\\n- Python version: 2.7.13\\r\\n- Installation method: i don't remember\\r\\n- Using daemon (yes/no): no\\r\\n- OS and version: debian 9 x64\\r\\n- Link to crash log:\\r\\n\\r\\n\\r\\n\\n\", \"before_files\": [{\"content\": \"from __future__ import unicode_literals, division, absolute_import\\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\\n\\nimport logging\\nimport re\\n\\nfrom flexget import plugin\\nfrom flexget.event import event\\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\\nfrom flexget.utils.requests import Session, TimedLimiter\\nfrom flexget.utils.soup import get_soup\\nfrom flexget.utils import requests\\n\\nfrom flexget.entry import Entry\\nfrom flexget.utils.search import normalize_unicode\\n\\nimport unicodedata\\n\\nlog = logging.getLogger('descargas2020')\\n\\nrequests = Session()\\nrequests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 
NT)'})\\nrequests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))\\n\\nDESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent'\\n\\nclass UrlRewriteDescargas2020(object):\\n \\\"\\\"\\\"Descargas2020 urlrewriter and search.\\\"\\\"\\\"\\n\\n schema = {\\n 'type': 'boolean',\\n 'default': False\\n }\\n\\n # urlrewriter API\\n def url_rewritable(self, task, entry):\\n url = entry['url']\\n rewritable_regex = '^http:\\\\/\\\\/(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com\\\\/.*'\\n return re.match(rewritable_regex, url) and not url.endswith('.torrent')\\n\\n # urlrewriter API\\n def url_rewrite(self, task, entry):\\n entry['url'] = self.parse_download_page(entry['url'], task)\\n\\n @plugin.internet(log)\\n def parse_download_page(self, url, task):\\n log.verbose('Descargas2020 URL: %s', url)\\n\\n try:\\n page = requests.get(url)\\n except requests.exceptions.RequestException as e:\\n raise UrlRewritingError(e)\\n try:\\n soup = get_soup(page.text)\\n except Exception as e:\\n raise UrlRewritingError(e)\\n\\n torrent_id = None\\n url_format = DESCARGAS2020_TORRENT_FORMAT\\n\\n torrent_id_prog = re.compile(\\\"(?:parametros\\\\s*=\\\\s*\\\\n?)\\\\s*{\\\\s*\\\\n(?:\\\\s*'\\\\w+'\\\\s*:.*\\\\n)+\\\\s*'(?:torrentID|id)\\\"\\n \\\"'\\\\s*:\\\\s*'(\\\\d+)'\\\")\\n torrent_ids = soup.findAll(text=torrent_id_prog)\\n if torrent_ids:\\n match = torrent_id_prog.search(torrent_ids[0])\\n if match:\\n torrent_id = match.group(1)\\n if not torrent_id:\\n log.debug('torrent ID not found, searching openTorrent script')\\n torrent_id_prog = re.compile('function openTorrent.*\\\\n.*\\\\{.*(\\\\n.*)+window\\\\.location\\\\.href =\\\\s*\\\\\\\"(.*\\\\/\\\\d+_-.*[^\\\\/])\\\\/?\\\\\\\";')\\n torrent_ids = soup.findAll(text=torrent_id_prog)\\n if torrent_ids:\\n match = torrent_id_prog.search(torrent_ids[0])\\n if match:\\n torrent_id = match.group(2)\\n return torrent_id.replace('descargar-torrent', 'download') + '.torrent'\\n\\n if not torrent_id:\\n raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)\\n\\n return url_format.format(torrent_id)\\n\\n def search(self, task, entry, config=None):\\n if not config:\\n log.debug('Descargas2020 disabled')\\n return set()\\n log.debug('Search Descargas2020')\\n url_search = 'http://descargas2020.com/buscar'\\n results = set()\\n for search_string in entry.get('search_strings', [entry['title']]):\\n query = normalize_unicode(search_string)\\n query = re.sub(' \\\\(\\\\d\\\\d\\\\d\\\\d\\\\)$', '', query)\\n log.debug('Searching Descargas2020 %s', query)\\n query = unicodedata.normalize('NFD', query).encode('ascii', 'ignore')\\n data = {'q': query}\\n try:\\n response = task.requests.post(url_search, data=data)\\n except requests.RequestException as e:\\n log.error('Error searching Descargas2020: %s', e)\\n return results\\n content = response.content\\n soup = get_soup(content)\\n soup2 = soup.find('ul', attrs={'class': 'buscar-list'})\\n children = soup2.findAll('a', href=True)\\n for child in children:\\n entry = Entry()\\n entry['url'] = child['href']\\n entry_title = child.find('h2')\\n if entry_title is None:\\n log.debug('Ignore empty entry')\\n continue\\n entry_title = entry_title.text\\n if not entry_title:\\n continue\\n try:\\n entry_quality_lan = re.search('.+ \\\\[([^\\\\]]+)\\\\](\\\\[[^\\\\]]+\\\\])+$', entry_title).group(1)\\n except AttributeError:\\n log.debug('Quality not found')\\n continue\\n entry_title = re.sub(' \\\\[.+]$', '', entry_title)\\n 
entry['title'] = entry_title + ' ' + entry_quality_lan\\n results.add(entry)\\n log.debug('Finish search Descargas2020 with %d entries', len(results))\\n return results\\n\\n@event('plugin.register')\\ndef register_plugin():\\n plugin.register(UrlRewriteDescargas2020, 'descargas2020', interfaces=['urlrewriter', 'search'], api_ver=2)\\n\", \"path\": \"flexget/plugins/sites/descargas2020.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":3026,"string":"3,026"},"num_tokens_diff":{"kind":"number","value":472,"string":"472"}}},{"rowIdx":18191,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_2192"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"LMFDB__lmfdb-5179"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nPIP dependencies\nWe have several deprecated dependencies that we should fix ASAP\r\n```\r\nflask<=1.1.4\r\nmarkupsafe<=2.0.1\r\nitsdangerous<=2.0.1\r\n```\r\n\r\nin particular, this prevents using lmfdb in an environment with jupyterlab installed, which is something we would like to have working on a short time basis. \n\n\n\n[start of lmfdb/local_fields/__init__.py]\n1 # -*- coding: utf-8 -*-\n2 from lmfdb.app import app\n3 from lmfdb.logger import make_logger\n4 from flask import Blueprint, request, redirect\n5 \n6 local_fields_page = Blueprint(\"local_fields\", __name__, template_folder='templates', static_folder=\"static\")\n7 logger = make_logger(local_fields_page)\n8 \n9 \n10 @local_fields_page.context_processor\n11 def body_class():\n12 return {'body_class': 'local_fields'}\n13 \n14 from . import main\n15 assert main\n16 \n17 from urllib.parse import urlparse, urlunparse\n18 \n19 \n20 @local_fields_page.before_request\n21 def redirect_local():\n22 urlparts = urlparse(request.url)\n23 if 'LocalNumberField' in urlparts.path:\n24 urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))\n25 return redirect(urlunparse(urlparts), 301)\n26 return\n27 \n28 \n29 app.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\n30 app.register_blueprint(local_fields_page, url_prefix=\"/LocalNumberField\")\n31 \n32 # API2 has been disabled for now\n33 #from lmfdb.api2.searchers import register_search_function\n34 #register_search_function(\n35 # \"$p$-adic_fields\",\n36 # \"$p$-adic fields\",\n37 # \"Search over $p$-adic fields\",\n38 # auto_search = 'lf_fields'\n39 #)\n40\n[end of lmfdb/local_fields/__init__.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/lmfdb/local_fields/__init__.py b/lmfdb/local_fields/__init__.py\n--- a/lmfdb/local_fields/__init__.py\n+++ b/lmfdb/local_fields/__init__.py\n@@ -27,7 +27,6 @@\n \n \n app.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\n-app.register_blueprint(local_fields_page, url_prefix=\"/LocalNumberField\")\n \n # API2 has been disabled for now\n #from lmfdb.api2.searchers import register_search_function\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/lmfdb/local_fields/__init__.py b/lmfdb/local_fields/__init__.py\\n--- a/lmfdb/local_fields/__init__.py\\n+++ b/lmfdb/local_fields/__init__.py\\n@@ -27,7 +27,6 @@\\n \\n \\n app.register_blueprint(local_fields_page, url_prefix=\\\"/padicField\\\")\\n-app.register_blueprint(local_fields_page, url_prefix=\\\"/LocalNumberField\\\")\\n \\n # API2 has been disabled for now\\n #from lmfdb.api2.searchers import register_search_function\\n\", \"issue\": \"PIP dependencies\\nWe have several deprecated dependencies that we should fix ASAP\\r\\n```\\r\\nflask<=1.1.4\\r\\nmarkupsafe<=2.0.1\\r\\nitsdangerous<=2.0.1\\r\\n```\\r\\n\\r\\nin particular, this prevents using lmfdb in an environment with jupyterlab installed, which is something we would like to have working on a short time basis. \\n\", \"before_files\": [{\"content\": \"# -*- coding: utf-8 -*-\\nfrom lmfdb.app import app\\nfrom lmfdb.logger import make_logger\\nfrom flask import Blueprint, request, redirect\\n\\nlocal_fields_page = Blueprint(\\\"local_fields\\\", __name__, template_folder='templates', static_folder=\\\"static\\\")\\nlogger = make_logger(local_fields_page)\\n\\n\\n@local_fields_page.context_processor\\ndef body_class():\\n return {'body_class': 'local_fields'}\\n\\nfrom . 
import main\\nassert main\\n\\nfrom urllib.parse import urlparse, urlunparse\\n\\n\\n@local_fields_page.before_request\\ndef redirect_local():\\n urlparts = urlparse(request.url)\\n if 'LocalNumberField' in urlparts.path:\\n urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))\\n return redirect(urlunparse(urlparts), 301)\\n return\\n\\n\\napp.register_blueprint(local_fields_page, url_prefix=\\\"/padicField\\\")\\napp.register_blueprint(local_fields_page, url_prefix=\\\"/LocalNumberField\\\")\\n\\n# API2 has been disabled for now\\n#from lmfdb.api2.searchers import register_search_function\\n#register_search_function(\\n# \\\"$p$-adic_fields\\\",\\n# \\\"$p$-adic fields\\\",\\n# \\\"Search over $p$-adic fields\\\",\\n# auto_search = 'lf_fields'\\n#)\\n\", \"path\": \"lmfdb/local_fields/__init__.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":976,"string":"976"},"num_tokens_diff":{"kind":"number","value":113,"string":"113"}}},{"rowIdx":18192,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_2958"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"learningequality__kolibri-4689"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nShows sorry! something went wrong.\n### Observed behavior\r\nWhen coach is going to the recent tab to see exercise and video progress then it shows error.\r\n\r\n\r\n### Expected behavior\r\nIt must show progress instead of error.\r\n\r\n### Steps to reproduce\r\n1. Login with coach.\r\n2. go to the recent tab.\r\n3. Go to the exercise/video and see.\r\n\r\n### Context\r\n\r\n * Kolibri version : kolibri 0.11.0\r\n * Operating system : Ubuntu 14.04\r\n * Browser : chrome\r\n\r\n### Screenshot\r\n![1](https://user-images.githubusercontent.com/12776071/50138341-4d958180-02c4-11e9-92b7-01a9fb28acc2.png)\r\n![2](https://user-images.githubusercontent.com/12776071/50138342-4d958180-02c4-11e9-9426-fe0709d16751.png)\r\n![3](https://user-images.githubusercontent.com/12776071/50138343-4e2e1800-02c4-11e9-9ac4-e520796024ed.png)\r\n\n\n\n\n[start of kolibri/plugins/coach/api.py]\n1 import datetime\n2 \n3 from dateutil.parser import parse\n4 from django.db import connection\n5 from django.db.models import Min\n6 from django.db.models import Q\n7 from django.utils import timezone\n8 from rest_framework import mixins\n9 from rest_framework import pagination\n10 from rest_framework import permissions\n11 from rest_framework import viewsets\n12 \n13 from .serializers import ContentReportSerializer\n14 from .serializers import ContentSummarySerializer\n15 from .serializers import LessonReportSerializer\n16 from .serializers import UserReportSerializer\n17 from .utils.return_users import get_members_or_user\n18 from kolibri.core.auth.constants import collection_kinds\n19 from kolibri.core.auth.constants import role_kinds\n20 from kolibri.core.auth.models import Collection\n21 from kolibri.core.auth.models import FacilityUser\n22 from kolibri.core.content.models import ContentNode\n23 from kolibri.core.decorators import query_params_required\n24 from kolibri.core.lessons.models import Lesson\n25 from kolibri.core.logger.models import ContentSummaryLog\n26 from kolibri.core.logger.models import MasteryLog\n27 \n28 \n29 collection_kind_choices = tuple([choice[0] for choice in collection_kinds.choices] + ['user'])\n30 \n31 \n32 class 
OptionalPageNumberPagination(pagination.PageNumberPagination):\n33 \"\"\"\n34 Pagination class that allows for page number-style pagination, when requested.\n35 To activate, the `page_size` argument must be set. For example, to request the first 20 records:\n36 `?page_size=20&page=1`\n37 \"\"\"\n38 page_size = None\n39 page_size_query_param = \"page_size\"\n40 \n41 \n42 class KolibriReportPermissions(permissions.BasePermission):\n43 \n44 # check if requesting user has permission for collection or user\n45 def has_permission(self, request, view):\n46 if isinstance(view, LessonReportViewset):\n47 report_pk = view.kwargs.get('pk', None)\n48 if report_pk is None:\n49 # If requesting list view, check if requester has coach/admin permissions on whole facility\n50 collection_kind = 'facility'\n51 collection_or_user_pk = request.user.facility_id\n52 else:\n53 # If requesting detail view, only check if requester has permissions on the Classroom\n54 collection_kind = 'classroom'\n55 collection_or_user_pk = Lesson.objects.get(pk=report_pk).collection.id\n56 \n57 else:\n58 collection_kind = view.kwargs.get('collection_kind', 'user')\n59 collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))\n60 \n61 allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]\n62 try:\n63 if 'user' == collection_kind:\n64 return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))\n65 else:\n66 return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))\n67 except (FacilityUser.DoesNotExist, Collection.DoesNotExist):\n68 return False\n69 \n70 \n71 @query_params_required(channel_id=str, content_node_id=str, collection_kind=collection_kind_choices, collection_id=str)\n72 class ReportBaseViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\n73 \n74 permission_classes = (KolibriReportPermissions,)\n75 \n76 \n77 class UserReportViewSet(ReportBaseViewSet):\n78 \n79 pagination_class = OptionalPageNumberPagination\n80 serializer_class = UserReportSerializer\n81 \n82 def get_queryset(self):\n83 assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint'\n84 return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])\n85 \n86 \n87 class ContentReportViewSet(ReportBaseViewSet):\n88 \n89 pagination_class = OptionalPageNumberPagination\n90 serializer_class = ContentReportSerializer\n91 \n92 def get_queryset(self):\n93 content_node_id = self.kwargs['content_node_id']\n94 return ContentNode.objects.filter(Q(parent=content_node_id) & Q(available=True)).order_by('lft')\n95 \n96 \n97 @query_params_required(channel_id=str, collection_kind=collection_kind_choices, collection_id=str)\n98 class ContentSummaryViewSet(viewsets.ReadOnlyModelViewSet):\n99 \n100 permission_classes = (KolibriReportPermissions,)\n101 serializer_class = ContentSummarySerializer\n102 \n103 def get_queryset(self):\n104 channel_id = self.kwargs['channel_id']\n105 return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')\n106 \n107 \n108 class RecentReportViewSet(ReportBaseViewSet):\n109 \n110 pagination_class = OptionalPageNumberPagination\n111 serializer_class = ContentReportSerializer\n112 \n113 def get_queryset(self):\n114 channel_id = self.kwargs['channel_id']\n115 attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)\n116 query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])\n117 if 
self.request.query_params.get('last_active_time'):\n118 # Last active time specified\n119 datetime_cutoff = parse(self.request.query_params.get('last_active_time'))\n120 else:\n121 datetime_cutoff = timezone.now() - datetime.timedelta(7)\n122 # Set on the kwargs to pass into the serializer\n123 self.kwargs['last_active_time'] = datetime_cutoff.isoformat()\n124 recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(\n125 Q(progress__gt=0) | Q(masterylogs__in=attempted_mastery_logs),\n126 user__in=list(get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])),\n127 end_timestamp__gte=datetime_cutoff).values_list('content_id', flat=True)\n128 if connection.vendor == 'postgresql':\n129 pks_with_unique_content_ids = ContentNode.objects.order_by('content_id').distinct('content_id').filter(\n130 channel_id=channel_id, content_id__in=recent_content_items).values_list('pk', flat=True)\n131 else:\n132 # note from rtibbles:\n133 # As good as either I or jamalex could come up with to ensure that we only return\n134 # unique content_id'ed ContentNodes from the coach recent report endpoint.\n135 # Would have loved to use distinct('content_id'), but unfortunately DISTINCT ON is Postgresql only\n136 pks_with_unique_content_ids = ContentNode.objects.filter(\n137 channel_id=channel_id, content_id__in=recent_content_items).values('content_id').order_by('lft').annotate(\n138 pk=Min('pk')).values_list('pk', flat=True)\n139 return ContentNode.objects.filter(pk__in=pks_with_unique_content_ids).order_by('lft')\n140 \n141 \n142 class LessonReportViewset(viewsets.ReadOnlyModelViewSet):\n143 permission_classes = (permissions.IsAuthenticated, KolibriReportPermissions,)\n144 serializer_class = LessonReportSerializer\n145 queryset = Lesson.objects.all()\n146\n[end of kolibri/plugins/coach/api.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/kolibri/plugins/coach/api.py b/kolibri/plugins/coach/api.py\n--- a/kolibri/plugins/coach/api.py\n+++ b/kolibri/plugins/coach/api.py\n@@ -102,7 +102,7 @@\n \n def get_queryset(self):\n channel_id = self.kwargs['channel_id']\n- return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')\n+ return ContentNode.objects.filter(Q(channel_id=channel_id)).order_by('lft')\n \n \n class RecentReportViewSet(ReportBaseViewSet):\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/kolibri/plugins/coach/api.py b/kolibri/plugins/coach/api.py\\n--- a/kolibri/plugins/coach/api.py\\n+++ b/kolibri/plugins/coach/api.py\\n@@ -102,7 +102,7 @@\\n \\n def get_queryset(self):\\n channel_id = self.kwargs['channel_id']\\n- return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')\\n+ return ContentNode.objects.filter(Q(channel_id=channel_id)).order_by('lft')\\n \\n \\n class RecentReportViewSet(ReportBaseViewSet):\\n\", \"issue\": \"Shows sorry! something went wrong.\\n### Observed behavior\\r\\nWhen coach is going to the recent tab to see exercise and video progress then it shows error.\\r\\n\\r\\n\\r\\n### Expected behavior\\r\\nIt must show progress instead of error.\\r\\n\\r\\n### Steps to reproduce\\r\\n1. Login with coach.\\r\\n2. go to the recent tab.\\r\\n3. 
Go to the exercise/video and see.\\r\\n\\r\\n### Context\\r\\n\\r\\n * Kolibri version : kolibri 0.11.0\\r\\n * Operating system : Ubuntu 14.04\\r\\n * Browser : chrome\\r\\n\\r\\n### Screenshot\\r\\n![1](https://user-images.githubusercontent.com/12776071/50138341-4d958180-02c4-11e9-92b7-01a9fb28acc2.png)\\r\\n![2](https://user-images.githubusercontent.com/12776071/50138342-4d958180-02c4-11e9-9426-fe0709d16751.png)\\r\\n![3](https://user-images.githubusercontent.com/12776071/50138343-4e2e1800-02c4-11e9-9ac4-e520796024ed.png)\\r\\n\\n\", \"before_files\": [{\"content\": \"import datetime\\n\\nfrom dateutil.parser import parse\\nfrom django.db import connection\\nfrom django.db.models import Min\\nfrom django.db.models import Q\\nfrom django.utils import timezone\\nfrom rest_framework import mixins\\nfrom rest_framework import pagination\\nfrom rest_framework import permissions\\nfrom rest_framework import viewsets\\n\\nfrom .serializers import ContentReportSerializer\\nfrom .serializers import ContentSummarySerializer\\nfrom .serializers import LessonReportSerializer\\nfrom .serializers import UserReportSerializer\\nfrom .utils.return_users import get_members_or_user\\nfrom kolibri.core.auth.constants import collection_kinds\\nfrom kolibri.core.auth.constants import role_kinds\\nfrom kolibri.core.auth.models import Collection\\nfrom kolibri.core.auth.models import FacilityUser\\nfrom kolibri.core.content.models import ContentNode\\nfrom kolibri.core.decorators import query_params_required\\nfrom kolibri.core.lessons.models import Lesson\\nfrom kolibri.core.logger.models import ContentSummaryLog\\nfrom kolibri.core.logger.models import MasteryLog\\n\\n\\ncollection_kind_choices = tuple([choice[0] for choice in collection_kinds.choices] + ['user'])\\n\\n\\nclass OptionalPageNumberPagination(pagination.PageNumberPagination):\\n \\\"\\\"\\\"\\n Pagination class that allows for page number-style pagination, when requested.\\n To activate, the `page_size` argument must be set. 
For example, to request the first 20 records:\\n `?page_size=20&page=1`\\n \\\"\\\"\\\"\\n page_size = None\\n page_size_query_param = \\\"page_size\\\"\\n\\n\\nclass KolibriReportPermissions(permissions.BasePermission):\\n\\n # check if requesting user has permission for collection or user\\n def has_permission(self, request, view):\\n if isinstance(view, LessonReportViewset):\\n report_pk = view.kwargs.get('pk', None)\\n if report_pk is None:\\n # If requesting list view, check if requester has coach/admin permissions on whole facility\\n collection_kind = 'facility'\\n collection_or_user_pk = request.user.facility_id\\n else:\\n # If requesting detail view, only check if requester has permissions on the Classroom\\n collection_kind = 'classroom'\\n collection_or_user_pk = Lesson.objects.get(pk=report_pk).collection.id\\n\\n else:\\n collection_kind = view.kwargs.get('collection_kind', 'user')\\n collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))\\n\\n allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]\\n try:\\n if 'user' == collection_kind:\\n return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))\\n else:\\n return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))\\n except (FacilityUser.DoesNotExist, Collection.DoesNotExist):\\n return False\\n\\n\\n@query_params_required(channel_id=str, content_node_id=str, collection_kind=collection_kind_choices, collection_id=str)\\nclass ReportBaseViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\\n\\n permission_classes = (KolibriReportPermissions,)\\n\\n\\nclass UserReportViewSet(ReportBaseViewSet):\\n\\n pagination_class = OptionalPageNumberPagination\\n serializer_class = UserReportSerializer\\n\\n def get_queryset(self):\\n assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint'\\n return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])\\n\\n\\nclass ContentReportViewSet(ReportBaseViewSet):\\n\\n pagination_class = OptionalPageNumberPagination\\n serializer_class = ContentReportSerializer\\n\\n def get_queryset(self):\\n content_node_id = self.kwargs['content_node_id']\\n return ContentNode.objects.filter(Q(parent=content_node_id) & Q(available=True)).order_by('lft')\\n\\n\\n@query_params_required(channel_id=str, collection_kind=collection_kind_choices, collection_id=str)\\nclass ContentSummaryViewSet(viewsets.ReadOnlyModelViewSet):\\n\\n permission_classes = (KolibriReportPermissions,)\\n serializer_class = ContentSummarySerializer\\n\\n def get_queryset(self):\\n channel_id = self.kwargs['channel_id']\\n return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')\\n\\n\\nclass RecentReportViewSet(ReportBaseViewSet):\\n\\n pagination_class = OptionalPageNumberPagination\\n serializer_class = ContentReportSerializer\\n\\n def get_queryset(self):\\n channel_id = self.kwargs['channel_id']\\n attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)\\n query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])\\n if self.request.query_params.get('last_active_time'):\\n # Last active time specified\\n datetime_cutoff = parse(self.request.query_params.get('last_active_time'))\\n else:\\n datetime_cutoff = timezone.now() - datetime.timedelta(7)\\n # Set on the kwargs to pass into the serializer\\n self.kwargs['last_active_time'] = datetime_cutoff.isoformat()\\n 
recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(\\n Q(progress__gt=0) | Q(masterylogs__in=attempted_mastery_logs),\\n user__in=list(get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])),\\n end_timestamp__gte=datetime_cutoff).values_list('content_id', flat=True)\\n if connection.vendor == 'postgresql':\\n pks_with_unique_content_ids = ContentNode.objects.order_by('content_id').distinct('content_id').filter(\\n channel_id=channel_id, content_id__in=recent_content_items).values_list('pk', flat=True)\\n else:\\n # note from rtibbles:\\n # As good as either I or jamalex could come up with to ensure that we only return\\n # unique content_id'ed ContentNodes from the coach recent report endpoint.\\n # Would have loved to use distinct('content_id'), but unfortunately DISTINCT ON is Postgresql only\\n pks_with_unique_content_ids = ContentNode.objects.filter(\\n channel_id=channel_id, content_id__in=recent_content_items).values('content_id').order_by('lft').annotate(\\n pk=Min('pk')).values_list('pk', flat=True)\\n return ContentNode.objects.filter(pk__in=pks_with_unique_content_ids).order_by('lft')\\n\\n\\nclass LessonReportViewset(viewsets.ReadOnlyModelViewSet):\\n permission_classes = (permissions.IsAuthenticated, KolibriReportPermissions,)\\n serializer_class = LessonReportSerializer\\n queryset = Lesson.objects.all()\\n\", \"path\": \"kolibri/plugins/coach/api.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2537,"string":"2,537"},"num_tokens_diff":{"kind":"number","value":131,"string":"131"}}},{"rowIdx":18193,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_30051"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"doccano__doccano-2228"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nAdd a function to filter labels\nWhen I rechecked the labels of the annotated data, I had no way of filtering out the labels I wanted to see. For example, when I am doing a check of dichotomous annotations, I would like to filter the data set to find out which labels are positive and which are negative, so that I can save time on the check. However, due to the lack of this function, I have to filter one by one from dataset, which wastes a lot of time and manpower.\r\n\r\nThanks for every contributor!\n\n\n\n[start of backend/examples/filters.py]\n1 from django.db.models import Count, Q\n2 from django_filters.rest_framework import BooleanFilter, FilterSet\n3 \n4 from .models import Example\n5 \n6 \n7 class ExampleFilter(FilterSet):\n8 confirmed = BooleanFilter(field_name=\"states\", method=\"filter_by_state\")\n9 \n10 def filter_by_state(self, queryset, field_name, is_confirmed: bool):\n11 queryset = queryset.annotate(\n12 num_confirm=Count(\n13 expression=field_name,\n14 filter=Q(**{f\"{field_name}__confirmed_by\": self.request.user})\n15 | Q(project__collaborative_annotation=True),\n16 )\n17 )\n18 if is_confirmed:\n19 queryset = queryset.filter(num_confirm__gte=1)\n20 else:\n21 queryset = queryset.filter(num_confirm__lte=0)\n22 return queryset\n23 \n24 class Meta:\n25 model = Example\n26 fields = (\"project\", \"text\", \"created_at\", \"updated_at\")\n27\n[end of backend/examples/filters.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/backend/examples/filters.py b/backend/examples/filters.py\n--- a/backend/examples/filters.py\n+++ b/backend/examples/filters.py\n@@ -1,11 +1,12 @@\n-from django.db.models import Count, Q\n-from django_filters.rest_framework import BooleanFilter, FilterSet\n+from django.db.models import Count, Q, QuerySet\n+from django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet\n \n from .models import Example\n \n \n class ExampleFilter(FilterSet):\n confirmed = BooleanFilter(field_name=\"states\", method=\"filter_by_state\")\n+ label = CharFilter(method=\"filter_by_label\")\n \n def filter_by_state(self, queryset, field_name, is_confirmed: bool):\n queryset = queryset.annotate(\n@@ -21,6 +22,35 @@\n queryset = queryset.filter(num_confirm__lte=0)\n return queryset\n \n+ def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet:\n+ \"\"\"Filter examples by a given label name.\n+\n+ This performs filtering on all of the following labels at once:\n+ - categories\n+ - spans\n+ - relations\n+ - bboxes\n+ - segmentations\n+\n+ Todo: Consider project type to make filtering more efficient.\n+\n+ Args:\n+ queryset (QuerySet): QuerySet to filter.\n+ field_name (str): This equals to `label`.\n+ label (str): The label name to filter.\n+\n+ Returns:\n+ QuerySet: Filtered examples.\n+ \"\"\"\n+ queryset = queryset.filter(\n+ Q(categories__label__text=label)\n+ | Q(spans__label__text=label)\n+ | Q(relations__type__text=label)\n+ | Q(bboxes__label__text=label)\n+ | Q(segmentations__label__text=label)\n+ )\n+ return queryset\n+\n class Meta:\n model = Example\n- fields = (\"project\", \"text\", \"created_at\", \"updated_at\")\n+ fields = (\"project\", \"text\", \"created_at\", \"updated_at\", \"label\")\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/backend/examples/filters.py b/backend/examples/filters.py\\n--- a/backend/examples/filters.py\\n+++ b/backend/examples/filters.py\\n@@ -1,11 +1,12 @@\\n-from django.db.models import Count, Q\\n-from django_filters.rest_framework import BooleanFilter, FilterSet\\n+from django.db.models import Count, Q, QuerySet\\n+from django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet\\n \\n from .models import Example\\n \\n \\n class ExampleFilter(FilterSet):\\n confirmed = BooleanFilter(field_name=\\\"states\\\", method=\\\"filter_by_state\\\")\\n+ label = CharFilter(method=\\\"filter_by_label\\\")\\n \\n def filter_by_state(self, 
queryset, field_name, is_confirmed: bool):\\n queryset = queryset.annotate(\\n@@ -21,6 +22,35 @@\\n queryset = queryset.filter(num_confirm__lte=0)\\n return queryset\\n \\n+ def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet:\\n+ \\\"\\\"\\\"Filter examples by a given label name.\\n+\\n+ This performs filtering on all of the following labels at once:\\n+ - categories\\n+ - spans\\n+ - relations\\n+ - bboxes\\n+ - segmentations\\n+\\n+ Todo: Consider project type to make filtering more efficient.\\n+\\n+ Args:\\n+ queryset (QuerySet): QuerySet to filter.\\n+ field_name (str): This equals to `label`.\\n+ label (str): The label name to filter.\\n+\\n+ Returns:\\n+ QuerySet: Filtered examples.\\n+ \\\"\\\"\\\"\\n+ queryset = queryset.filter(\\n+ Q(categories__label__text=label)\\n+ | Q(spans__label__text=label)\\n+ | Q(relations__type__text=label)\\n+ | Q(bboxes__label__text=label)\\n+ | Q(segmentations__label__text=label)\\n+ )\\n+ return queryset\\n+\\n class Meta:\\n model = Example\\n- fields = (\\\"project\\\", \\\"text\\\", \\\"created_at\\\", \\\"updated_at\\\")\\n+ fields = (\\\"project\\\", \\\"text\\\", \\\"created_at\\\", \\\"updated_at\\\", \\\"label\\\")\\n\", \"issue\": \"Add a function to filter labels\\nWhen I rechecked the labels of the annotated data, I had no way of filtering out the labels I wanted to see. For example, when I am doing a check of dichotomous annotations, I would like to filter the data set to find out which labels are positive and which are negative, so that I can save time on the check. However, due to the lack of this function, I have to filter one by one from dataset, which wastes a lot of time and manpower.\\r\\n\\r\\nThanks for every contributor!\\n\", \"before_files\": [{\"content\": \"from django.db.models import Count, Q\\nfrom django_filters.rest_framework import BooleanFilter, FilterSet\\n\\nfrom .models import Example\\n\\n\\nclass ExampleFilter(FilterSet):\\n confirmed = BooleanFilter(field_name=\\\"states\\\", method=\\\"filter_by_state\\\")\\n\\n def filter_by_state(self, queryset, field_name, is_confirmed: bool):\\n queryset = queryset.annotate(\\n num_confirm=Count(\\n expression=field_name,\\n filter=Q(**{f\\\"{field_name}__confirmed_by\\\": self.request.user})\\n | Q(project__collaborative_annotation=True),\\n )\\n )\\n if is_confirmed:\\n queryset = queryset.filter(num_confirm__gte=1)\\n else:\\n queryset = queryset.filter(num_confirm__lte=0)\\n return queryset\\n\\n class Meta:\\n model = Example\\n fields = (\\\"project\\\", \\\"text\\\", \\\"created_at\\\", \\\"updated_at\\\")\\n\", \"path\": \"backend/examples/filters.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":879,"string":"879"},"num_tokens_diff":{"kind":"number","value":463,"string":"463"}}},{"rowIdx":18194,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_13543"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"pre-commit__pre-commit-33"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\npre-commit -i does not install the file with +x\nNo executable = no run :'(\n\n\n\n\n[start of pre_commit/git.py]\n1 import functools\n2 import os\n3 import os.path\n4 import pkg_resources\n5 import re\n6 from plumbum import local\n7 \n8 from pre_commit.util import memoize_by_cwd\n9 \n10 \n11 def _get_root_new():\n12 path = os.getcwd()\n13 while 
len(path) > 1:\n14 if os.path.exists(os.path.join(path, '.git')):\n15 return path\n16 else:\n17 path = os.path.normpath(os.path.join(path, '../'))\n18 raise AssertionError('called from outside of the gits')\n19 \n20 \n21 @memoize_by_cwd\n22 def get_root():\n23 return _get_root_new()\n24 \n25 \n26 @memoize_by_cwd\n27 def get_pre_commit_path():\n28 return os.path.join(get_root(), '.git/hooks/pre-commit')\n29 \n30 \n31 def create_pre_commit():\n32 path = get_pre_commit_path()\n33 pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n34 local.path(path).write(local.path(pre_commit_file).read())\n35 \n36 \n37 def remove_pre_commit():\n38 local.path(get_pre_commit_path()).delete()\n39 \n40 \n41 def get_head_sha(git_repo_path):\n42 with local.cwd(git_repo_path):\n43 return local['git']['rev-parse', 'HEAD']().strip()\n44 \n45 \n46 @memoize_by_cwd\n47 def get_staged_files():\n48 return local['git']['diff', '--staged', '--name-only']().splitlines()\n49 \n50 \n51 @memoize_by_cwd\n52 def get_all_files():\n53 return local['git']['ls-files']().splitlines()\n54 \n55 \n56 def get_files_matching(all_file_list_strategy):\n57 @functools.wraps(all_file_list_strategy)\n58 @memoize_by_cwd\n59 def wrapper(expr):\n60 regex = re.compile(expr)\n61 return set(filter(os.path.exists, (\n62 filename\n63 for filename in all_file_list_strategy()\n64 if regex.search(filename)\n65 )))\n66 return wrapper\n67 \n68 \n69 get_staged_files_matching = get_files_matching(get_staged_files)\n70 get_all_files_matching = get_files_matching(get_all_files)\n71\n[end of pre_commit/git.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -3,6 +3,7 @@\n import os.path\n import pkg_resources\n import re\n+import stat\n from plumbum import local\n \n from pre_commit.util import memoize_by_cwd\n@@ -32,6 +33,8 @@\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n+ original_mode = os.stat(path).st_mode\n+ os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n \n \n def remove_pre_commit():\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/pre_commit/git.py b/pre_commit/git.py\\n--- 
a/pre_commit/git.py\\n+++ b/pre_commit/git.py\\n@@ -3,6 +3,7 @@\\n import os.path\\n import pkg_resources\\n import re\\n+import stat\\n from plumbum import local\\n \\n from pre_commit.util import memoize_by_cwd\\n@@ -32,6 +33,8 @@\\n path = get_pre_commit_path()\\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\\n local.path(path).write(local.path(pre_commit_file).read())\\n+ original_mode = os.stat(path).st_mode\\n+ os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\\n \\n \\n def remove_pre_commit():\\n\", \"issue\": \"pre-commit -i does not install the file with +x\\nNo executable = no run :'(\\n\\n\", \"before_files\": [{\"content\": \"import functools\\nimport os\\nimport os.path\\nimport pkg_resources\\nimport re\\nfrom plumbum import local\\n\\nfrom pre_commit.util import memoize_by_cwd\\n\\n\\ndef _get_root_new():\\n path = os.getcwd()\\n while len(path) > 1:\\n if os.path.exists(os.path.join(path, '.git')):\\n return path\\n else:\\n path = os.path.normpath(os.path.join(path, '../'))\\n raise AssertionError('called from outside of the gits')\\n\\n\\n@memoize_by_cwd\\ndef get_root():\\n return _get_root_new()\\n\\n\\n@memoize_by_cwd\\ndef get_pre_commit_path():\\n return os.path.join(get_root(), '.git/hooks/pre-commit')\\n\\n\\ndef create_pre_commit():\\n path = get_pre_commit_path()\\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\\n local.path(path).write(local.path(pre_commit_file).read())\\n\\n\\ndef remove_pre_commit():\\n local.path(get_pre_commit_path()).delete()\\n\\n\\ndef get_head_sha(git_repo_path):\\n with local.cwd(git_repo_path):\\n return local['git']['rev-parse', 'HEAD']().strip()\\n\\n\\n@memoize_by_cwd\\ndef get_staged_files():\\n return local['git']['diff', '--staged', '--name-only']().splitlines()\\n\\n\\n@memoize_by_cwd\\ndef get_all_files():\\n return local['git']['ls-files']().splitlines()\\n\\n\\ndef get_files_matching(all_file_list_strategy):\\n @functools.wraps(all_file_list_strategy)\\n @memoize_by_cwd\\n def wrapper(expr):\\n regex = re.compile(expr)\\n return set(filter(os.path.exists, (\\n filename\\n for filename in all_file_list_strategy()\\n if regex.search(filename)\\n )))\\n return wrapper\\n\\n\\nget_staged_files_matching = get_files_matching(get_staged_files)\\nget_all_files_matching = get_files_matching(get_all_files)\\n\", \"path\": \"pre_commit/git.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1108,"string":"1,108"},"num_tokens_diff":{"kind":"number","value":170,"string":"170"}}},{"rowIdx":18195,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_37725"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"pwndbg__pwndbg-291"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nTypesetting seems to be wrong\n![image](https://cloud.githubusercontent.com/assets/7897423/24843862/42e004bc-1dd9-11e7-8447-b8cf87ed6fe1.png)\r\n\n\n\n\n[start of pwndbg/__init__.py]\n1 #!/usr/bin/env python\n2 # -*- coding: utf-8 -*-\n3 from __future__ import absolute_import\n4 from __future__ import division\n5 from __future__ import print_function\n6 from __future__ import unicode_literals\n7 \n8 import gdb\n9 \n10 import pwndbg.android\n11 import pwndbg.arch\n12 import pwndbg.arguments\n13 import pwndbg.argv\n14 import pwndbg.color\n15 import 
pwndbg.commands\n16 import pwndbg.commands.argv\n17 import pwndbg.commands.aslr\n18 import pwndbg.commands.auxv\n19 import pwndbg.commands.checksec\n20 import pwndbg.commands.config\n21 import pwndbg.commands.context\n22 import pwndbg.commands.cpsr\n23 import pwndbg.commands.dt\n24 import pwndbg.commands.dumpargs\n25 import pwndbg.commands.elf\n26 import pwndbg.commands.gdbinit\n27 import pwndbg.commands.got\n28 import pwndbg.commands.heap\n29 import pwndbg.commands.hexdump\n30 import pwndbg.commands.ida\n31 import pwndbg.commands.misc\n32 import pwndbg.commands.next\n33 import pwndbg.commands.peda\n34 import pwndbg.commands.procinfo\n35 import pwndbg.commands.radare2\n36 import pwndbg.commands.reload\n37 import pwndbg.commands.rop\n38 import pwndbg.commands.ropper\n39 import pwndbg.commands.search\n40 import pwndbg.commands.segments\n41 import pwndbg.commands.shell\n42 import pwndbg.commands.stack\n43 import pwndbg.commands.start\n44 import pwndbg.commands.telescope\n45 import pwndbg.commands.theme\n46 import pwndbg.commands.version\n47 import pwndbg.commands.vmmap\n48 import pwndbg.commands.windbg\n49 import pwndbg.commands.xor\n50 import pwndbg.constants\n51 import pwndbg.disasm\n52 import pwndbg.disasm.arm\n53 import pwndbg.disasm.jump\n54 import pwndbg.disasm.mips\n55 import pwndbg.disasm.ppc\n56 import pwndbg.disasm.sparc\n57 import pwndbg.disasm.x86\n58 import pwndbg.dt\n59 import pwndbg.elf\n60 import pwndbg.exception\n61 import pwndbg.heap\n62 import pwndbg.inthook\n63 import pwndbg.memory\n64 import pwndbg.net\n65 import pwndbg.proc\n66 import pwndbg.prompt\n67 import pwndbg.regs\n68 import pwndbg.stack\n69 import pwndbg.typeinfo\n70 import pwndbg.version\n71 import pwndbg.vmmap\n72 import pwndbg.wrappers\n73 \n74 __version__ = pwndbg.version.__version__\n75 version = __version__\n76 \n77 try:\n78 import unicorn\n79 import pwndbg.emu\n80 except:\n81 pass\n82 \n83 __all__ = [\n84 'arch',\n85 'auxv',\n86 'chain',\n87 'color',\n88 'compat',\n89 'disasm',\n90 'dt',\n91 'elf',\n92 'enhance',\n93 'events',\n94 'file',\n95 'function',\n96 'heap',\n97 'hexdump',\n98 'ida',\n99 'info',\n100 'linkmap',\n101 'malloc',\n102 'memoize',\n103 'memory',\n104 'proc',\n105 'regs',\n106 'remote',\n107 'search',\n108 'stack',\n109 'strings',\n110 'symbol',\n111 'typeinfo',\n112 'ui',\n113 'vmmap'\n114 ]\n115 \n116 prompt = \"pwndbg> \"\n117 prompt = \"\\x02\" + prompt + \"\\x01\" # STX + prompt + SOH\n118 prompt = pwndbg.color.red(prompt)\n119 prompt = pwndbg.color.bold(prompt)\n120 prompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n121 \n122 pre_commands = \"\"\"\n123 set confirm off\n124 set verbose off\n125 set prompt %s\n126 set pagination off\n127 set height 0\n128 set history expansion on\n129 set history save on\n130 set follow-fork-mode child\n131 set backtrace past-main on\n132 set step-mode on\n133 set print pretty on\n134 set width 0\n135 set print elements 15\n136 handle SIGALRM nostop print nopass\n137 handle SIGBUS stop print nopass\n138 handle SIGPIPE nostop print nopass\n139 handle SIGSEGV stop print nopass\n140 \"\"\".strip() % prompt\n141 \n142 for line in pre_commands.strip().splitlines():\n143 gdb.execute(line)\n144 \n145 # This may throw an exception, see pwndbg/pwndbg#27\n146 try:\n147 gdb.execute(\"set disassembly-flavor intel\")\n148 except gdb.error:\n149 pass\n150\n[end of pwndbg/__init__.py]\n[start of pwndbg/ui.py]\n1 #!/usr/bin/env python\n2 # -*- coding: utf-8 -*-\n3 \"\"\"\n4 A few helpers for making things print pretty-like.\n5 \"\"\"\n6 from __future__ 
import absolute_import\n7 from __future__ import division\n8 from __future__ import print_function\n9 from __future__ import unicode_literals\n10 \n11 import fcntl\n12 import struct\n13 import sys\n14 import termios\n15 \n16 import pwndbg.arch\n17 import pwndbg.color.context as C\n18 import pwndbg.color.theme as theme\n19 import pwndbg.config as config\n20 \n21 theme.Parameter('banner-separator', '─', 'repeated banner separator character')\n22 \n23 def banner(title):\n24 title = title.upper()\n25 try:\n26 _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n27 except:\n28 width = 80\n29 width -= 2\n30 return C.banner((\"[{:%s^%ss}]\" % (config.banner_separator, width)).format(title))\n31 \n32 def addrsz(address):\n33 address = int(address) & pwndbg.arch.ptrmask\n34 return \"%{}x\".format(2*pwndbg.arch.ptrsize) % address\n35\n[end of pwndbg/ui.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/pwndbg/__init__.py b/pwndbg/__init__.py\n--- a/pwndbg/__init__.py\n+++ b/pwndbg/__init__.py\n@@ -5,6 +5,8 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+import signal\n+\n import gdb\n \n import pwndbg.android\n@@ -67,6 +69,7 @@\n import pwndbg.regs\n import pwndbg.stack\n import pwndbg.typeinfo\n+import pwndbg.ui\n import pwndbg.version\n import pwndbg.vmmap\n import pwndbg.wrappers\n@@ -131,13 +134,13 @@\n set backtrace past-main on\n set step-mode on\n set print pretty on\n-set width 0\n+set width %i\n set print elements 15\n handle SIGALRM nostop print nopass\n handle SIGBUS stop print nopass\n handle SIGPIPE nostop print nopass\n handle SIGSEGV stop print nopass\n-\"\"\".strip() % prompt\n+\"\"\".strip() % (prompt, pwndbg.ui.get_window_size()[1])\n \n for line in pre_commands.strip().splitlines():\n gdb.execute(line)\n@@ -147,3 +150,7 @@\n gdb.execute(\"set disassembly-flavor intel\")\n except gdb.error:\n pass\n+\n+\n+# handle resize event to align width and completion\n+signal.signal(signal.SIGWINCH, lambda signum, frame: gdb.execute(\"set width %i\" % pwndbg.ui.get_window_size()[1]))\ndiff --git a/pwndbg/ui.py b/pwndbg/ui.py\n--- a/pwndbg/ui.py\n+++ b/pwndbg/ui.py\n@@ -9,6 +9,7 @@\n from __future__ import unicode_literals\n \n import fcntl\n+import os\n import struct\n import sys\n import termios\n@@ -22,13 +23,21 @@\n \n def banner(title):\n title = title.upper()\n- try:\n- 
_height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n- except:\n- width = 80\n+ _height, width = get_window_size()\n width -= 2\n return C.banner((\"[{:%s^%ss}]\" % (config.banner_separator, width)).format(title))\n \n def addrsz(address):\n address = int(address) & pwndbg.arch.ptrmask\n return \"%{}x\".format(2*pwndbg.arch.ptrsize) % address\n+\n+def get_window_size():\n+ fallback = (int(os.environ.get('LINES', 20)), int(os.environ.get('COLUMNS', 80)))\n+ if not sys.stdin.isatty:\n+ return fallback\n+ try:\n+ # get terminal size and force ret buffer len of 4 bytes for safe unpacking by passing equally long arg\n+ rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n+ except:\n+ rows, cols = fallback\n+ return rows, cols\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/pwndbg/__init__.py b/pwndbg/__init__.py\\n--- a/pwndbg/__init__.py\\n+++ b/pwndbg/__init__.py\\n@@ -5,6 +5,8 @@\\n from __future__ import print_function\\n from __future__ import unicode_literals\\n \\n+import signal\\n+\\n import gdb\\n \\n import pwndbg.android\\n@@ -67,6 +69,7 @@\\n import pwndbg.regs\\n import pwndbg.stack\\n import pwndbg.typeinfo\\n+import pwndbg.ui\\n import pwndbg.version\\n import pwndbg.vmmap\\n import pwndbg.wrappers\\n@@ -131,13 +134,13 @@\\n set backtrace past-main on\\n set step-mode on\\n set print pretty on\\n-set width 0\\n+set width %i\\n set print elements 15\\n handle SIGALRM nostop print nopass\\n handle SIGBUS stop print nopass\\n handle SIGPIPE nostop print nopass\\n handle SIGSEGV stop print nopass\\n-\\\"\\\"\\\".strip() % prompt\\n+\\\"\\\"\\\".strip() % (prompt, pwndbg.ui.get_window_size()[1])\\n \\n for line in pre_commands.strip().splitlines():\\n gdb.execute(line)\\n@@ -147,3 +150,7 @@\\n gdb.execute(\\\"set disassembly-flavor intel\\\")\\n except gdb.error:\\n pass\\n+\\n+\\n+# handle resize event to align width and completion\\n+signal.signal(signal.SIGWINCH, lambda signum, frame: gdb.execute(\\\"set width %i\\\" % pwndbg.ui.get_window_size()[1]))\\ndiff --git a/pwndbg/ui.py b/pwndbg/ui.py\\n--- a/pwndbg/ui.py\\n+++ b/pwndbg/ui.py\\n@@ -9,6 +9,7 @@\\n from __future__ import unicode_literals\\n \\n import fcntl\\n+import os\\n import struct\\n import sys\\n import termios\\n@@ -22,13 +23,21 @@\\n \\n def banner(title):\\n title = title.upper()\\n- try:\\n- _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\\n- except:\\n- width = 80\\n+ _height, width = get_window_size()\\n width -= 2\\n return C.banner((\\\"[{:%s^%ss}]\\\" % (config.banner_separator, width)).format(title))\\n \\n def addrsz(address):\\n address = int(address) & pwndbg.arch.ptrmask\\n return \\\"%{}x\\\".format(2*pwndbg.arch.ptrsize) % address\\n+\\n+def get_window_size():\\n+ fallback = (int(os.environ.get('LINES', 20)), int(os.environ.get('COLUMNS', 80)))\\n+ if not sys.stdin.isatty:\\n+ return fallback\\n+ try:\\n+ # get terminal size and force ret buffer len of 4 bytes for safe unpacking by passing equally long arg\\n+ rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\\n+ except:\\n+ rows, cols = fallback\\n+ return rows, cols\\n\", \"issue\": \"Typesetting seems to be wrong\\n![image](https://cloud.githubusercontent.com/assets/7897423/24843862/42e004bc-1dd9-11e7-8447-b8cf87ed6fe1.png)\\r\\n\\n\", \"before_files\": [{\"content\": \"#!/usr/bin/env python\\n# -*- coding: utf-8 -*-\\nfrom __future__ 
import absolute_import\\nfrom __future__ import division\\nfrom __future__ import print_function\\nfrom __future__ import unicode_literals\\n\\nimport gdb\\n\\nimport pwndbg.android\\nimport pwndbg.arch\\nimport pwndbg.arguments\\nimport pwndbg.argv\\nimport pwndbg.color\\nimport pwndbg.commands\\nimport pwndbg.commands.argv\\nimport pwndbg.commands.aslr\\nimport pwndbg.commands.auxv\\nimport pwndbg.commands.checksec\\nimport pwndbg.commands.config\\nimport pwndbg.commands.context\\nimport pwndbg.commands.cpsr\\nimport pwndbg.commands.dt\\nimport pwndbg.commands.dumpargs\\nimport pwndbg.commands.elf\\nimport pwndbg.commands.gdbinit\\nimport pwndbg.commands.got\\nimport pwndbg.commands.heap\\nimport pwndbg.commands.hexdump\\nimport pwndbg.commands.ida\\nimport pwndbg.commands.misc\\nimport pwndbg.commands.next\\nimport pwndbg.commands.peda\\nimport pwndbg.commands.procinfo\\nimport pwndbg.commands.radare2\\nimport pwndbg.commands.reload\\nimport pwndbg.commands.rop\\nimport pwndbg.commands.ropper\\nimport pwndbg.commands.search\\nimport pwndbg.commands.segments\\nimport pwndbg.commands.shell\\nimport pwndbg.commands.stack\\nimport pwndbg.commands.start\\nimport pwndbg.commands.telescope\\nimport pwndbg.commands.theme\\nimport pwndbg.commands.version\\nimport pwndbg.commands.vmmap\\nimport pwndbg.commands.windbg\\nimport pwndbg.commands.xor\\nimport pwndbg.constants\\nimport pwndbg.disasm\\nimport pwndbg.disasm.arm\\nimport pwndbg.disasm.jump\\nimport pwndbg.disasm.mips\\nimport pwndbg.disasm.ppc\\nimport pwndbg.disasm.sparc\\nimport pwndbg.disasm.x86\\nimport pwndbg.dt\\nimport pwndbg.elf\\nimport pwndbg.exception\\nimport pwndbg.heap\\nimport pwndbg.inthook\\nimport pwndbg.memory\\nimport pwndbg.net\\nimport pwndbg.proc\\nimport pwndbg.prompt\\nimport pwndbg.regs\\nimport pwndbg.stack\\nimport pwndbg.typeinfo\\nimport pwndbg.version\\nimport pwndbg.vmmap\\nimport pwndbg.wrappers\\n\\n__version__ = pwndbg.version.__version__\\nversion = __version__\\n\\ntry:\\n import unicorn\\n import pwndbg.emu\\nexcept:\\n pass\\n\\n__all__ = [\\n'arch',\\n'auxv',\\n'chain',\\n'color',\\n'compat',\\n'disasm',\\n'dt',\\n'elf',\\n'enhance',\\n'events',\\n'file',\\n'function',\\n'heap',\\n'hexdump',\\n'ida',\\n'info',\\n'linkmap',\\n'malloc',\\n'memoize',\\n'memory',\\n'proc',\\n'regs',\\n'remote',\\n'search',\\n'stack',\\n'strings',\\n'symbol',\\n'typeinfo',\\n'ui',\\n'vmmap'\\n]\\n\\nprompt = \\\"pwndbg> \\\"\\nprompt = \\\"\\\\x02\\\" + prompt + \\\"\\\\x01\\\" # STX + prompt + SOH\\nprompt = pwndbg.color.red(prompt)\\nprompt = pwndbg.color.bold(prompt)\\nprompt = \\\"\\\\x01\\\" + prompt + \\\"\\\\x02\\\" # SOH + prompt + STX\\n\\npre_commands = \\\"\\\"\\\"\\nset confirm off\\nset verbose off\\nset prompt %s\\nset pagination off\\nset height 0\\nset history expansion on\\nset history save on\\nset follow-fork-mode child\\nset backtrace past-main on\\nset step-mode on\\nset print pretty on\\nset width 0\\nset print elements 15\\nhandle SIGALRM nostop print nopass\\nhandle SIGBUS stop print nopass\\nhandle SIGPIPE nostop print nopass\\nhandle SIGSEGV stop print nopass\\n\\\"\\\"\\\".strip() % prompt\\n\\nfor line in pre_commands.strip().splitlines():\\n gdb.execute(line)\\n\\n# This may throw an exception, see pwndbg/pwndbg#27\\ntry:\\n gdb.execute(\\\"set disassembly-flavor intel\\\")\\nexcept gdb.error:\\n pass\\n\", \"path\": \"pwndbg/__init__.py\"}, {\"content\": \"#!/usr/bin/env python\\n# -*- coding: utf-8 -*-\\n\\\"\\\"\\\"\\nA few helpers for making things print pretty-like.\\n\\\"\\\"\\\"\\nfrom 
__future__ import absolute_import\\nfrom __future__ import division\\nfrom __future__ import print_function\\nfrom __future__ import unicode_literals\\n\\nimport fcntl\\nimport struct\\nimport sys\\nimport termios\\n\\nimport pwndbg.arch\\nimport pwndbg.color.context as C\\nimport pwndbg.color.theme as theme\\nimport pwndbg.config as config\\n\\ntheme.Parameter('banner-separator', '\\u2500', 'repeated banner separator character')\\n\\ndef banner(title):\\n title = title.upper()\\n try:\\n _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\\n except:\\n width = 80\\n width -= 2\\n return C.banner((\\\"[{:%s^%ss}]\\\" % (config.banner_separator, width)).format(title))\\n\\ndef addrsz(address):\\n address = int(address) & pwndbg.arch.ptrmask\\n return \\\"%{}x\\\".format(2*pwndbg.arch.ptrsize) % address\\n\", \"path\": \"pwndbg/ui.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2183,"string":"2,183"},"num_tokens_diff":{"kind":"number","value":707,"string":"707"}}},{"rowIdx":18196,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_39662"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"jupyterhub__zero-to-jupyterhub-k8s-531"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nculler is failing and exiting when culling users and servers are slow to stop\nTwo issues:\r\n\r\n1. culler script seems to exit when the cull request fails. It's unclear why this happens, but we should confirm and fix this\r\n2. the 'real' issue is that the culler is hitting 400 errors in the first place. The cause is servers that are slow to stop (DELETE /users/:name gives 400 if the user's server is running and cannot stop promptly). The previous request to stop the server will have returned 202 ACCEPTED instead of 204 DELETED in this case. 
If we delay deleting users if we get 202 ACCEPTED from the server deletion, we should be safe here.\n\n\n\n[start of images/hub/cull_idle_servers.py]\n1 #!/usr/bin/env python3\n2 # Imported from https://github.com/jupyterhub/jupyterhub/blob/0.8.0rc1/examples/cull-idle/cull_idle_servers.py\n3 \"\"\"script to monitor and cull idle single-user servers\n4 \n5 Caveats:\n6 \n7 last_activity is not updated with high frequency,\n8 so cull timeout should be greater than the sum of:\n9 \n10 - single-user websocket ping interval (default: 30s)\n11 - JupyterHub.last_activity_interval (default: 5 minutes)\n12 \n13 You can run this as a service managed by JupyterHub with this in your config::\n14 \n15 \n16 c.JupyterHub.services = [\n17 {\n18 'name': 'cull-idle',\n19 'admin': True,\n20 'command': 'python cull_idle_servers.py --timeout=3600'.split(),\n21 }\n22 ]\n23 \n24 Or run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`:\n25 \n26 export JUPYTERHUB_API_TOKEN=`jupyterhub token`\n27 python cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]\n28 \"\"\"\n29 \n30 import datetime\n31 import json\n32 import os\n33 \n34 from dateutil.parser import parse as parse_date\n35 \n36 from tornado.gen import coroutine\n37 from tornado.log import app_log\n38 from tornado.httpclient import AsyncHTTPClient, HTTPRequest\n39 from tornado.ioloop import IOLoop, PeriodicCallback\n40 from tornado.options import define, options, parse_command_line\n41 \n42 \n43 @coroutine\n44 def cull_idle(url, api_token, timeout, cull_users=False):\n45 \"\"\"Shutdown idle single-user servers\n46 \n47 If cull_users, inactive *users* will be deleted as well.\n48 \"\"\"\n49 auth_header = {\n50 'Authorization': 'token %s' % api_token\n51 }\n52 req = HTTPRequest(url=url + '/users',\n53 headers=auth_header,\n54 )\n55 now = datetime.datetime.utcnow()\n56 cull_limit = now - datetime.timedelta(seconds=timeout)\n57 client = AsyncHTTPClient()\n58 resp = yield client.fetch(req)\n59 users = json.loads(resp.body.decode('utf8', 'replace'))\n60 futures = []\n61 \n62 @coroutine\n63 def cull_one(user, last_activity):\n64 \"\"\"cull one user\"\"\"\n65 \n66 # shutdown server first. 
Hub doesn't allow deleting users with running servers.\n67 if user['server']:\n68 app_log.info(\"Culling server for %s (inactive since %s)\", user['name'], last_activity)\n69 req = HTTPRequest(url=url + '/users/%s/server' % user['name'],\n70 method='DELETE',\n71 headers=auth_header,\n72 )\n73 yield client.fetch(req)\n74 if cull_users:\n75 app_log.info(\"Culling user %s (inactive since %s)\", user['name'], last_activity)\n76 req = HTTPRequest(url=url + '/users/%s' % user['name'],\n77 method='DELETE',\n78 headers=auth_header,\n79 )\n80 yield client.fetch(req)\n81 \n82 for user in users:\n83 if not user['server'] and not cull_users:\n84 # server not running and not culling users, nothing to do\n85 continue\n86 last_activity = parse_date(user['last_activity'])\n87 if last_activity < cull_limit:\n88 futures.append((user['name'], cull_one(user, last_activity)))\n89 else:\n90 app_log.debug(\"Not culling %s (active since %s)\", user['name'], last_activity)\n91 \n92 for (name, f) in futures:\n93 yield f\n94 app_log.debug(\"Finished culling %s\", name)\n95 \n96 \n97 if __name__ == '__main__':\n98 define('url', default=os.environ.get('JUPYTERHUB_API_URL'), help=\"The JupyterHub API URL\")\n99 define('timeout', default=600, help=\"The idle timeout (in seconds)\")\n100 define('cull_every', default=0, help=\"The interval (in seconds) for checking for idle servers to cull\")\n101 define('cull_users', default=False,\n102 help=\"\"\"Cull users in addition to servers.\n103 This is for use in temporary-user cases such as tmpnb.\"\"\",\n104 )\n105 \n106 parse_command_line()\n107 if not options.cull_every:\n108 options.cull_every = options.timeout // 2\n109 api_token = os.environ['JUPYTERHUB_API_TOKEN']\n110 \n111 loop = IOLoop.current()\n112 cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\n113 # run once before scheduling periodic call\n114 loop.run_sync(cull)\n115 # schedule periodic cull\n116 pc = PeriodicCallback(cull, 1e3 * options.cull_every)\n117 pc.start()\n118 try:\n119 loop.start()\n120 except KeyboardInterrupt:\n121 pass\n122\n[end of images/hub/cull_idle_servers.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/images/hub/cull_idle_servers.py b/images/hub/cull_idle_servers.py\n--- a/images/hub/cull_idle_servers.py\n+++ b/images/hub/cull_idle_servers.py\n@@ -70,7 +70,15 @@\n method='DELETE',\n headers=auth_header,\n )\n- yield client.fetch(req)\n+ resp = yield client.fetch(req)\n+ if resp.code == 202:\n+ msg = \"Server for {} is slow to stop.\".format(user['name'])\n+ if cull_users:\n+ app_log.warning(msg + \" Not culling user yet.\")\n+ # return here so we don't continue to cull the user\n+ # which will fail if the server is still trying to shutdown\n+ return\n+ app_log.warning(msg)\n if cull_users:\n app_log.info(\"Culling user %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s' % user['name'],\n@@ -83,12 +91,19 @@\n if not user['server'] and not cull_users:\n # server not running and not culling users, nothing to do\n continue\n+ if not user['last_activity']:\n+ continue\n last_activity = parse_date(user['last_activity'])\n if last_activity < cull_limit:\n+ # user might be in a transition (e.g. 
starting or stopping)\n+ # don't try to cull if this is happening\n+ if user['pending']:\n+ app_log.warning(\"Not culling user %s with pending %s\", user['name'], user['pending'])\n+ continue\n futures.append((user['name'], cull_one(user, last_activity)))\n else:\n app_log.debug(\"Not culling %s (active since %s)\", user['name'], last_activity)\n- \n+\n for (name, f) in futures:\n yield f\n app_log.debug(\"Finished culling %s\", name)\n@@ -102,16 +117,17 @@\n help=\"\"\"Cull users in addition to servers.\n This is for use in temporary-user cases such as tmpnb.\"\"\",\n )\n- \n+\n parse_command_line()\n if not options.cull_every:\n options.cull_every = options.timeout // 2\n api_token = os.environ['JUPYTERHUB_API_TOKEN']\n- \n+\n loop = IOLoop.current()\n cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\n- # run once before scheduling periodic call\n- loop.run_sync(cull)\n+ # schedule first cull immediately\n+ # because PeriodicCallback doesn't start until the end of the first interval\n+ loop.add_callback(cull)\n # schedule periodic cull\n pc = PeriodicCallback(cull, 1e3 * options.cull_every)\n pc.start()\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/images/hub/cull_idle_servers.py b/images/hub/cull_idle_servers.py\\n--- a/images/hub/cull_idle_servers.py\\n+++ b/images/hub/cull_idle_servers.py\\n@@ -70,7 +70,15 @@\\n method='DELETE',\\n headers=auth_header,\\n )\\n- yield client.fetch(req)\\n+ resp = yield client.fetch(req)\\n+ if resp.code == 202:\\n+ msg = \\\"Server for {} is slow to stop.\\\".format(user['name'])\\n+ if cull_users:\\n+ app_log.warning(msg + \\\" Not culling user yet.\\\")\\n+ # return here so we don't continue to cull the user\\n+ # which will fail if the server is still trying to shutdown\\n+ return\\n+ app_log.warning(msg)\\n if cull_users:\\n app_log.info(\\\"Culling user %s (inactive since %s)\\\", user['name'], last_activity)\\n req = HTTPRequest(url=url + '/users/%s' % user['name'],\\n@@ -83,12 +91,19 @@\\n if not user['server'] and not cull_users:\\n # server not running and not culling users, nothing to do\\n continue\\n+ if not user['last_activity']:\\n+ continue\\n last_activity = parse_date(user['last_activity'])\\n if last_activity < cull_limit:\\n+ # user might be in a transition (e.g. 
starting or stopping)\\n+ # don't try to cull if this is happening\\n+ if user['pending']:\\n+ app_log.warning(\\\"Not culling user %s with pending %s\\\", user['name'], user['pending'])\\n+ continue\\n futures.append((user['name'], cull_one(user, last_activity)))\\n else:\\n app_log.debug(\\\"Not culling %s (active since %s)\\\", user['name'], last_activity)\\n- \\n+\\n for (name, f) in futures:\\n yield f\\n app_log.debug(\\\"Finished culling %s\\\", name)\\n@@ -102,16 +117,17 @@\\n help=\\\"\\\"\\\"Cull users in addition to servers.\\n This is for use in temporary-user cases such as tmpnb.\\\"\\\"\\\",\\n )\\n- \\n+\\n parse_command_line()\\n if not options.cull_every:\\n options.cull_every = options.timeout // 2\\n api_token = os.environ['JUPYTERHUB_API_TOKEN']\\n- \\n+\\n loop = IOLoop.current()\\n cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\\n- # run once before scheduling periodic call\\n- loop.run_sync(cull)\\n+ # schedule first cull immediately\\n+ # because PeriodicCallback doesn't start until the end of the first interval\\n+ loop.add_callback(cull)\\n # schedule periodic cull\\n pc = PeriodicCallback(cull, 1e3 * options.cull_every)\\n pc.start()\\n\", \"issue\": \"culler is failing and exiting when culling users and servers are slow to stop\\nTwo issues:\\r\\n\\r\\n1. culler script seems to exit when the cull request fails. It's unclear why this happens, but we should confirm and fix this\\r\\n2. the 'real' issue is that the culler is hitting 400 errors in the first place. The cause is servers that are slow to stop (DELETE /users/:name gives 400 if the user's server is running and cannot stop promptly). The previous request to stop the server will have returned 202 ACCEPTED instead of 204 DELETED in this case. 
If we delay deleting users if we get 202 ACCEPTED from the server deletion, we should be safe here.\\n\", \"before_files\": [{\"content\": \"#!/usr/bin/env python3\\n# Imported from https://github.com/jupyterhub/jupyterhub/blob/0.8.0rc1/examples/cull-idle/cull_idle_servers.py\\n\\\"\\\"\\\"script to monitor and cull idle single-user servers\\n\\nCaveats:\\n\\nlast_activity is not updated with high frequency,\\nso cull timeout should be greater than the sum of:\\n\\n- single-user websocket ping interval (default: 30s)\\n- JupyterHub.last_activity_interval (default: 5 minutes)\\n\\nYou can run this as a service managed by JupyterHub with this in your config::\\n\\n\\n c.JupyterHub.services = [\\n {\\n 'name': 'cull-idle',\\n 'admin': True,\\n 'command': 'python cull_idle_servers.py --timeout=3600'.split(),\\n }\\n ]\\n\\nOr run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`:\\n\\n export JUPYTERHUB_API_TOKEN=`jupyterhub token`\\n python cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]\\n\\\"\\\"\\\"\\n\\nimport datetime\\nimport json\\nimport os\\n\\nfrom dateutil.parser import parse as parse_date\\n\\nfrom tornado.gen import coroutine\\nfrom tornado.log import app_log\\nfrom tornado.httpclient import AsyncHTTPClient, HTTPRequest\\nfrom tornado.ioloop import IOLoop, PeriodicCallback\\nfrom tornado.options import define, options, parse_command_line\\n\\n\\n@coroutine\\ndef cull_idle(url, api_token, timeout, cull_users=False):\\n \\\"\\\"\\\"Shutdown idle single-user servers\\n\\n If cull_users, inactive *users* will be deleted as well.\\n \\\"\\\"\\\"\\n auth_header = {\\n 'Authorization': 'token %s' % api_token\\n }\\n req = HTTPRequest(url=url + '/users',\\n headers=auth_header,\\n )\\n now = datetime.datetime.utcnow()\\n cull_limit = now - datetime.timedelta(seconds=timeout)\\n client = AsyncHTTPClient()\\n resp = yield client.fetch(req)\\n users = json.loads(resp.body.decode('utf8', 'replace'))\\n futures = []\\n\\n @coroutine\\n def cull_one(user, last_activity):\\n \\\"\\\"\\\"cull one user\\\"\\\"\\\"\\n\\n # shutdown server first. 
Hub doesn't allow deleting users with running servers.\\n if user['server']:\\n app_log.info(\\\"Culling server for %s (inactive since %s)\\\", user['name'], last_activity)\\n req = HTTPRequest(url=url + '/users/%s/server' % user['name'],\\n method='DELETE',\\n headers=auth_header,\\n )\\n yield client.fetch(req)\\n if cull_users:\\n app_log.info(\\\"Culling user %s (inactive since %s)\\\", user['name'], last_activity)\\n req = HTTPRequest(url=url + '/users/%s' % user['name'],\\n method='DELETE',\\n headers=auth_header,\\n )\\n yield client.fetch(req)\\n\\n for user in users:\\n if not user['server'] and not cull_users:\\n # server not running and not culling users, nothing to do\\n continue\\n last_activity = parse_date(user['last_activity'])\\n if last_activity < cull_limit:\\n futures.append((user['name'], cull_one(user, last_activity)))\\n else:\\n app_log.debug(\\\"Not culling %s (active since %s)\\\", user['name'], last_activity)\\n \\n for (name, f) in futures:\\n yield f\\n app_log.debug(\\\"Finished culling %s\\\", name)\\n\\n\\nif __name__ == '__main__':\\n define('url', default=os.environ.get('JUPYTERHUB_API_URL'), help=\\\"The JupyterHub API URL\\\")\\n define('timeout', default=600, help=\\\"The idle timeout (in seconds)\\\")\\n define('cull_every', default=0, help=\\\"The interval (in seconds) for checking for idle servers to cull\\\")\\n define('cull_users', default=False,\\n help=\\\"\\\"\\\"Cull users in addition to servers.\\n This is for use in temporary-user cases such as tmpnb.\\\"\\\"\\\",\\n )\\n \\n parse_command_line()\\n if not options.cull_every:\\n options.cull_every = options.timeout // 2\\n api_token = os.environ['JUPYTERHUB_API_TOKEN']\\n \\n loop = IOLoop.current()\\n cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\\n # run once before scheduling periodic call\\n loop.run_sync(cull)\\n # schedule periodic cull\\n pc = PeriodicCallback(cull, 1e3 * options.cull_every)\\n pc.start()\\n try:\\n loop.start()\\n except KeyboardInterrupt:\\n pass\\n\", \"path\": \"images/hub/cull_idle_servers.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":1995,"string":"1,995"},"num_tokens_diff":{"kind":"number","value":647,"string":"647"}}},{"rowIdx":18197,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_13209"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"cal-itp__benefits-38"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nEnsure cookies are enabled\nNeed a basic client-side check that cookies are enabled (and UI if not) as soon as the app loads, since we require cookies to store the temporary transaction data.\n\n\n\n[start of benefits/settings.py]\n1 \"\"\"\n2 Django settings for benefits project.\n3 \"\"\"\n4 import os\n5 \n6 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n7 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n8 \n9 # SECURITY WARNING: keep the secret key used in production secret!\n10 SECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n11 \n12 # SECURITY WARNING: don't run with debug turned on in production!\n13 DEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n14 \n15 ADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n16 \n17 ALLOWED_HOSTS = []\n18 \n19 if DEBUG:\n20 ALLOWED_HOSTS.extend([\"*\"])\n21 else:\n22 hosts = 
os.environ[\"DJANGO_ALLOWED_HOSTS\"].split()\n23 ALLOWED_HOSTS.extend(hosts)\n24 \n25 # Application definition\n26 \n27 INSTALLED_APPS = [\n28 \"django.contrib.sessions\",\n29 \"django.contrib.staticfiles\",\n30 \"benefits.core\",\n31 \"benefits.enrollment\",\n32 \"benefits.eligibility\",\n33 ]\n34 \n35 if ADMIN:\n36 INSTALLED_APPS.extend(\n37 [\n38 \"django.contrib.admin\",\n39 \"django.contrib.auth\",\n40 \"django.contrib.contenttypes\",\n41 \"django.contrib.messages\",\n42 ]\n43 )\n44 \n45 MIDDLEWARE = [\n46 \"django.middleware.security.SecurityMiddleware\",\n47 \"django.contrib.sessions.middleware.SessionMiddleware\",\n48 \"django.middleware.locale.LocaleMiddleware\",\n49 \"django.middleware.common.CommonMiddleware\",\n50 \"django.middleware.csrf.CsrfViewMiddleware\",\n51 \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n52 \"benefits.core.middleware.DebugSession\",\n53 \"benefits.core.middleware.ChangedLanguageEvent\",\n54 ]\n55 \n56 if ADMIN:\n57 MIDDLEWARE.extend(\n58 [\n59 \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n60 \"django.contrib.messages.middleware.MessageMiddleware\",\n61 ]\n62 )\n63 \n64 CSRF_COOKIE_HTTPONLY = True\n65 \n66 SESSION_COOKIE_AGE = 3600\n67 SESSION_COOKIE_SAMESITE = \"Strict\"\n68 SESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n69 \n70 if not DEBUG:\n71 CSRF_COOKIE_SECURE = True\n72 SESSION_COOKIE_SECURE = True\n73 \n74 ROOT_URLCONF = \"benefits.urls\"\n75 \n76 template_ctx_processors = [\n77 \"django.template.context_processors.request\",\n78 \"benefits.core.context_processors.analytics\",\n79 ]\n80 \n81 if DEBUG:\n82 template_ctx_processors.extend(\n83 [\n84 \"django.template.context_processors.debug\",\n85 \"benefits.core.context_processors.debug\",\n86 ]\n87 )\n88 \n89 if ADMIN:\n90 template_ctx_processors.extend(\n91 [\n92 \"django.contrib.auth.context_processors.auth\",\n93 \"django.contrib.messages.context_processors.messages\",\n94 ]\n95 )\n96 \n97 TEMPLATES = [\n98 {\n99 \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n100 \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n101 \"APP_DIRS\": True,\n102 \"OPTIONS\": {\n103 \"context_processors\": template_ctx_processors,\n104 },\n105 },\n106 ]\n107 \n108 WSGI_APPLICATION = \"benefits.wsgi.application\"\n109 \n110 DATABASES = {\n111 \"default\": {\n112 \"ENGINE\": \"django.db.backends.sqlite3\",\n113 \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n114 }\n115 }\n116 \n117 # Password validation\n118 \n119 AUTH_PASSWORD_VALIDATORS = []\n120 \n121 if ADMIN:\n122 AUTH_PASSWORD_VALIDATORS.extend(\n123 [\n124 {\n125 \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n126 },\n127 {\n128 \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n129 },\n130 {\n131 \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n132 },\n133 {\n134 \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n135 },\n136 ]\n137 )\n138 \n139 # Internationalization\n140 \n141 LANGUAGE_CODE = \"en\"\n142 \n143 LANGUAGES = [(\"en\", \"English\"), (\"es\", \"Español\")]\n144 \n145 LOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n146 \n147 USE_I18N = True\n148 USE_L10N = True\n149 \n150 TIME_ZONE = \"UTC\"\n151 USE_TZ = True\n152 \n153 # Static files (CSS, JavaScript, Images)\n154 \n155 STATIC_URL = \"/static/\"\n156 STATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\n157 STATIC_ROOT = 
os.path.join(BASE_DIR, \"static\")\n158 \n159 # Logging configuration\n160 \n161 LOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\n162 LOGGING = {\n163 \"version\": 1,\n164 \"disable_existing_loggers\": False,\n165 \"formatters\": {\n166 \"default\": {\n167 \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n168 \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n169 \"style\": \"{\",\n170 },\n171 },\n172 \"handlers\": {\n173 \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n174 },\n175 \"root\": {\n176 \"handlers\": [\"default\"],\n177 \"level\": LOG_LEVEL,\n178 },\n179 \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n180 }\n181 \n182 # Analytics configuration\n183 \n184 ANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n185\n[end of benefits/settings.py]\n[start of benefits/urls.py]\n1 \"\"\"\n2 benefits URL Configuration\n3 \n4 The `urlpatterns` list routes URLs to views. For more information please see:\n5 https://docs.djangoproject.com/en/3.1/topics/http/urls/\n6 \"\"\"\n7 import logging\n8 \n9 from django.urls import include, path\n10 \n11 from benefits.settings import ADMIN\n12 \n13 \n14 logger = logging.getLogger(__name__)\n15 \n16 handler400 = \"benefits.core.views.bad_request\"\n17 handler403 = \"benefits.core.views.bad_request\"\n18 handler404 = \"benefits.core.views.page_not_found\"\n19 handler500 = \"benefits.core.views.server_error\"\n20 \n21 urlpatterns = [\n22 path(\"\", include(\"benefits.core.urls\")),\n23 path(\"enrollment/\", include(\"benefits.enrollment.urls\")),\n24 path(\"eligibility/\", include(\"benefits.eligibility.urls\")),\n25 path(\"i18n/\", include(\"django.conf.urls.i18n\")),\n26 ]\n27 \n28 if ADMIN:\n29 from django.contrib import admin\n30 \n31 logger.debug(\"Register admin/ urls\")\n32 urlpatterns.append(path(\"admin/\", admin.site.urls))\n33 else:\n34 logger.debug(\"Skip url registrations for admin\")\n35\n[end of benefits/urls.py]\n\n\nI need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.\n\ndiff --git a/file.py b/file.py\n--- a/file.py\n+++ b/file.py\n@@ -1,27 +1,35 @@\n def euclidean(a, b):\n- while b:\n- a, b = b, a % b\n- return a\n+ if b == 0:\n+ return a\n+ return euclidean(b, a % b)\n \n \n def bresenham(x0, y0, x1, y1):\n points = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n- sx = 1 if x0 < x1 else -1\n- sy = 1 if y0 < y1 else -1\n- err = dx - dy\n+ x, y = x0, y0\n+ sx = -1 if x0 > x1 else 1\n+ sy = -1 if y0 > y1 else 1\n \n- while True:\n- points.append((x0, y0))\n- if x0 == x1 and y0 == y1:\n- break\n- e2 = 2 * err\n- if e2 > -dy:\n- err -= dy\n- x0 += sx\n- if e2 < dx:\n- err += dx\n- y0 += sy\n+ if dx > dy:\n+ err = dx / 2.0\n+ while x != x1:\n+ points.append((x, y))\n+ err -= dy\n+ if err < 0:\n+ y += sy\n+ err += dx\n+ x += sx\n+ else:\n+ err = dy / 2.0\n+ while y != y1:\n+ points.append((x, y))\n+ err -= dx\n+ if err < 0:\n+ x += sx\n+ err += dy\n+ y += sy\n+\n+ points.append((x, y))\n return points\n\n"},"golden_diff":{"kind":"string","value":"diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -69,6 +69,7 @@\n \n if not DEBUG:\n CSRF_COOKIE_SECURE = True\n+ CSRF_FAILURE_VIEW = \"benefits.core.views.bad_request\"\n SESSION_COOKIE_SECURE = True\n \n ROOT_URLCONF = \"benefits.urls\"\ndiff --git a/benefits/urls.py b/benefits/urls.py\n--- a/benefits/urls.py\n+++ b/benefits/urls.py\n@@ -20,8 +20,8 @@\n \n urlpatterns = [\n path(\"\", include(\"benefits.core.urls\")),\n- path(\"enrollment/\", include(\"benefits.enrollment.urls\")),\n path(\"eligibility/\", include(\"benefits.eligibility.urls\")),\n+ path(\"enrollment/\", include(\"benefits.enrollment.urls\")),\n path(\"i18n/\", include(\"django.conf.urls.i18n\")),\n ]\n"},"verification_info":{"kind":"string","value":"{\"golden_diff\": \"diff --git a/benefits/settings.py b/benefits/settings.py\\n--- a/benefits/settings.py\\n+++ b/benefits/settings.py\\n@@ -69,6 +69,7 @@\\n \\n if not DEBUG:\\n CSRF_COOKIE_SECURE = True\\n+ CSRF_FAILURE_VIEW = \\\"benefits.core.views.bad_request\\\"\\n SESSION_COOKIE_SECURE = True\\n \\n ROOT_URLCONF = \\\"benefits.urls\\\"\\ndiff --git a/benefits/urls.py b/benefits/urls.py\\n--- a/benefits/urls.py\\n+++ b/benefits/urls.py\\n@@ -20,8 +20,8 @@\\n \\n urlpatterns = [\\n path(\\\"\\\", include(\\\"benefits.core.urls\\\")),\\n- path(\\\"enrollment/\\\", include(\\\"benefits.enrollment.urls\\\")),\\n path(\\\"eligibility/\\\", include(\\\"benefits.eligibility.urls\\\")),\\n+ path(\\\"enrollment/\\\", include(\\\"benefits.enrollment.urls\\\")),\\n path(\\\"i18n/\\\", include(\\\"django.conf.urls.i18n\\\")),\\n ]\\n\", \"issue\": \"Ensure cookies are enabled\\nNeed a basic client-side check that cookies are enabled (and UI if not) as soon as the app loads, since we require cookies to store the temporary transaction data.\\n\", \"before_files\": [{\"content\": \"\\\"\\\"\\\"\\nDjango settings for benefits project.\\n\\\"\\\"\\\"\\nimport os\\n\\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\\n\\n# SECURITY WARNING: keep the secret key used in production secret!\\nSECRET_KEY = os.environ[\\\"DJANGO_SECRET_KEY\\\"]\\n\\n# SECURITY WARNING: don't run with debug turned on in production!\\nDEBUG = os.environ.get(\\\"DJANGO_DEBUG\\\", \\\"False\\\").lower() == \\\"true\\\"\\n\\nADMIN = os.environ.get(\\\"DJANGO_ADMIN\\\", \\\"False\\\").lower() == 
\\\"true\\\"\\n\\nALLOWED_HOSTS = []\\n\\nif DEBUG:\\n ALLOWED_HOSTS.extend([\\\"*\\\"])\\nelse:\\n hosts = os.environ[\\\"DJANGO_ALLOWED_HOSTS\\\"].split()\\n ALLOWED_HOSTS.extend(hosts)\\n\\n# Application definition\\n\\nINSTALLED_APPS = [\\n \\\"django.contrib.sessions\\\",\\n \\\"django.contrib.staticfiles\\\",\\n \\\"benefits.core\\\",\\n \\\"benefits.enrollment\\\",\\n \\\"benefits.eligibility\\\",\\n]\\n\\nif ADMIN:\\n INSTALLED_APPS.extend(\\n [\\n \\\"django.contrib.admin\\\",\\n \\\"django.contrib.auth\\\",\\n \\\"django.contrib.contenttypes\\\",\\n \\\"django.contrib.messages\\\",\\n ]\\n )\\n\\nMIDDLEWARE = [\\n \\\"django.middleware.security.SecurityMiddleware\\\",\\n \\\"django.contrib.sessions.middleware.SessionMiddleware\\\",\\n \\\"django.middleware.locale.LocaleMiddleware\\\",\\n \\\"django.middleware.common.CommonMiddleware\\\",\\n \\\"django.middleware.csrf.CsrfViewMiddleware\\\",\\n \\\"django.middleware.clickjacking.XFrameOptionsMiddleware\\\",\\n \\\"benefits.core.middleware.DebugSession\\\",\\n \\\"benefits.core.middleware.ChangedLanguageEvent\\\",\\n]\\n\\nif ADMIN:\\n MIDDLEWARE.extend(\\n [\\n \\\"django.contrib.auth.middleware.AuthenticationMiddleware\\\",\\n \\\"django.contrib.messages.middleware.MessageMiddleware\\\",\\n ]\\n )\\n\\nCSRF_COOKIE_HTTPONLY = True\\n\\nSESSION_COOKIE_AGE = 3600\\nSESSION_COOKIE_SAMESITE = \\\"Strict\\\"\\nSESSION_ENGINE = \\\"django.contrib.sessions.backends.signed_cookies\\\"\\n\\nif not DEBUG:\\n CSRF_COOKIE_SECURE = True\\n SESSION_COOKIE_SECURE = True\\n\\nROOT_URLCONF = \\\"benefits.urls\\\"\\n\\ntemplate_ctx_processors = [\\n \\\"django.template.context_processors.request\\\",\\n \\\"benefits.core.context_processors.analytics\\\",\\n]\\n\\nif DEBUG:\\n template_ctx_processors.extend(\\n [\\n \\\"django.template.context_processors.debug\\\",\\n \\\"benefits.core.context_processors.debug\\\",\\n ]\\n )\\n\\nif ADMIN:\\n template_ctx_processors.extend(\\n [\\n \\\"django.contrib.auth.context_processors.auth\\\",\\n \\\"django.contrib.messages.context_processors.messages\\\",\\n ]\\n )\\n\\nTEMPLATES = [\\n {\\n \\\"BACKEND\\\": \\\"django.template.backends.django.DjangoTemplates\\\",\\n \\\"DIRS\\\": [os.path.join(BASE_DIR, \\\"benefits\\\", \\\"templates\\\")],\\n \\\"APP_DIRS\\\": True,\\n \\\"OPTIONS\\\": {\\n \\\"context_processors\\\": template_ctx_processors,\\n },\\n },\\n]\\n\\nWSGI_APPLICATION = \\\"benefits.wsgi.application\\\"\\n\\nDATABASES = {\\n \\\"default\\\": {\\n \\\"ENGINE\\\": \\\"django.db.backends.sqlite3\\\",\\n \\\"NAME\\\": os.environ.get(\\\"DJANGO_DB\\\", \\\"django\\\") + \\\".db\\\",\\n }\\n}\\n\\n# Password validation\\n\\nAUTH_PASSWORD_VALIDATORS = []\\n\\nif ADMIN:\\n AUTH_PASSWORD_VALIDATORS.extend(\\n [\\n {\\n \\\"NAME\\\": \\\"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\\\",\\n },\\n {\\n \\\"NAME\\\": \\\"django.contrib.auth.password_validation.MinimumLengthValidator\\\",\\n },\\n {\\n \\\"NAME\\\": \\\"django.contrib.auth.password_validation.CommonPasswordValidator\\\",\\n },\\n {\\n \\\"NAME\\\": \\\"django.contrib.auth.password_validation.NumericPasswordValidator\\\",\\n },\\n ]\\n )\\n\\n# Internationalization\\n\\nLANGUAGE_CODE = \\\"en\\\"\\n\\nLANGUAGES = [(\\\"en\\\", \\\"English\\\"), (\\\"es\\\", \\\"Espa\\u00f1ol\\\")]\\n\\nLOCALE_PATHS = [os.path.join(BASE_DIR, \\\"benefits\\\", \\\"locale\\\")]\\n\\nUSE_I18N = True\\nUSE_L10N = True\\n\\nTIME_ZONE = \\\"UTC\\\"\\nUSE_TZ = True\\n\\n# Static files (CSS, JavaScript, Images)\\n\\nSTATIC_URL = 
\\\"/static/\\\"\\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \\\"benefits\\\", \\\"static\\\")]\\nSTATIC_ROOT = os.path.join(BASE_DIR, \\\"static\\\")\\n\\n# Logging configuration\\n\\nLOG_LEVEL = os.environ.get(\\\"DJANGO_LOG_LEVEL\\\", \\\"DEBUG\\\" if DEBUG else \\\"WARNING\\\")\\nLOGGING = {\\n \\\"version\\\": 1,\\n \\\"disable_existing_loggers\\\": False,\\n \\\"formatters\\\": {\\n \\\"default\\\": {\\n \\\"format\\\": \\\"[{asctime}] {levelname} {name}:{lineno} {message}\\\",\\n \\\"datefmt\\\": \\\"%d/%b/%Y %H:%M:%S\\\",\\n \\\"style\\\": \\\"{\\\",\\n },\\n },\\n \\\"handlers\\\": {\\n \\\"default\\\": {\\\"class\\\": \\\"logging.StreamHandler\\\", \\\"formatter\\\": \\\"default\\\"},\\n },\\n \\\"root\\\": {\\n \\\"handlers\\\": [\\\"default\\\"],\\n \\\"level\\\": LOG_LEVEL,\\n },\\n \\\"loggers\\\": {\\\"django\\\": {\\\"handlers\\\": [\\\"default\\\"], \\\"propagate\\\": False}},\\n}\\n\\n# Analytics configuration\\n\\nANALYTICS_KEY = os.environ.get(\\\"ANALYTICS_KEY\\\")\\n\", \"path\": \"benefits/settings.py\"}, {\"content\": \"\\\"\\\"\\\"\\nbenefits URL Configuration\\n\\nThe `urlpatterns` list routes URLs to views. For more information please see:\\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\\n\\\"\\\"\\\"\\nimport logging\\n\\nfrom django.urls import include, path\\n\\nfrom benefits.settings import ADMIN\\n\\n\\nlogger = logging.getLogger(__name__)\\n\\nhandler400 = \\\"benefits.core.views.bad_request\\\"\\nhandler403 = \\\"benefits.core.views.bad_request\\\"\\nhandler404 = \\\"benefits.core.views.page_not_found\\\"\\nhandler500 = \\\"benefits.core.views.server_error\\\"\\n\\nurlpatterns = [\\n path(\\\"\\\", include(\\\"benefits.core.urls\\\")),\\n path(\\\"enrollment/\\\", include(\\\"benefits.enrollment.urls\\\")),\\n path(\\\"eligibility/\\\", include(\\\"benefits.eligibility.urls\\\")),\\n path(\\\"i18n/\\\", include(\\\"django.conf.urls.i18n\\\")),\\n]\\n\\nif ADMIN:\\n from django.contrib import admin\\n\\n logger.debug(\\\"Register admin/ urls\\\")\\n urlpatterns.append(path(\\\"admin/\\\", admin.site.urls))\\nelse:\\n logger.debug(\\\"Skip url registrations for admin\\\")\\n\", \"path\": \"benefits/urls.py\"}]}"},"num_tokens_prompt":{"kind":"number","value":2404,"string":"2,404"},"num_tokens_diff":{"kind":"number","value":212,"string":"212"}}},{"rowIdx":18198,"cells":{"problem_id":{"kind":"string","value":"gh_patches_debug_19230"},"source":{"kind":"string","value":"rasdani/github-patches"},"task_type":{"kind":"string","value":"git_diff"},"in_source_id":{"kind":"string","value":"google__clusterfuzz-863"},"prompt":{"kind":"string","value":"You will be provided with a partial code base and an issue statement explaining a problem to resolve.\n\nFuzzers page does not work well with large number of jobs\nFor libFuzzer, if we have a lot of existing jobs (>100) and want to add a new job and associate it. Submit button feels stuck, does not show updates, and take 1-2 min to finish. 
Can we show some update or something better to optimize this when only one job is updated.\r\n\r\n@oliverchang as fyi.\n\n\n\n[start of src/python/fuzzing/fuzzer_selection.py]\n1 # Copyright 2019 Google LLC\n2 #\n3 # Licensed under the Apache License, Version 2.0 (the \"License\");\n4 # you may not use this file except in compliance with the License.\n5 # You may obtain a copy of the License at\n6 #\n7 # http://www.apache.org/licenses/LICENSE-2.0\n8 #\n9 # Unless required by applicable law or agreed to in writing, software\n10 # distributed under the License is distributed on an \"AS IS\" BASIS,\n11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n12 # See the License for the specific language governing permissions and\n13 # limitations under the License.\n14 \"\"\"Helper functions to update fuzzer-job mappings, and select fuzzers to run.\"\"\"\n15 \n16 import collections\n17 \n18 from base import utils\n19 from datastore import data_types\n20 from datastore import fuzz_target_utils\n21 from datastore import ndb\n22 from datastore import ndb_utils\n23 from metrics import logs\n24 from system import environment\n25 \n26 # Used to prepare targets to be passed to utils.random_weighted_choice.\n27 WeightedTarget = collections.namedtuple('WeightedTarget', ['target', 'weight'])\n28 \n29 \n30 def update_mappings_for_fuzzer(fuzzer, mappings=None):\n31 \"\"\"Clear existing mappings for a fuzzer, and replace them.\"\"\"\n32 if mappings is None:\n33 mappings = fuzzer.jobs\n34 \n35 query = data_types.FuzzerJob.query()\n36 query = query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name)\n37 entities = ndb_utils.get_all_from_query(query)\n38 old_mappings = {}\n39 for entity in entities:\n40 old_mappings[(entity.job, entity.platform)] = entity\n41 \n42 new_mappings = []\n43 for job_name in mappings:\n44 job = data_types.Job.query(data_types.Job.name == job_name).get()\n45 if not job:\n46 logs.log_error('An unknown job %s was selected for fuzzer %s.' 
[Truncated embedded dataset-viewer payload: the remainder of the raw JSON state for this page of rows, including the full prompt, golden_diff and verification_info of row gh_patches_debug_20320 (praw-dev__praw-1104; 975 prompt tokens, 313 diff tokens) and the tail of the preceding ClusterFuzz fuzzer_selection row, plus pagination metadata (100 rows per page, offset 18,100 of 18,558 total rows) and a short-lived viewer access token.]
Dataset schema; each sample row below lists its fields in this order:

Column              Type     Values across the split
problem_id          string   lengths 18 to 22
source              string   1 distinct value (rasdani/github-patches)
task_type           string   1 distinct value (git_diff)
in_source_id        string   lengths 13 to 58
prompt              string   lengths 1.71k to 18.9k
golden_diff         string   lengths 145 to 5.13k
verification_info   string   lengths 465 to 23.6k
num_tokens_prompt   int64    values 556 to 4.1k
num_tokens_diff     int64    values 47 to 1.02k
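As a rough sketch of how these fields fit together (the dataset repository id and the split name below are assumptions, not taken from this page), the split can be loaded with the Hugging Face `datasets` library, and the `verification_info` string, which bundles the reference diff, the issue text, and the pre-patch file snapshots, can be parsed as JSON:

```python
import json

from datasets import load_dataset

# Assumed repo id and split; substitute the actual dataset id shown in the page header.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])        # ids like the ones in the rows below
print(row["num_tokens_prompt"], row["num_tokens_diff"])

# "prompt" holds the issue statement plus the partial code base;
# "golden_diff" is the reference patch in unified diff format.
print(row["prompt"][:200])
print(row["golden_diff"][:200])

# "verification_info" is a JSON string carrying the same diff ("golden_diff"),
# the issue text ("issue"), and pre-patch file snapshots ("before_files").
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
for snapshot in info["before_files"]:
    print(snapshot["path"], len(snapshot["content"]), "characters")
```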
gh_patches_debug_3543
rasdani/github-patches
git_diff
beeware__toga-1634
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Source installs no longer working #1614 made some changes to the packaging of modules to support the release package workflow. The wheels generated from this process appear to work fine; however, source installs don't appear to be working. I've had problems on both macOS and Android. **To Reproduce** Steps to reproduce the behavior: 1. `briefcase run` or `briefcase run android` on Tutorial 0. **Expected behavior** App should start. **Environment:** - Operating System: macOS - Python version: 3.10 - Software versions: - Briefcase: 0.3.11 - Toga: 96881f093 </issue> <code> [start of src/web/setup.py] 1 #!/usr/bin/env python 2 import re 3 4 from setuptools import setup 5 6 # Version handline needs to be programatic because 7 # we can't import toga_web to compute the version; 8 # and to support versioned subpackage dependencies 9 with open('src/toga_web/__init__.py', encoding='utf8') as version_file: 10 version_match = re.search( 11 r"^__version__ = ['\"]([^'\"]*)['\"]", 12 version_file.read(), 13 re.M 14 ) 15 if version_match: 16 version = version_match.group(1) 17 else: 18 raise RuntimeError("Unable to find version string.") 19 20 setup( 21 version=version, 22 install_requires=[ 23 # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name 24 # toga-core is ambigous when on the package hasn't been published to 25 # PyPI. As a workaround, don't specify the dependency, and manually 26 # ensure that toga-core is installed. 27 # 'toga-core==%s' % version, 28 ], 29 ) 30 [end of src/web/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/web/setup.py b/src/web/setup.py --- a/src/web/setup.py +++ b/src/web/setup.py @@ -20,10 +20,6 @@ setup( version=version, install_requires=[ - # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name - # toga-core is ambigous when on the package hasn't been published to - # PyPI. As a workaround, don't specify the dependency, and manually - # ensure that toga-core is installed. - # 'toga-core==%s' % version, + 'toga-core==%s' % version, ], )
{"golden_diff": "diff --git a/src/web/setup.py b/src/web/setup.py\n--- a/src/web/setup.py\n+++ b/src/web/setup.py\n@@ -20,10 +20,6 @@\n setup(\n version=version,\n install_requires=[\n- # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name\n- # toga-core is ambigous when on the package hasn't been published to\n- # PyPI. As a workaround, don't specify the dependency, and manually\n- # ensure that toga-core is installed.\n- # 'toga-core==%s' % version,\n+ 'toga-core==%s' % version,\n ],\n )\n", "issue": "Source installs no longer working\n#1614 made some changes to the packaging of modules to support the release package workflow.\r\n\r\nThe wheels generated from this process appear to work fine; however, source installs don't appear to be working. I've had problems on both macOS and Android.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. `briefcase run` or `briefcase run android` on Tutorial 0.\r\n\r\n**Expected behavior**\r\n\r\nApp should start.\r\n\r\n**Environment:**\r\n - Operating System: macOS\r\n - Python version: 3.10\r\n - Software versions:\r\n - Briefcase: 0.3.11\r\n - Toga: 96881f093\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_web to compute the version;\n# and to support versioned subpackage dependencies\nwith open('src/toga_web/__init__.py', encoding='utf8') as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file.read(),\n re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name\n # toga-core is ambigous when on the package hasn't been published to\n # PyPI. As a workaround, don't specify the dependency, and manually\n # ensure that toga-core is installed.\n # 'toga-core==%s' % version,\n ],\n)\n", "path": "src/web/setup.py"}]}
969
159
gh_patches_debug_2990
rasdani/github-patches
git_diff
aio-libs-abandoned__aioredis-py-535
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add a BUSYGROUP reply error The XGROUP CREATE command can return a BUSYGROUP error when a group already exists: https://redis.io/commands/xgroup I think the `ReplyError` subclass for matching it would look like this: ```py class BusyGroupError(ReplyError): MATCH_REPLY = "BUSYGROUP Consumer Group name already exists" ``` </issue> <code> [start of aioredis/errors.py] 1 __all__ = [ 2 'RedisError', 3 'ProtocolError', 4 'ReplyError', 5 'MaxClientsError', 6 'AuthError', 7 'PipelineError', 8 'MultiExecError', 9 'WatchVariableError', 10 'ChannelClosedError', 11 'ConnectionClosedError', 12 'ConnectionForcedCloseError', 13 'PoolClosedError', 14 'MasterNotFoundError', 15 'SlaveNotFoundError', 16 'ReadOnlyError', 17 ] 18 19 20 class RedisError(Exception): 21 """Base exception class for aioredis exceptions.""" 22 23 24 class ProtocolError(RedisError): 25 """Raised when protocol error occurs.""" 26 27 28 class ReplyError(RedisError): 29 """Raised for redis error replies (-ERR).""" 30 31 MATCH_REPLY = None 32 33 def __new__(cls, msg, *args): 34 for klass in cls.__subclasses__(): 35 if msg and klass.MATCH_REPLY and msg.startswith(klass.MATCH_REPLY): 36 return klass(msg, *args) 37 return super().__new__(cls, msg, *args) 38 39 40 class MaxClientsError(ReplyError): 41 """Raised for redis server when the maximum number of client has been 42 reached.""" 43 44 MATCH_REPLY = "ERR max number of clients reached" 45 46 47 class AuthError(ReplyError): 48 """Raised when authentication errors occurs.""" 49 50 MATCH_REPLY = ("NOAUTH ", "ERR invalid password") 51 52 53 class PipelineError(RedisError): 54 """Raised if command within pipeline raised error.""" 55 56 def __init__(self, errors): 57 super().__init__('{} errors:'.format(self.__class__.__name__), errors) 58 59 60 class MultiExecError(PipelineError): 61 """Raised if command within MULTI/EXEC block caused error.""" 62 63 64 class WatchVariableError(MultiExecError): 65 """Raised if watched variable changed (EXEC returns None).""" 66 67 68 class ChannelClosedError(RedisError): 69 """Raised when Pub/Sub channel is unsubscribed and messages queue is empty. 70 """ 71 72 73 class ReadOnlyError(RedisError): 74 """Raised from slave when read-only mode is enabled""" 75 76 77 class MasterNotFoundError(RedisError): 78 """Raised for sentinel master not found error.""" 79 80 81 class SlaveNotFoundError(RedisError): 82 """Raised for sentinel slave not found error.""" 83 84 85 class MasterReplyError(RedisError): 86 """Raised by sentinel client for master error replies.""" 87 88 89 class SlaveReplyError(RedisError): 90 """Raised by sentinel client for slave error replies.""" 91 92 93 class ConnectionClosedError(RedisError): 94 """Raised if connection to server was closed.""" 95 96 97 class ConnectionForcedCloseError(ConnectionClosedError): 98 """Raised if connection was closed with .close() method.""" 99 100 101 class PoolClosedError(RedisError): 102 """Raised if pool is closed.""" 103 [end of aioredis/errors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aioredis/errors.py b/aioredis/errors.py --- a/aioredis/errors.py +++ b/aioredis/errors.py @@ -50,6 +50,12 @@ MATCH_REPLY = ("NOAUTH ", "ERR invalid password") +class BusyGroupError(ReplyError): + """Raised if Consumer Group name already exists.""" + + MATCH_REPLY = "BUSYGROUP Consumer Group name already exists" + + class PipelineError(RedisError): """Raised if command within pipeline raised error."""
{"golden_diff": "diff --git a/aioredis/errors.py b/aioredis/errors.py\n--- a/aioredis/errors.py\n+++ b/aioredis/errors.py\n@@ -50,6 +50,12 @@\n MATCH_REPLY = (\"NOAUTH \", \"ERR invalid password\")\n \n \n+class BusyGroupError(ReplyError):\n+ \"\"\"Raised if Consumer Group name already exists.\"\"\"\n+\n+ MATCH_REPLY = \"BUSYGROUP Consumer Group name already exists\"\n+\n+\n class PipelineError(RedisError):\n \"\"\"Raised if command within pipeline raised error.\"\"\"\n", "issue": "Add a BUSYGROUP reply error\nThe XGROUP CREATE command can return a BUSYGROUP error when a group already exists: https://redis.io/commands/xgroup\r\n\r\nI think the `ReplyError` subclass for matching it would look like this:\r\n\r\n```py\r\nclass BusyGroupError(ReplyError):\r\n MATCH_REPLY = \"BUSYGROUP Consumer Group name already exists\"\r\n```\n", "before_files": [{"content": "__all__ = [\n 'RedisError',\n 'ProtocolError',\n 'ReplyError',\n 'MaxClientsError',\n 'AuthError',\n 'PipelineError',\n 'MultiExecError',\n 'WatchVariableError',\n 'ChannelClosedError',\n 'ConnectionClosedError',\n 'ConnectionForcedCloseError',\n 'PoolClosedError',\n 'MasterNotFoundError',\n 'SlaveNotFoundError',\n 'ReadOnlyError',\n ]\n\n\nclass RedisError(Exception):\n \"\"\"Base exception class for aioredis exceptions.\"\"\"\n\n\nclass ProtocolError(RedisError):\n \"\"\"Raised when protocol error occurs.\"\"\"\n\n\nclass ReplyError(RedisError):\n \"\"\"Raised for redis error replies (-ERR).\"\"\"\n\n MATCH_REPLY = None\n\n def __new__(cls, msg, *args):\n for klass in cls.__subclasses__():\n if msg and klass.MATCH_REPLY and msg.startswith(klass.MATCH_REPLY):\n return klass(msg, *args)\n return super().__new__(cls, msg, *args)\n\n\nclass MaxClientsError(ReplyError):\n \"\"\"Raised for redis server when the maximum number of client has been\n reached.\"\"\"\n\n MATCH_REPLY = \"ERR max number of clients reached\"\n\n\nclass AuthError(ReplyError):\n \"\"\"Raised when authentication errors occurs.\"\"\"\n\n MATCH_REPLY = (\"NOAUTH \", \"ERR invalid password\")\n\n\nclass PipelineError(RedisError):\n \"\"\"Raised if command within pipeline raised error.\"\"\"\n\n def __init__(self, errors):\n super().__init__('{} errors:'.format(self.__class__.__name__), errors)\n\n\nclass MultiExecError(PipelineError):\n \"\"\"Raised if command within MULTI/EXEC block caused error.\"\"\"\n\n\nclass WatchVariableError(MultiExecError):\n \"\"\"Raised if watched variable changed (EXEC returns None).\"\"\"\n\n\nclass ChannelClosedError(RedisError):\n \"\"\"Raised when Pub/Sub channel is unsubscribed and messages queue is empty.\n \"\"\"\n\n\nclass ReadOnlyError(RedisError):\n \"\"\"Raised from slave when read-only mode is enabled\"\"\"\n\n\nclass MasterNotFoundError(RedisError):\n \"\"\"Raised for sentinel master not found error.\"\"\"\n\n\nclass SlaveNotFoundError(RedisError):\n \"\"\"Raised for sentinel slave not found error.\"\"\"\n\n\nclass MasterReplyError(RedisError):\n \"\"\"Raised by sentinel client for master error replies.\"\"\"\n\n\nclass SlaveReplyError(RedisError):\n \"\"\"Raised by sentinel client for slave error replies.\"\"\"\n\n\nclass ConnectionClosedError(RedisError):\n \"\"\"Raised if connection to server was closed.\"\"\"\n\n\nclass ConnectionForcedCloseError(ConnectionClosedError):\n \"\"\"Raised if connection was closed with .close() method.\"\"\"\n\n\nclass PoolClosedError(RedisError):\n \"\"\"Raised if pool is closed.\"\"\"\n", "path": "aioredis/errors.py"}]}
1,383
118
gh_patches_debug_1898
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1813
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> OpenTelemetry distro as a default distro for OpenTelemetry Instrumentation The `opentelemetry-instrumentation` auto instrumentation doesn't work without installing `opentelemetry-distro` as the components initialisation is done in distro package. How does a regular user know about this and shouldn't openetemetry distro be the default and can give an option to let user use others? </issue> <code> [start of docs/getting_started/otlpcollector_example.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # otcollector.py 16 import time 17 18 from opentelemetry import trace 19 from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( 20 OTLPSpanExporter, 21 ) 22 from opentelemetry.sdk.trace import TracerProvider 23 from opentelemetry.sdk.trace.export import BatchSpanProcessor 24 25 span_exporter = OTLPSpanExporter( 26 # optional 27 # endpoint:="myCollectorURL:4317", 28 # credentials=ChannelCredentials(credentials), 29 # headers=(("metadata", "metadata")), 30 ) 31 tracer_provider = TracerProvider() 32 trace.set_tracer_provider(tracer_provider) 33 span_processor = BatchSpanProcessor(span_exporter) 34 tracer_provider.add_span_processor(span_processor) 35 36 # Configure the tracer to use the collector exporter 37 tracer = trace.get_tracer_provider().get_tracer(__name__) 38 39 with tracer.start_as_current_span("foo"): 40 print("Hello world!") 41 [end of docs/getting_started/otlpcollector_example.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/getting_started/otlpcollector_example.py b/docs/getting_started/otlpcollector_example.py --- a/docs/getting_started/otlpcollector_example.py +++ b/docs/getting_started/otlpcollector_example.py @@ -24,7 +24,7 @@ span_exporter = OTLPSpanExporter( # optional - # endpoint:="myCollectorURL:4317", + # endpoint="myCollectorURL:4317", # credentials=ChannelCredentials(credentials), # headers=(("metadata", "metadata")), )
{"golden_diff": "diff --git a/docs/getting_started/otlpcollector_example.py b/docs/getting_started/otlpcollector_example.py\n--- a/docs/getting_started/otlpcollector_example.py\n+++ b/docs/getting_started/otlpcollector_example.py\n@@ -24,7 +24,7 @@\n \n span_exporter = OTLPSpanExporter(\n # optional\n- # endpoint:=\"myCollectorURL:4317\",\n+ # endpoint=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n )\n", "issue": "OpenTelemetry distro as a default distro for OpenTelemetry Instrumentation\nThe `opentelemetry-instrumentation` auto instrumentation doesn't work without installing `opentelemetry-distro` as the components initialisation is done in distro package. How does a regular user know about this and shouldn't openetemetry distro be the default and can give an option to let user use others? \n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# otcollector.py\nimport time\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (\n OTLPSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nspan_exporter = OTLPSpanExporter(\n # optional\n # endpoint:=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n)\ntracer_provider = TracerProvider()\ntrace.set_tracer_provider(tracer_provider)\nspan_processor = BatchSpanProcessor(span_exporter)\ntracer_provider.add_span_processor(span_processor)\n\n# Configure the tracer to use the collector exporter\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n", "path": "docs/getting_started/otlpcollector_example.py"}]}
1,016
127
gh_patches_debug_22463
rasdani/github-patches
git_diff
feast-dev__feast-3514
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> feast ui does not work on proxy subpath ## Expected Behavior Feast UI should work when it is served behind a proxy, on a subpath e.g. `/feast-ui` ## Current Behavior Parts of the feast UI works behind a subpath, but not entirely (nothing is displayed, just the feast logo with a "404" text - refer to screenshot). No requests in the network tab of the web browser are hitting 404. ![image](https://user-images.githubusercontent.com/12453748/216761133-a493bfa9-f752-4d23-9fc9-afc7cf4eb08b.png) ## Steps to reproduce Serve feast UI as you would e.g. `feature_store.serve_ui()`, optionally passing in the `root_path` parameter (it does not help). Set up an nginx pod with the following configuration (i.e. the nginx pod should have `/etc/nginx/conf.d/default.conf` with the following contents - `dummy_project` is the project name, and `http://feast-ui-service:8080` is where the feast UI can be accessed from your nginx pod / container): ``` server { listen 80 default_server; location = /feast-ui/ { rewrite (.*) /feast-ui/p/dummy_project permanent; } location /feast-ui/ { proxy_pass http://feast-ui-service:8080/; } location / { proxy_pass http://feast-ui-service:8080/; } } ``` This configuration works on localhost when nginx can listen on the root path `/`. However, note that the URL after all the redirects is wrong (it does not have the prefix). - The first block is required to force a redirect to the `/p/{project_name}`. Without this, the page will display 404 as above. - The second block is required to strip away `/feast-ui` so the UI app does not receive that path that it is not aware of - The third block is a trick to make this setup work in a local environment, because the app itself will redirect the user back to `/p/dummy_project` (without the prefix), which we then proxy into the feast UI app. However, in an actual environment, this setup does not work, because when the url does not contain the `/feast-ui` prefix, the ingress will not route it to the nginx pod, so the nginx pod cannot proxy the connection to the right place. Ideally, if the feast ui app is capable of being served on a subpath, only the second `location` block should be required in the nginx configuration. The first and third `location` blocks are workarounds. 
### Specifications - Version: 0.29.0 ## Possible Solution The app should redirect to relative and not absolute paths </issue> <code> [start of sdk/python/feast/ui_server.py] 1 import json 2 import threading 3 from typing import Callable, Optional 4 5 import pkg_resources 6 import uvicorn 7 from fastapi import FastAPI, Response 8 from fastapi.middleware.cors import CORSMiddleware 9 from fastapi.staticfiles import StaticFiles 10 11 import feast 12 13 14 def get_app( 15 store: "feast.FeatureStore", 16 get_registry_dump: Callable, 17 project_id: str, 18 registry_ttl_secs: int, 19 host: str, 20 port: int, 21 ): 22 app = FastAPI() 23 24 app.add_middleware( 25 CORSMiddleware, 26 allow_origins=["*"], 27 allow_credentials=True, 28 allow_methods=["*"], 29 allow_headers=["*"], 30 ) 31 32 # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down 33 registry_proto = None 34 shutting_down = False 35 active_timer: Optional[threading.Timer] = None 36 37 def async_refresh(): 38 store.refresh_registry() 39 nonlocal registry_proto 40 registry_proto = store.registry.proto() 41 if shutting_down: 42 return 43 nonlocal active_timer 44 active_timer = threading.Timer(registry_ttl_secs, async_refresh) 45 active_timer.start() 46 47 @app.on_event("shutdown") 48 def shutdown_event(): 49 nonlocal shutting_down 50 shutting_down = True 51 if active_timer: 52 active_timer.cancel() 53 54 async_refresh() 55 56 ui_dir = pkg_resources.resource_filename(__name__, "ui/build/") 57 # Initialize with the projects-list.json file 58 with open(ui_dir + "projects-list.json", mode="w") as f: 59 projects_dict = { 60 "projects": [ 61 { 62 "name": "Project", 63 "description": "Test project", 64 "id": project_id, 65 "registryPath": "/registry", 66 } 67 ] 68 } 69 f.write(json.dumps(projects_dict)) 70 71 @app.get("/registry") 72 def read_registry(): 73 return Response( 74 content=registry_proto.SerializeToString(), 75 media_type="application/octet-stream", 76 ) 77 78 # For all other paths (such as paths that would otherwise be handled by react router), pass to React 79 @app.api_route("/p/{path_name:path}", methods=["GET"]) 80 def catch_all(): 81 filename = ui_dir + "index.html" 82 83 with open(filename) as f: 84 content = f.read() 85 86 return Response(content, media_type="text/html") 87 88 app.mount( 89 "/", 90 StaticFiles(directory=ui_dir, html=True), 91 name="site", 92 ) 93 94 return app 95 96 97 def start_server( 98 store: "feast.FeatureStore", 99 host: str, 100 port: int, 101 get_registry_dump: Callable, 102 project_id: str, 103 registry_ttl_sec: int, 104 root_path: str = "", 105 ): 106 app = get_app( 107 store, 108 get_registry_dump, 109 project_id, 110 registry_ttl_sec, 111 host, 112 port, 113 ) 114 assert root_path is not None 115 uvicorn.run(app, host=host, port=port, root_path=root_path) 116 [end of sdk/python/feast/ui_server.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py --- a/sdk/python/feast/ui_server.py +++ b/sdk/python/feast/ui_server.py @@ -13,11 +13,9 @@ def get_app( store: "feast.FeatureStore", - get_registry_dump: Callable, project_id: str, registry_ttl_secs: int, - host: str, - port: int, + root_path: str = "", ): app = FastAPI() @@ -62,7 +60,7 @@ "name": "Project", "description": "Test project", "id": project_id, - "registryPath": "/registry", + "registryPath": f"{root_path}/registry", } ] } @@ -105,11 +103,8 @@ ): app = get_app( store, - get_registry_dump, project_id, registry_ttl_sec, - host, - port, + root_path, ) - assert root_path is not None - uvicorn.run(app, host=host, port=port, root_path=root_path) + uvicorn.run(app, host=host, port=port)
{"golden_diff": "diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py\n--- a/sdk/python/feast/ui_server.py\n+++ b/sdk/python/feast/ui_server.py\n@@ -13,11 +13,9 @@\n \n def get_app(\n store: \"feast.FeatureStore\",\n- get_registry_dump: Callable,\n project_id: str,\n registry_ttl_secs: int,\n- host: str,\n- port: int,\n+ root_path: str = \"\",\n ):\n app = FastAPI()\n \n@@ -62,7 +60,7 @@\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n- \"registryPath\": \"/registry\",\n+ \"registryPath\": f\"{root_path}/registry\",\n }\n ]\n }\n@@ -105,11 +103,8 @@\n ):\n app = get_app(\n store,\n- get_registry_dump,\n project_id,\n registry_ttl_sec,\n- host,\n- port,\n+ root_path,\n )\n- assert root_path is not None\n- uvicorn.run(app, host=host, port=port, root_path=root_path)\n+ uvicorn.run(app, host=host, port=port)\n", "issue": "feast ui does not work on proxy subpath\n## Expected Behavior \r\n\r\nFeast UI should work when it is served behind a proxy, on a subpath e.g. `/feast-ui`\r\n\r\n## Current Behavior\r\n\r\nParts of the feast UI works behind a subpath, but not entirely (nothing is displayed, just the feast logo with a \"404\" text - refer to screenshot). No requests in the network tab of the web browser are hitting 404.\r\n\r\n![image](https://user-images.githubusercontent.com/12453748/216761133-a493bfa9-f752-4d23-9fc9-afc7cf4eb08b.png)\r\n\r\n## Steps to reproduce\r\n\r\nServe feast UI as you would e.g. `feature_store.serve_ui()`, optionally passing in the `root_path` parameter (it does not help).\r\n\r\nSet up an nginx pod with the following configuration (i.e. the nginx pod should have `/etc/nginx/conf.d/default.conf` with the following contents - `dummy_project` is the project name, and `http://feast-ui-service:8080` is where the feast UI can be accessed from your nginx pod / container):\r\n\r\n```\r\nserver {\r\n listen 80 default_server;\r\n\r\n location = /feast-ui/ {\r\n rewrite (.*) /feast-ui/p/dummy_project permanent;\r\n }\r\n\r\n location /feast-ui/ {\r\n proxy_pass http://feast-ui-service:8080/;\r\n }\r\n\r\n location / {\r\n proxy_pass http://feast-ui-service:8080/;\r\n }\r\n}\r\n```\r\n\r\nThis configuration works on localhost when nginx can listen on the root path `/`. However, note that the URL after all the redirects is wrong (it does not have the prefix).\r\n\r\n- The first block is required to force a redirect to the `/p/{project_name}`. Without this, the page will display 404 as above.\r\n- The second block is required to strip away `/feast-ui` so the UI app does not receive that path that it is not aware of\r\n- The third block is a trick to make this setup work in a local environment, because the app itself will redirect the user back to `/p/dummy_project` (without the prefix), which we then proxy into the feast UI app. However, in an actual environment, this setup does not work, because when the url does not contain the `/feast-ui` prefix, the ingress will not route it to the nginx pod, so the nginx pod cannot proxy the connection to the right place.\r\n\r\nIdeally, if the feast ui app is capable of being served on a subpath, only the second `location` block should be required in the nginx configuration. 
The first and third `location` blocks are workarounds.\r\n\r\n### Specifications\r\n\r\n- Version: 0.29.0\r\n\r\n## Possible Solution\r\n\r\nThe app should redirect to relative and not absolute paths\n", "before_files": [{"content": "import json\nimport threading\nfrom typing import Callable, Optional\n\nimport pkg_resources\nimport uvicorn\nfrom fastapi import FastAPI, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.staticfiles import StaticFiles\n\nimport feast\n\n\ndef get_app(\n store: \"feast.FeatureStore\",\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_secs: int,\n host: str,\n port: int,\n):\n app = FastAPI()\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down\n registry_proto = None\n shutting_down = False\n active_timer: Optional[threading.Timer] = None\n\n def async_refresh():\n store.refresh_registry()\n nonlocal registry_proto\n registry_proto = store.registry.proto()\n if shutting_down:\n return\n nonlocal active_timer\n active_timer = threading.Timer(registry_ttl_secs, async_refresh)\n active_timer.start()\n\n @app.on_event(\"shutdown\")\n def shutdown_event():\n nonlocal shutting_down\n shutting_down = True\n if active_timer:\n active_timer.cancel()\n\n async_refresh()\n\n ui_dir = pkg_resources.resource_filename(__name__, \"ui/build/\")\n # Initialize with the projects-list.json file\n with open(ui_dir + \"projects-list.json\", mode=\"w\") as f:\n projects_dict = {\n \"projects\": [\n {\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n \"registryPath\": \"/registry\",\n }\n ]\n }\n f.write(json.dumps(projects_dict))\n\n @app.get(\"/registry\")\n def read_registry():\n return Response(\n content=registry_proto.SerializeToString(),\n media_type=\"application/octet-stream\",\n )\n\n # For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n filename = ui_dir + \"index.html\"\n\n with open(filename) as f:\n content = f.read()\n\n return Response(content, media_type=\"text/html\")\n\n app.mount(\n \"/\",\n StaticFiles(directory=ui_dir, html=True),\n name=\"site\",\n )\n\n return app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_sec: int,\n root_path: str = \"\",\n):\n app = get_app(\n store,\n get_registry_dump,\n project_id,\n registry_ttl_sec,\n host,\n port,\n )\n assert root_path is not None\n uvicorn.run(app, host=host, port=port, root_path=root_path)\n", "path": "sdk/python/feast/ui_server.py"}]}
2,058
281
gh_patches_debug_18421
rasdani/github-patches
git_diff
akvo__akvo-rsr-2512
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> More 504s: on the results framework page @Geerts reports on Skype: 504 Gateway timeout hunter strikes again: http://rsr.test.akvo.org/rest/v1/indicator_period_data_framework/?format=json&period__indicator__result__project=2780 Via: http://rsr.test.akvo.org/en/myrsr/results/2780/#results,13323,5679 </issue> <code> [start of akvo/rest/views/indicator_period_data.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 from akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment 9 10 from ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer, 11 IndicatorPeriodDataCommentSerializer) 12 from ..viewsets import PublicProjectViewSet 13 14 from django.http import HttpResponseForbidden 15 16 from rest_framework import status 17 from rest_framework.decorators import api_view, permission_classes 18 from rest_framework.response import Response 19 20 21 class IndicatorPeriodDataViewSet(PublicProjectViewSet): 22 """ 23 """ 24 queryset = IndicatorPeriodData.objects.all() 25 serializer_class = IndicatorPeriodDataSerializer 26 27 project_relation = 'period__indicator__result__project__' 28 29 30 class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet): 31 """ 32 """ 33 queryset = IndicatorPeriodData.objects.all() 34 serializer_class = IndicatorPeriodDataFrameworkSerializer 35 project_relation = 'period__indicator__result__project__' 36 37 38 class IndicatorPeriodDataCommentViewSet(PublicProjectViewSet): 39 """ 40 """ 41 queryset = IndicatorPeriodDataComment.objects.all() 42 serializer_class = IndicatorPeriodDataCommentSerializer 43 project_relation = 'data__period__indicator__result__project__' 44 45 46 @api_view(['POST']) 47 def indicator_upload_file(request, pk=None): 48 """ 49 Special API call for directly uploading a file. 50 51 :param request; A Django request object. 52 :param pk; The primary key of an IndicatorPeriodData instance. 53 """ 54 update = IndicatorPeriodData.objects.get(pk=pk) 55 upload_file = request.data['file'] 56 57 # Permissions 58 user = getattr(request, 'user', None) 59 if not user: 60 return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN) 61 62 # TODO: Check if user is allowed to upload a file 63 # if not user.has_perm('rsr.change_project', update.period.indicator.result.project): 64 # return Response({'error': 'User has no permission to place an update'}, 65 # status=status.HTTP_403_FORBIDDEN) 66 67 try: 68 file_type = request.POST.copy()['type'] 69 if file_type == 'photo': 70 update.photo = upload_file 71 update.save(update_fields=['photo']) 72 return Response({'file': update.photo.url}) 73 elif file_type == 'file': 74 update.file = upload_file 75 update.save(update_fields=['file']) 76 return Response({'file': update.file.url}) 77 except Exception as e: 78 return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST) 79 [end of akvo/rest/views/indicator_period_data.py] [start of akvo/rest/views/partnership.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 from akvo.rsr.models import Partnership 9 10 from ..serializers import PartnershipSerializer, PartnershipBasicSerializer 11 from ..viewsets import PublicProjectViewSet 12 13 14 class PartnershipViewSet(PublicProjectViewSet): 15 """ 16 """ 17 queryset = Partnership.objects.all() 18 serializer_class = PartnershipSerializer 19 20 def get_queryset(self): 21 """Allow filtering on partner_type.""" 22 partner_type = self.request.query_params.get('partner_type', None) 23 if partner_type and partner_type in Partnership.PARTNER_TYPES_TO_ROLES_MAP.keys(): 24 self.queryset = self.queryset.filter( 25 iati_organisation_role=Partnership.PARTNER_TYPES_TO_ROLES_MAP[partner_type] 26 ).distinct() 27 return super(PartnershipViewSet, self).get_queryset() 28 29 30 class PartnershipMoreLinkViewSet(PublicProjectViewSet): 31 """ 32 Specific endpoint for the '+X partners' links in RSR. Contains the name, long name and logo of 33 an organisation and the partnership role. 34 """ 35 queryset = Partnership.objects.all() 36 serializer_class = PartnershipBasicSerializer 37 [end of akvo/rest/views/partnership.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py --- a/akvo/rest/views/indicator_period_data.py +++ b/akvo/rest/views/indicator_period_data.py @@ -30,7 +30,13 @@ class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet): """ """ - queryset = IndicatorPeriodData.objects.all() + queryset = IndicatorPeriodData.objects.select_related( + 'period', + 'user' + ).prefetch_related( + 'comments', + 'comments__user' + ).all() serializer_class = IndicatorPeriodDataFrameworkSerializer project_relation = 'period__indicator__result__project__' diff --git a/akvo/rest/views/partnership.py b/akvo/rest/views/partnership.py --- a/akvo/rest/views/partnership.py +++ b/akvo/rest/views/partnership.py @@ -14,7 +14,7 @@ class PartnershipViewSet(PublicProjectViewSet): """ """ - queryset = Partnership.objects.all() + queryset = Partnership.objects.select_related('organisation', 'project').all() serializer_class = PartnershipSerializer def get_queryset(self):
{"golden_diff": "diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py\n--- a/akvo/rest/views/indicator_period_data.py\n+++ b/akvo/rest/views/indicator_period_data.py\n@@ -30,7 +30,13 @@\n class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n- queryset = IndicatorPeriodData.objects.all()\n+ queryset = IndicatorPeriodData.objects.select_related(\n+ 'period',\n+ 'user'\n+ ).prefetch_related(\n+ 'comments',\n+ 'comments__user'\n+ ).all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n \ndiff --git a/akvo/rest/views/partnership.py b/akvo/rest/views/partnership.py\n--- a/akvo/rest/views/partnership.py\n+++ b/akvo/rest/views/partnership.py\n@@ -14,7 +14,7 @@\n class PartnershipViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n- queryset = Partnership.objects.all()\n+ queryset = Partnership.objects.select_related('organisation', 'project').all()\n serializer_class = PartnershipSerializer\n \n def get_queryset(self):\n", "issue": "More 504s: on the results framework page\n@Geerts reports on Skype: 504 Gateway timeout hunter strikes again: http://rsr.test.akvo.org/rest/v1/indicator_period_data_framework/?format=json&period__indicator__result__project=2780\r\n\r\nVia: http://rsr.test.akvo.org/en/myrsr/results/2780/#results,13323,5679\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom django.http import HttpResponseForbidden\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataSerializer\n\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodDataComment.objects.all()\n serializer_class = IndicatorPeriodDataCommentSerializer\n project_relation = 'data__period__indicator__result__project__'\n\n\n@api_view(['POST'])\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.data['file']\n\n # Permissions\n user = getattr(request, 'user', None)\n if not user:\n return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n\n # TODO: Check if user is allowed to upload a file\n # if not user.has_perm('rsr.change_project', 
update.period.indicator.result.project):\n # return Response({'error': 'User has no permission to place an update'},\n # status=status.HTTP_403_FORBIDDEN)\n\n try:\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n return Response({'file': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n", "path": "akvo/rest/views/indicator_period_data.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import Partnership\n\nfrom ..serializers import PartnershipSerializer, PartnershipBasicSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass PartnershipViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = Partnership.objects.all()\n serializer_class = PartnershipSerializer\n\n def get_queryset(self):\n \"\"\"Allow filtering on partner_type.\"\"\"\n partner_type = self.request.query_params.get('partner_type', None)\n if partner_type and partner_type in Partnership.PARTNER_TYPES_TO_ROLES_MAP.keys():\n self.queryset = self.queryset.filter(\n iati_organisation_role=Partnership.PARTNER_TYPES_TO_ROLES_MAP[partner_type]\n ).distinct()\n return super(PartnershipViewSet, self).get_queryset()\n\n\nclass PartnershipMoreLinkViewSet(PublicProjectViewSet):\n \"\"\"\n Specific endpoint for the '+X partners' links in RSR. Contains the name, long name and logo of\n an organisation and the partnership role.\n \"\"\"\n queryset = Partnership.objects.all()\n serializer_class = PartnershipBasicSerializer\n", "path": "akvo/rest/views/partnership.py"}]}
1,767
269
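
The Akvo record above resolves the 504 timeout by replacing a bare `.all()` queryset with eager loading via `select_related`/`prefetch_related`. A minimal sketch of that pattern follows; it assumes a configured Django project, and the model names are illustrative rather than Akvo RSR's actual schema.

```python
# Illustrative models only (not Akvo RSR's schema); assumes a configured
# Django app so the models can be registered and queried.
from django.db import models


class Period(models.Model):
    name = models.CharField(max_length=100)


class Update(models.Model):
    period = models.ForeignKey(Period, on_delete=models.CASCADE)


class Comment(models.Model):
    update = models.ForeignKey(Update, related_name="comments",
                               on_delete=models.CASCADE)
    text = models.TextField()


# N+1 behaviour: one query for the updates, plus one more per row when
# u.period is touched while serialising.
slow = Update.objects.all()

# Eager loading: the FK is JOINed into the main query, and each
# prefetch_related() relation costs a single extra query no matter how
# many rows come back.  This is the shape of the fix in the diff above.
fast = (Update.objects
        .select_related("period")
        .prefetch_related("comments"))
```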
gh_patches_debug_33321
rasdani/github-patches
git_diff
fedora-infra__bodhi-3173
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support Sourceware bugs in Fedora enhanced markdown Many key projects reside on sourceware.org including glibc, gdb, binutils, elfutils, libabigail, systemtap etc. Could you please add markdown support for sourceware.org bugzilla (https://sourceware.org/bugzilla/)? I suggest a unified markup of SWBZ#XXXX or SW#XXXX for all projects on the main sourceware bugzilla instance. Likewise gcc compiler bugs are also on sourceware but use a distinct instance (https://gcc.gnu.org/bugzilla/) I suggest a markup of GCC#XXXX for gcc bugs. Thank you! </issue> <code> [start of bodhi/server/ffmarkdown.py] 1 # Copyright © 2014-2019 Red Hat, Inc. and others. 2 # 3 # This file is part of Bodhi. 4 # 5 # This program is free software; you can redistribute it and/or 6 # modify it under the terms of the GNU General Public License 7 # as published by the Free Software Foundation; either version 2 8 # of the License, or (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with this program; if not, write to the Free Software 17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, 18 # USA. 19 """ 20 Fedora-flavored Markdown. 21 22 Author: Ralph Bean <[email protected]> 23 """ 24 25 from markdown.extensions import Extension 26 import markdown.inlinepatterns 27 import markdown.postprocessors 28 import markdown.util 29 import pyramid.threadlocal 30 31 from bodhi import MENTION_RE 32 33 34 BUGZILLA_RE = r'([a-zA-Z]+)(#[0-9]{5,})' 35 36 37 def user_url(name): 38 """ 39 Return a URL to the given username. 40 41 Args: 42 name (basestring): The username of the user we want a URL for. 43 Returns: 44 basestring: A URL to the requested user. 45 """ 46 request = pyramid.threadlocal.get_current_request() 47 return request.route_url('user', name=name) 48 49 50 def bug_url(tracker, idx): 51 """ 52 Return the URL for the given bug. 53 54 Args: 55 tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora', 56 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'. 57 idx (basestring or int): The bug number. 58 Returns: 59 basestring: The URL of the given bug. 60 Raises: 61 KeyError: If the given tracker is not supported by this function. 62 """ 63 try: 64 return { 65 'fedora': "https://bugzilla.redhat.com/show_bug.cgi?id=%s", 66 'gnome': "https://bugzilla.gnome.org/show_bug.cgi?id=%s", 67 'kde': "https://bugs.kde.org/show_bug.cgi?id=%s", 68 'mozilla': "https://bugzilla.mozilla.org/show_bug.cgi?id=%s", 69 'pear': "http://pear.php.net/bugs/bug.php?id=%s", 70 'perl': "https://rt.cpan.org/Public/Bug/Display.html?id=%s", 71 'php': "https://bugs.php.net/bug.php?id=%s", 72 'python': "https://bugs.python.org/issue%s", 73 'rh': "https://bugzilla.redhat.com/show_bug.cgi?id=%s", 74 'rhbz': "https://bugzilla.redhat.com/show_bug.cgi?id=%s"}[tracker.lower()] % idx 75 76 except KeyError: 77 return None 78 79 80 class MentionPattern(markdown.inlinepatterns.Pattern): 81 """Match username mentions and point to their profiles.""" 82 83 def handleMatch(self, m): 84 """ 85 Build and return an Element that links to the matched User's profile. 
86 87 Args: 88 m (re.MatchObject): The regex match on the username. 89 Return: 90 xml.etree.Element: An html anchor referencing the user's profile. 91 """ 92 el = markdown.util.etree.Element("a") 93 name = markdown.util.AtomicString(m.group(2)) 94 el.set('href', user_url(name[1:])) 95 el.text = name 96 return el 97 98 99 class BugzillaPattern(markdown.inlinepatterns.Pattern): 100 """Match bug tracker patterns.""" 101 102 def handleMatch(self, m): 103 """ 104 Build and return an Element that links to the referenced bug. 105 106 Args: 107 m (re.MatchObject): The regex match on the bug. 108 Returns: 109 xml.etree.Element: An html anchor referencing the matched bug. 110 """ 111 tracker = markdown.util.AtomicString(m.group(2)) 112 idx = markdown.util.AtomicString(m.group(3)) 113 url = bug_url(tracker, idx[1:]) 114 115 if url is None: 116 return tracker + idx 117 118 el = markdown.util.etree.Element("a") 119 el.set('href', url) 120 el.text = idx 121 return el 122 123 124 class SurroundProcessor(markdown.postprocessors.Postprocessor): 125 """A postprocessor to surround the text with a markdown <div>.""" 126 127 def run(self, text): 128 """ 129 Return text wrapped in a <div> with a markdown class. 130 131 Args: 132 text (str): The text to wrap in a <div>. 133 Returns: 134 str: The text wrapped in a <div>. 135 """ 136 return "<div class='markdown'>" + text + "</div>" 137 138 139 class BodhiExtension(Extension): 140 """Bodhi's markdown Extension.""" 141 142 def extendMarkdown(self, md, md_globals): 143 """ 144 Extend markdown to add our patterns and postprocessor. 145 146 Args: 147 md (Markdown): An instance of the Markdown class. 148 md_globals (dict): Contains all the various global variables within the markdown module. 149 """ 150 md.inlinePatterns.add('mention', MentionPattern(MENTION_RE, md), '_end') 151 md.inlinePatterns.add('bugzilla', BugzillaPattern(BUGZILLA_RE, md), '_end') 152 md.postprocessors.add('surround', SurroundProcessor(md), '_end') 153 [end of bodhi/server/ffmarkdown.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bodhi/server/ffmarkdown.py b/bodhi/server/ffmarkdown.py --- a/bodhi/server/ffmarkdown.py +++ b/bodhi/server/ffmarkdown.py @@ -53,7 +53,8 @@ Args: tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora', - 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'. + 'gcc', 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', 'rhbz' + or 'sourceware'. idx (basestring or int): The bug number. Returns: basestring: The URL of the given bug. @@ -61,17 +62,21 @@ KeyError: If the given tracker is not supported by this function. """ try: - return { + trackers = { 'fedora': "https://bugzilla.redhat.com/show_bug.cgi?id=%s", + 'gcc': "https://gcc.gnu.org/bugzilla/show_bug.cgi?id=%s", 'gnome': "https://bugzilla.gnome.org/show_bug.cgi?id=%s", 'kde': "https://bugs.kde.org/show_bug.cgi?id=%s", 'mozilla': "https://bugzilla.mozilla.org/show_bug.cgi?id=%s", - 'pear': "http://pear.php.net/bugs/bug.php?id=%s", + 'pear': "https://pear.php.net/bugs/bug.php?id=%s", 'perl': "https://rt.cpan.org/Public/Bug/Display.html?id=%s", 'php': "https://bugs.php.net/bug.php?id=%s", 'python': "https://bugs.python.org/issue%s", 'rh': "https://bugzilla.redhat.com/show_bug.cgi?id=%s", - 'rhbz': "https://bugzilla.redhat.com/show_bug.cgi?id=%s"}[tracker.lower()] % idx + 'rhbz': "https://bugzilla.redhat.com/show_bug.cgi?id=%s", + 'sourceware': "https://sourceware.org/bugzilla/show_bug.cgi?id=%s"} + + return trackers[tracker.lower()] % idx except KeyError: return None
{"golden_diff": "diff --git a/bodhi/server/ffmarkdown.py b/bodhi/server/ffmarkdown.py\n--- a/bodhi/server/ffmarkdown.py\n+++ b/bodhi/server/ffmarkdown.py\n@@ -53,7 +53,8 @@\n \n Args:\n tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora',\n- 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'.\n+ 'gcc', 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', 'rhbz'\n+ or 'sourceware'.\n idx (basestring or int): The bug number.\n Returns:\n basestring: The URL of the given bug.\n@@ -61,17 +62,21 @@\n KeyError: If the given tracker is not supported by this function.\n \"\"\"\n try:\n- return {\n+ trackers = {\n 'fedora': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n+ 'gcc': \"https://gcc.gnu.org/bugzilla/show_bug.cgi?id=%s\",\n 'gnome': \"https://bugzilla.gnome.org/show_bug.cgi?id=%s\",\n 'kde': \"https://bugs.kde.org/show_bug.cgi?id=%s\",\n 'mozilla': \"https://bugzilla.mozilla.org/show_bug.cgi?id=%s\",\n- 'pear': \"http://pear.php.net/bugs/bug.php?id=%s\",\n+ 'pear': \"https://pear.php.net/bugs/bug.php?id=%s\",\n 'perl': \"https://rt.cpan.org/Public/Bug/Display.html?id=%s\",\n 'php': \"https://bugs.php.net/bug.php?id=%s\",\n 'python': \"https://bugs.python.org/issue%s\",\n 'rh': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n- 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\"}[tracker.lower()] % idx\n+ 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n+ 'sourceware': \"https://sourceware.org/bugzilla/show_bug.cgi?id=%s\"}\n+\n+ return trackers[tracker.lower()] % idx\n \n except KeyError:\n return None\n", "issue": "Support Sourceware bugs in Fedora enhanced markdown\nMany key projects reside on sourceware.org including glibc, gdb, binutils, elfutils, libabigail, systemtap etc.\r\n\r\nCould you please add markdown support for sourceware.org bugzilla (https://sourceware.org/bugzilla/)?\r\n\r\nI suggest a unified markup of SWBZ#XXXX or SW#XXXX for all projects on the main sourceware bugzilla instance.\r\n\r\nLikewise gcc compiler bugs are also on sourceware but use a distinct instance (https://gcc.gnu.org/bugzilla/)\r\n\r\nI suggest a markup of GCC#XXXX for gcc bugs.\r\n\r\nThank you!\n", "before_files": [{"content": "# Copyright \u00a9 2014-2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\"\"\"\nFedora-flavored Markdown.\n\nAuthor: Ralph Bean <[email protected]>\n\"\"\"\n\nfrom markdown.extensions import Extension\nimport markdown.inlinepatterns\nimport markdown.postprocessors\nimport markdown.util\nimport pyramid.threadlocal\n\nfrom bodhi import MENTION_RE\n\n\nBUGZILLA_RE = r'([a-zA-Z]+)(#[0-9]{5,})'\n\n\ndef user_url(name):\n \"\"\"\n Return a URL to the given username.\n\n Args:\n name (basestring): The username of the user we want a URL for.\n Returns:\n basestring: A URL to the requested user.\n \"\"\"\n request = pyramid.threadlocal.get_current_request()\n return request.route_url('user', name=name)\n\n\ndef bug_url(tracker, idx):\n \"\"\"\n Return the URL for the given bug.\n\n Args:\n tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora',\n 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'.\n idx (basestring or int): The bug number.\n Returns:\n basestring: The URL of the given bug.\n Raises:\n KeyError: If the given tracker is not supported by this function.\n \"\"\"\n try:\n return {\n 'fedora': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'gnome': \"https://bugzilla.gnome.org/show_bug.cgi?id=%s\",\n 'kde': \"https://bugs.kde.org/show_bug.cgi?id=%s\",\n 'mozilla': \"https://bugzilla.mozilla.org/show_bug.cgi?id=%s\",\n 'pear': \"http://pear.php.net/bugs/bug.php?id=%s\",\n 'perl': \"https://rt.cpan.org/Public/Bug/Display.html?id=%s\",\n 'php': \"https://bugs.php.net/bug.php?id=%s\",\n 'python': \"https://bugs.python.org/issue%s\",\n 'rh': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\"}[tracker.lower()] % idx\n\n except KeyError:\n return None\n\n\nclass MentionPattern(markdown.inlinepatterns.Pattern):\n \"\"\"Match username mentions and point to their profiles.\"\"\"\n\n def handleMatch(self, m):\n \"\"\"\n Build and return an Element that links to the matched User's profile.\n\n Args:\n m (re.MatchObject): The regex match on the username.\n Return:\n xml.etree.Element: An html anchor referencing the user's profile.\n \"\"\"\n el = markdown.util.etree.Element(\"a\")\n name = markdown.util.AtomicString(m.group(2))\n el.set('href', user_url(name[1:]))\n el.text = name\n return el\n\n\nclass BugzillaPattern(markdown.inlinepatterns.Pattern):\n \"\"\"Match bug tracker patterns.\"\"\"\n\n def handleMatch(self, m):\n \"\"\"\n Build and return an Element that links to the referenced bug.\n\n Args:\n m (re.MatchObject): The regex match on the bug.\n Returns:\n xml.etree.Element: An html anchor referencing the matched bug.\n \"\"\"\n tracker = markdown.util.AtomicString(m.group(2))\n idx = markdown.util.AtomicString(m.group(3))\n url = bug_url(tracker, idx[1:])\n\n if url is None:\n return tracker + idx\n\n el = markdown.util.etree.Element(\"a\")\n el.set('href', url)\n el.text = idx\n return el\n\n\nclass SurroundProcessor(markdown.postprocessors.Postprocessor):\n \"\"\"A postprocessor to surround the text with a markdown <div>.\"\"\"\n\n def run(self, text):\n \"\"\"\n Return text wrapped in a <div> with a markdown class.\n\n Args:\n text (str): The text to wrap in a <div>.\n Returns:\n str: The text wrapped in a <div>.\n \"\"\"\n return \"<div class='markdown'>\" + text 
+ \"</div>\"\n\n\nclass BodhiExtension(Extension):\n \"\"\"Bodhi's markdown Extension.\"\"\"\n\n def extendMarkdown(self, md, md_globals):\n \"\"\"\n Extend markdown to add our patterns and postprocessor.\n\n Args:\n md (Markdown): An instance of the Markdown class.\n md_globals (dict): Contains all the various global variables within the markdown module.\n \"\"\"\n md.inlinePatterns.add('mention', MentionPattern(MENTION_RE, md), '_end')\n md.inlinePatterns.add('bugzilla', BugzillaPattern(BUGZILLA_RE, md), '_end')\n md.postprocessors.add('surround', SurroundProcessor(md), '_end')\n", "path": "bodhi/server/ffmarkdown.py"}]}
2,234
524
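
The Bodhi patch above turns the tracker-to-URL mapping into a named `trackers` dict and adds `gcc` and `sourceware` entries. Below is a standalone sketch of that lookup (the markdown extension plumbing is omitted, and this is not Bodhi's actual module), showing how a `GCC#NNNNN` reference would resolve.

```python
# Standalone re-creation of the dict-plus-KeyError lookup from the diff
# above; only a subset of trackers is shown.
import re

BUGZILLA_RE = r"([a-zA-Z]+)(#[0-9]{5,})"  # same pattern Bodhi matches on

TRACKERS = {
    "gcc": "https://gcc.gnu.org/bugzilla/show_bug.cgi?id=%s",
    "sourceware": "https://sourceware.org/bugzilla/show_bug.cgi?id=%s",
    "rhbz": "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
}


def bug_url(tracker, idx):
    """Return the URL for tracker#idx, or None for an unknown tracker."""
    try:
        return TRACKERS[tracker.lower()] % idx
    except KeyError:
        return None


match = re.search(BUGZILLA_RE, "fixed in GCC#90123")
print(bug_url(match.group(1), match.group(2)[1:]))
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90123
```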
gh_patches_debug_60611
rasdani/github-patches
git_diff
cloudtools__troposphere-605
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BUG: IAM Role title should not be validated The title specified in troposphere is not the actual physical resource name and should be validated as such (https://github.com/cloudtools/troposphere/blob/fe72f7d3f7b0711a22173c1240134173aafef574/troposphere/iam.py#L75-L77). The next snippet was created today using `troposphere==1.5.0`and `boto3==1.2.6`: ``` python >>> import boto3 >>> import troposphere >>> import troposphere.iam >>> import time >>> >>> long_title = 'VeryLongName{}'.format('Z' * 100) >>> print len(long_title) 112 >>> >>> # create a role ... >>> role = troposphere.iam.Role( ... long_title, ... AssumeRolePolicyDocument={ ... "Statement": [{ ... "Action": ["sts:AssumeRole"], ... "Effect": "Allow", ... "Principal": {"Service": ["ec2.amazonaws.com"]} ... }] ... }) Traceback (most recent call last): File "<stdin>", line 8, in <module> File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/__init__.py", line 44, in __init__ self.validate_title() File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/iam.py", line 77, in validate_title iam_role_name(self.title) File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/validators.py", line 98, in iam_role_name raise ValueError('IAM Role Name may not exceed 64 characters') ValueError: IAM Role Name may not exceed 64 characters >>> >>> >>> # delete validator ... >>> del troposphere.iam.Role.validate_title >>> # try again ... >>> role = troposphere.iam.Role( ... long_title, ... AssumeRolePolicyDocument={ ... "Statement": [{ ... "Action": ["sts:AssumeRole"], ... "Effect": "Allow", ... "Principal": {"Service": ["ec2.amazonaws.com"]} ... }] ... }) >>> template = troposphere.Template() >>> template.add_resource(role) <troposphere.iam.Role object at 0x10ee02990> >>> print template.to_json() { "Resources": { "VeryLongNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ": { "Properties": { "AssumeRolePolicyDocument": { "Statement": [ { "Action": [ "sts:AssumeRole" ], "Effect": "Allow", "Principal": { "Service": [ "ec2.amazonaws.com" ] } } ] } }, "Type": "AWS::IAM::Role" } } } >>> client = boto3.client('cloudformation', 'us-east-1') >>> stack = client.create_stack( ... StackName='testTroposphere', ... TemplateBody=template.to_json(), ... Capabilities=['CAPABILITY_IAM']) >>> >>> while client.describe_stacks(StackName=stack['StackId'])['Stacks'][0]['StackStatus'] != 'CREATE_COMPLETE': ... import time ... time.sleep(1) ... >>> resources = client.describe_stack_resources(StackName=stack['StackId']) >>> for r in resources['StackResources']: ... physical_id = r['PhysicalResourceId'] ... print("{} ({} chars)".format(physical_id, len(physical_id))) ... testTroposphere-VeryLongNameZZZZZZZZZZZZZZZZZZZZZZ-PTHEM9FPNX28 (63 chars) ``` The snippet above shows that the physical id was chosen by CloudFormation and isn't just a trimmed version of the title (it includes a random part too). </issue> <code> [start of troposphere/iam.py] 1 # Copyright (c) 2012-2013, Mark Peek <[email protected]> 2 # All rights reserved. 3 # 4 # See LICENSE file for full license. 5 6 from . 
import AWSObject, AWSProperty 7 from .validators import integer, boolean, status 8 from .validators import iam_path, iam_role_name, iam_group_name, iam_user_name 9 10 try: 11 from awacs.aws import Policy 12 policytypes = (dict, Policy) 13 except ImportError: 14 policytypes = dict, 15 16 17 Active = "Active" 18 Inactive = "Inactive" 19 20 21 class AccessKey(AWSObject): 22 resource_type = "AWS::IAM::AccessKey" 23 24 props = { 25 'Serial': (integer, False), 26 'Status': (status, False), 27 'UserName': (basestring, True), 28 } 29 30 31 class PolicyType(AWSObject): 32 resource_type = "AWS::IAM::Policy" 33 34 props = { 35 'Groups': ([basestring], False), 36 'PolicyDocument': (policytypes, True), 37 'PolicyName': (basestring, True), 38 'Roles': ([basestring], False), 39 'Users': ([basestring], False), 40 } 41 42 43 class Policy(AWSProperty): 44 props = { 45 'PolicyDocument': (policytypes, True), 46 'PolicyName': (basestring, True), 47 } 48 49 PolicyProperty = Policy 50 51 52 class Group(AWSObject): 53 def validate_title(self): 54 iam_group_name(self.title) 55 56 resource_type = "AWS::IAM::Group" 57 58 props = { 59 'GroupName': (iam_group_name, False), 60 'ManagedPolicyArns': ([basestring], False), 61 'Path': (iam_path, False), 62 'Policies': ([Policy], False), 63 } 64 65 66 class InstanceProfile(AWSObject): 67 resource_type = "AWS::IAM::InstanceProfile" 68 69 props = { 70 'Path': (iam_path, False), 71 'Roles': (list, True), 72 } 73 74 75 class Role(AWSObject): 76 def validate_title(self): 77 iam_role_name(self.title) 78 79 resource_type = "AWS::IAM::Role" 80 81 props = { 82 'AssumeRolePolicyDocument': (policytypes, True), 83 'ManagedPolicyArns': ([basestring], False), 84 'Path': (iam_path, False), 85 'Policies': ([Policy], False), 86 'RoleName': (iam_role_name, False), 87 } 88 89 90 class LoginProfile(AWSProperty): 91 props = { 92 'Password': (basestring, True), 93 'PasswordResetRequired': (boolean, False), 94 } 95 96 97 class User(AWSObject): 98 resource_type = "AWS::IAM::User" 99 100 props = { 101 'Path': (iam_path, False), 102 'Groups': ([basestring], False), 103 'ManagedPolicyArns': ([basestring], False), 104 'LoginProfile': (LoginProfile, False), 105 'Policies': ([Policy], False), 106 'UserName': (iam_user_name, False), 107 } 108 109 110 class UserToGroupAddition(AWSObject): 111 resource_type = "AWS::IAM::UserToGroupAddition" 112 113 props = { 114 'GroupName': (basestring, True), 115 'Users': (list, True), 116 } 117 118 119 class ManagedPolicy(AWSObject): 120 resource_type = "AWS::IAM::ManagedPolicy" 121 122 props = { 123 'Description': (basestring, False), 124 'Groups': ([basestring], False), 125 'Path': (iam_path, False), 126 'PolicyDocument': (policytypes, True), 127 'Roles': ([basestring], False), 128 'Users': ([basestring], False), 129 } 130 [end of troposphere/iam.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/troposphere/iam.py b/troposphere/iam.py --- a/troposphere/iam.py +++ b/troposphere/iam.py @@ -50,9 +50,6 @@ class Group(AWSObject): - def validate_title(self): - iam_group_name(self.title) - resource_type = "AWS::IAM::Group" props = { @@ -73,9 +70,6 @@ class Role(AWSObject): - def validate_title(self): - iam_role_name(self.title) - resource_type = "AWS::IAM::Role" props = {
{"golden_diff": "diff --git a/troposphere/iam.py b/troposphere/iam.py\n--- a/troposphere/iam.py\n+++ b/troposphere/iam.py\n@@ -50,9 +50,6 @@\n \n \n class Group(AWSObject):\n- def validate_title(self):\n- iam_group_name(self.title)\n-\n resource_type = \"AWS::IAM::Group\"\n \n props = {\n@@ -73,9 +70,6 @@\n \n \n class Role(AWSObject):\n- def validate_title(self):\n- iam_role_name(self.title)\n-\n resource_type = \"AWS::IAM::Role\"\n \n props = {\n", "issue": "BUG: IAM Role title should not be validated\nThe title specified in troposphere is not the actual physical resource name and should be validated as such (https://github.com/cloudtools/troposphere/blob/fe72f7d3f7b0711a22173c1240134173aafef574/troposphere/iam.py#L75-L77).\n\nThe next snippet was created today using `troposphere==1.5.0`and `boto3==1.2.6`:\n\n``` python\n>>> import boto3\n>>> import troposphere\n>>> import troposphere.iam\n>>> import time\n>>>\n>>> long_title = 'VeryLongName{}'.format('Z' * 100)\n>>> print len(long_title)\n112\n>>>\n>>> # create a role\n...\n>>> role = troposphere.iam.Role(\n... long_title,\n... AssumeRolePolicyDocument={\n... \"Statement\": [{\n... \"Action\": [\"sts:AssumeRole\"],\n... \"Effect\": \"Allow\",\n... \"Principal\": {\"Service\": [\"ec2.amazonaws.com\"]}\n... }]\n... })\nTraceback (most recent call last):\n File \"<stdin>\", line 8, in <module>\n File \"/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/__init__.py\", line 44, in __init__\n self.validate_title()\n File \"/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/iam.py\", line 77, in validate_title\n iam_role_name(self.title)\n File \"/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/validators.py\", line 98, in iam_role_name\n raise ValueError('IAM Role Name may not exceed 64 characters')\nValueError: IAM Role Name may not exceed 64 characters\n>>>\n>>>\n>>> # delete validator\n...\n>>> del troposphere.iam.Role.validate_title\n>>> # try again\n...\n>>> role = troposphere.iam.Role(\n... long_title,\n... AssumeRolePolicyDocument={\n... \"Statement\": [{\n... \"Action\": [\"sts:AssumeRole\"],\n... \"Effect\": \"Allow\",\n... \"Principal\": {\"Service\": [\"ec2.amazonaws.com\"]}\n... }]\n... })\n>>> template = troposphere.Template()\n>>> template.add_resource(role)\n<troposphere.iam.Role object at 0x10ee02990>\n>>> print template.to_json()\n{\n \"Resources\": {\n \"VeryLongNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\": {\n \"Properties\": {\n \"AssumeRolePolicyDocument\": {\n \"Statement\": [\n {\n \"Action\": [\n \"sts:AssumeRole\"\n ],\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": [\n \"ec2.amazonaws.com\"\n ]\n }\n }\n ]\n }\n },\n \"Type\": \"AWS::IAM::Role\"\n }\n }\n}\n>>> client = boto3.client('cloudformation', 'us-east-1')\n>>> stack = client.create_stack(\n... StackName='testTroposphere',\n... TemplateBody=template.to_json(),\n... Capabilities=['CAPABILITY_IAM'])\n>>>\n>>> while client.describe_stacks(StackName=stack['StackId'])['Stacks'][0]['StackStatus'] != 'CREATE_COMPLETE':\n... import time\n... time.sleep(1)\n...\n>>> resources = client.describe_stack_resources(StackName=stack['StackId'])\n>>> for r in resources['StackResources']:\n... physical_id = r['PhysicalResourceId']\n... 
print(\"{} ({} chars)\".format(physical_id, len(physical_id)))\n...\ntestTroposphere-VeryLongNameZZZZZZZZZZZZZZZZZZZZZZ-PTHEM9FPNX28 (63 chars)\n```\n\nThe snippet above shows that the physical id was chosen by CloudFormation and isn't just a trimmed version of the title (it includes a random part too).\n\n", "before_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n def validate_title(self):\n iam_group_name(self.title)\n\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n }\n\n\nclass Role(AWSObject):\n def validate_title(self):\n iam_role_name(self.title)\n\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}]}
2,571
138
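
The troposphere patch above removes title validation from `Role` and `Group` because the title is only the template's logical ID; CloudFormation picks the physical name itself. A small sketch of the behaviour implied by the change, assuming a troposphere release that includes it:

```python
# Assumes a troposphere version containing the patch above.  The long
# string is only the CloudFormation logical ID, so it no longer trips the
# 64-character IAM name check.
import troposphere
import troposphere.iam as iam

role = iam.Role(
    "VeryLongLogicalId" + "Z" * 100,   # accepted after the patch
    AssumeRolePolicyDocument={
        "Statement": [{
            "Action": ["sts:AssumeRole"],
            "Effect": "Allow",
            "Principal": {"Service": ["ec2.amazonaws.com"]},
        }]
    },
)

template = troposphere.Template()
template.add_resource(role)

# The optional RoleName *property* is still typed as iam_role_name, so an
# explicit physical name is expected to keep failing if it exceeds 64
# characters.
```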
gh_patches_debug_12568
rasdani/github-patches
git_diff
Kinto__kinto-474
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Do not require cliquet master branch in dev As discussed with @Natim @almet </issue> <code> [start of kinto/config/__init__.py] 1 import os 2 import binascii 3 import codecs 4 from kinto import logger 5 6 HERE = os.path.abspath(os.path.dirname(__file__)) 7 8 9 def render_template(template, destination, **kwargs): 10 template = os.path.join(HERE, template) 11 folder = os.path.dirname(destination) 12 13 if not os.path.exists(folder): 14 os.makedirs(folder) 15 16 logger.info("Created config {}".format(os.path.abspath(destination))) 17 18 with codecs.open(template, 'r', encoding='utf-8') as f: 19 raw_template = f.read() 20 rendered = raw_template.format(**kwargs) 21 with codecs.open(destination, 'w+', encoding='utf-8') as output: 22 output.write(rendered) 23 24 25 def init(config_file, backend): 26 values = {} 27 values['secret'] = binascii.b2a_hex(os.urandom(32)) 28 29 values['storage_backend'] = "cliquet.storage.%s" % backend 30 values['cache_backend'] = "cliquet.cache.%s" % backend 31 values['permission_backend'] = "cliquet.permission.%s" % backend 32 33 if backend == 'postgresql': 34 postgresql_url = "postgres://postgres:postgres@localhost/postgres" 35 values['storage_url'] = postgresql_url 36 values['cache_url'] = postgresql_url 37 values['permission_url'] = postgresql_url 38 39 elif backend == 'redis': 40 redis_url = "redis://localhost:6379" 41 values['storage_url'] = redis_url + "/1" 42 values['cache_url'] = redis_url + "/2" 43 values['permission_url'] = redis_url + "/3" 44 45 else: 46 values['storage_url'] = '' 47 values['cache_url'] = '' 48 values['permission_url'] = '' 49 50 render_template("kinto.tpl", config_file, **values) 51 [end of kinto/config/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py --- a/kinto/config/__init__.py +++ b/kinto/config/__init__.py @@ -1,6 +1,8 @@ import os -import binascii import codecs + +from cliquet import utils as cliquet_utils + from kinto import logger HERE = os.path.abspath(os.path.dirname(__file__)) @@ -24,7 +26,8 @@ def init(config_file, backend): values = {} - values['secret'] = binascii.b2a_hex(os.urandom(32)) + + values['secret'] = cliquet_utils.random_bytes_hex(32) values['storage_backend'] = "cliquet.storage.%s" % backend values['cache_backend'] = "cliquet.cache.%s" % backend
{"golden_diff": "diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py\n--- a/kinto/config/__init__.py\n+++ b/kinto/config/__init__.py\n@@ -1,6 +1,8 @@\n import os\n-import binascii\n import codecs\n+\n+from cliquet import utils as cliquet_utils\n+\n from kinto import logger\n \n HERE = os.path.abspath(os.path.dirname(__file__))\n@@ -24,7 +26,8 @@\n \n def init(config_file, backend):\n values = {}\n- values['secret'] = binascii.b2a_hex(os.urandom(32))\n+\n+ values['secret'] = cliquet_utils.random_bytes_hex(32)\n \n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n", "issue": "Do not require cliquet master branch in dev\nAs discussed with @Natim @almet \n\n", "before_files": [{"content": "import os\nimport binascii\nimport codecs\nfrom kinto import logger\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n values['secret'] = binascii.b2a_hex(os.urandom(32))\n\n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n values['permission_backend'] = \"cliquet.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}]}
1,046
189
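
The Kinto patch above swaps the inline `binascii` call for cliquet's `random_bytes_hex` helper when writing the config secret. The snippet below only illustrates the underlying difference between the two calls; the decode step is an assumption about what the helper does, not something stated in the record.

```python
# 32 random bytes hex-encoded give a 64-character secret for kinto.tpl.
import os
import binascii

raw = binascii.b2a_hex(os.urandom(32))   # bytes, e.g. b"9f2c..."
secret = raw.decode("utf-8")             # str, safe to str.format() into
assert len(secret) == 64                 # the rendered config template
```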
gh_patches_debug_21239
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-3068
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Celery queue names inconsistent **Is your feature request related to a problem? Please describe.** In the admin interface, the Celery Status tab shows all the current queues. They seemed to be only ever-increasing recently, so I had to look into why that happened. After a lot of wasting of time, I figured out how to properly get into Flower, (and how to start it...) Here, I discovered that the Celery worker was ignoring all but four of the queues, so I had to manually add them. This did not really seem to be a problem. However, when I entered `broadcasts`, as that is what the queue is named in the admin interface, nothing happened. An investigation later I found out that the queue was actually called `broadcast`, singular. **Describe the solution you'd like** So, please fix that. Either change the name of the queue, or change it in the admin interface, so that someone can look at the admin interface to know what the queue names are... **Describe alternatives you've considered** N/A **Additional context** N/A </issue> <code> [start of bookwyrm/views/admin/celery_status.py] 1 """ celery status """ 2 import json 3 4 from django.contrib.auth.decorators import login_required, permission_required 5 from django.http import HttpResponse 6 from django.template.response import TemplateResponse 7 from django.utils.decorators import method_decorator 8 from django.views import View 9 from django.views.decorators.http import require_GET 10 from django import forms 11 import redis 12 13 from celerywyrm import settings 14 from bookwyrm.tasks import ( 15 app as celery, 16 LOW, 17 MEDIUM, 18 HIGH, 19 STREAMS, 20 IMAGES, 21 SUGGESTED_USERS, 22 EMAIL, 23 CONNECTORS, 24 LISTS, 25 INBOX, 26 IMPORTS, 27 IMPORT_TRIGGERED, 28 BROADCAST, 29 MISC, 30 ) 31 32 r = redis.from_url(settings.REDIS_BROKER_URL) 33 34 # pylint: disable= no-self-use 35 @method_decorator(login_required, name="dispatch") 36 @method_decorator( 37 permission_required("bookwyrm.edit_instance_settings", raise_exception=True), 38 name="dispatch", 39 ) 40 class CeleryStatus(View): 41 """Are your tasks running? 
Well you'd better go catch them""" 42 43 def get(self, request): 44 """See workers and active tasks""" 45 errors = [] 46 try: 47 inspect = celery.control.inspect() 48 stats = inspect.stats() 49 active_tasks = inspect.active() 50 # pylint: disable=broad-except 51 except Exception as err: 52 stats = active_tasks = None 53 errors.append(err) 54 55 try: 56 queues = { 57 LOW: r.llen(LOW), 58 MEDIUM: r.llen(MEDIUM), 59 HIGH: r.llen(HIGH), 60 STREAMS: r.llen(STREAMS), 61 IMAGES: r.llen(IMAGES), 62 SUGGESTED_USERS: r.llen(SUGGESTED_USERS), 63 EMAIL: r.llen(EMAIL), 64 CONNECTORS: r.llen(CONNECTORS), 65 LISTS: r.llen(LISTS), 66 INBOX: r.llen(INBOX), 67 IMPORTS: r.llen(IMPORTS), 68 IMPORT_TRIGGERED: r.llen(IMPORT_TRIGGERED), 69 BROADCAST: r.llen(BROADCAST), 70 MISC: r.llen(MISC), 71 } 72 # pylint: disable=broad-except 73 except Exception as err: 74 queues = None 75 errors.append(err) 76 77 form = ClearCeleryForm() 78 79 data = { 80 "stats": stats, 81 "active_tasks": active_tasks, 82 "queues": queues, 83 "form": form, 84 "errors": errors, 85 } 86 return TemplateResponse(request, "settings/celery.html", data) 87 88 def post(self, request): 89 """Submit form to clear queues""" 90 form = ClearCeleryForm(request.POST) 91 if form.is_valid(): 92 if len(celery.control.ping()) != 0: 93 return HttpResponse( 94 "Refusing to delete tasks while Celery worker is active" 95 ) 96 pipeline = r.pipeline() 97 for queue in form.cleaned_data["queues"]: 98 for task in r.lrange(queue, 0, -1): 99 task_json = json.loads(task) 100 if task_json["headers"]["task"] in form.cleaned_data["tasks"]: 101 pipeline.lrem(queue, 0, task) 102 results = pipeline.execute() 103 104 return HttpResponse(f"Deleted {sum(results)} tasks") 105 106 107 class ClearCeleryForm(forms.Form): 108 """Form to clear queues""" 109 110 queues = forms.MultipleChoiceField( 111 label="Queues", 112 choices=[ 113 (LOW, "Low prioirty"), 114 (MEDIUM, "Medium priority"), 115 (HIGH, "High priority"), 116 (STREAMS, "Streams"), 117 (IMAGES, "Images"), 118 (SUGGESTED_USERS, "Suggested users"), 119 (EMAIL, "Email"), 120 (CONNECTORS, "Connectors"), 121 (LISTS, "Lists"), 122 (INBOX, "Inbox"), 123 (IMPORTS, "Imports"), 124 (IMPORT_TRIGGERED, "Import triggered"), 125 (BROADCAST, "Broadcasts"), 126 (MISC, "Misc"), 127 ], 128 widget=forms.CheckboxSelectMultiple, 129 ) 130 tasks = forms.MultipleChoiceField( 131 label="Tasks", choices=[], widget=forms.CheckboxSelectMultiple 132 ) 133 134 def __init__(self, *args, **kwargs): 135 super().__init__(*args, **kwargs) 136 celery.loader.import_default_modules() 137 self.fields["tasks"].choices = sorted( 138 [(k, k) for k in celery.tasks.keys() if not k.startswith("celery.")] 139 ) 140 141 142 @require_GET 143 # pylint: disable=unused-argument 144 def celery_ping(request): 145 """Just tells you if Celery is on or not""" 146 try: 147 ping = celery.control.inspect().ping() 148 if ping: 149 return HttpResponse() 150 # pylint: disable=broad-except 151 except Exception: 152 pass 153 154 return HttpResponse(status=500) 155 [end of bookwyrm/views/admin/celery_status.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bookwyrm/views/admin/celery_status.py b/bookwyrm/views/admin/celery_status.py --- a/bookwyrm/views/admin/celery_status.py +++ b/bookwyrm/views/admin/celery_status.py @@ -110,20 +110,20 @@ queues = forms.MultipleChoiceField( label="Queues", choices=[ - (LOW, "Low prioirty"), + (LOW, "Low priority"), (MEDIUM, "Medium priority"), (HIGH, "High priority"), - (STREAMS, "Streams"), - (IMAGES, "Images"), - (SUGGESTED_USERS, "Suggested users"), - (EMAIL, "Email"), + (BROADCAST, "Broadcast"), (CONNECTORS, "Connectors"), - (LISTS, "Lists"), - (INBOX, "Inbox"), + (EMAIL, "Email"), + (IMAGES, "Images"), (IMPORTS, "Imports"), (IMPORT_TRIGGERED, "Import triggered"), - (BROADCAST, "Broadcasts"), + (INBOX, "Inbox"), + (LISTS, "Lists"), (MISC, "Misc"), + (STREAMS, "Streams"), + (SUGGESTED_USERS, "Suggested users"), ], widget=forms.CheckboxSelectMultiple, )
{"golden_diff": "diff --git a/bookwyrm/views/admin/celery_status.py b/bookwyrm/views/admin/celery_status.py\n--- a/bookwyrm/views/admin/celery_status.py\n+++ b/bookwyrm/views/admin/celery_status.py\n@@ -110,20 +110,20 @@\n queues = forms.MultipleChoiceField(\n label=\"Queues\",\n choices=[\n- (LOW, \"Low prioirty\"),\n+ (LOW, \"Low priority\"),\n (MEDIUM, \"Medium priority\"),\n (HIGH, \"High priority\"),\n- (STREAMS, \"Streams\"),\n- (IMAGES, \"Images\"),\n- (SUGGESTED_USERS, \"Suggested users\"),\n- (EMAIL, \"Email\"),\n+ (BROADCAST, \"Broadcast\"),\n (CONNECTORS, \"Connectors\"),\n- (LISTS, \"Lists\"),\n- (INBOX, \"Inbox\"),\n+ (EMAIL, \"Email\"),\n+ (IMAGES, \"Images\"),\n (IMPORTS, \"Imports\"),\n (IMPORT_TRIGGERED, \"Import triggered\"),\n- (BROADCAST, \"Broadcasts\"),\n+ (INBOX, \"Inbox\"),\n+ (LISTS, \"Lists\"),\n (MISC, \"Misc\"),\n+ (STREAMS, \"Streams\"),\n+ (SUGGESTED_USERS, \"Suggested users\"),\n ],\n widget=forms.CheckboxSelectMultiple,\n )\n", "issue": " Celery queue names inconsistent\n**Is your feature request related to a problem? Please describe.**\r\nIn the admin interface, the Celery Status tab shows all the current queues.\r\nThey seemed to be only ever-increasing recently, so I had to look into why that happened.\r\nAfter a lot of wasting of time, I figured out how to properly get into Flower, (and how to start it...)\r\nHere, I discovered that the Celery worker was ignoring all but four of the queues, so I had to manually add them.\r\nThis did not really seem to be a problem. However, when I entered `broadcasts`, as that is what the queue is named in the admin interface, nothing happened. An investigation later I found out that the queue was actually called `broadcast`, singular.\r\n\r\n**Describe the solution you'd like**\r\nSo, please fix that. Either change the name of the queue, or change it in the admin interface, so that someone can look at the admin interface to know what the queue names are...\r\n\r\n**Describe alternatives you've considered**\r\nN/A\r\n\r\n**Additional context**\r\nN/A\r\n\n", "before_files": [{"content": "\"\"\" celery status \"\"\"\nimport json\n\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.http import require_GET\nfrom django import forms\nimport redis\n\nfrom celerywyrm import settings\nfrom bookwyrm.tasks import (\n app as celery,\n LOW,\n MEDIUM,\n HIGH,\n STREAMS,\n IMAGES,\n SUGGESTED_USERS,\n EMAIL,\n CONNECTORS,\n LISTS,\n INBOX,\n IMPORTS,\n IMPORT_TRIGGERED,\n BROADCAST,\n MISC,\n)\n\nr = redis.from_url(settings.REDIS_BROKER_URL)\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_instance_settings\", raise_exception=True),\n name=\"dispatch\",\n)\nclass CeleryStatus(View):\n \"\"\"Are your tasks running? 
Well you'd better go catch them\"\"\"\n\n def get(self, request):\n \"\"\"See workers and active tasks\"\"\"\n errors = []\n try:\n inspect = celery.control.inspect()\n stats = inspect.stats()\n active_tasks = inspect.active()\n # pylint: disable=broad-except\n except Exception as err:\n stats = active_tasks = None\n errors.append(err)\n\n try:\n queues = {\n LOW: r.llen(LOW),\n MEDIUM: r.llen(MEDIUM),\n HIGH: r.llen(HIGH),\n STREAMS: r.llen(STREAMS),\n IMAGES: r.llen(IMAGES),\n SUGGESTED_USERS: r.llen(SUGGESTED_USERS),\n EMAIL: r.llen(EMAIL),\n CONNECTORS: r.llen(CONNECTORS),\n LISTS: r.llen(LISTS),\n INBOX: r.llen(INBOX),\n IMPORTS: r.llen(IMPORTS),\n IMPORT_TRIGGERED: r.llen(IMPORT_TRIGGERED),\n BROADCAST: r.llen(BROADCAST),\n MISC: r.llen(MISC),\n }\n # pylint: disable=broad-except\n except Exception as err:\n queues = None\n errors.append(err)\n\n form = ClearCeleryForm()\n\n data = {\n \"stats\": stats,\n \"active_tasks\": active_tasks,\n \"queues\": queues,\n \"form\": form,\n \"errors\": errors,\n }\n return TemplateResponse(request, \"settings/celery.html\", data)\n\n def post(self, request):\n \"\"\"Submit form to clear queues\"\"\"\n form = ClearCeleryForm(request.POST)\n if form.is_valid():\n if len(celery.control.ping()) != 0:\n return HttpResponse(\n \"Refusing to delete tasks while Celery worker is active\"\n )\n pipeline = r.pipeline()\n for queue in form.cleaned_data[\"queues\"]:\n for task in r.lrange(queue, 0, -1):\n task_json = json.loads(task)\n if task_json[\"headers\"][\"task\"] in form.cleaned_data[\"tasks\"]:\n pipeline.lrem(queue, 0, task)\n results = pipeline.execute()\n\n return HttpResponse(f\"Deleted {sum(results)} tasks\")\n\n\nclass ClearCeleryForm(forms.Form):\n \"\"\"Form to clear queues\"\"\"\n\n queues = forms.MultipleChoiceField(\n label=\"Queues\",\n choices=[\n (LOW, \"Low prioirty\"),\n (MEDIUM, \"Medium priority\"),\n (HIGH, \"High priority\"),\n (STREAMS, \"Streams\"),\n (IMAGES, \"Images\"),\n (SUGGESTED_USERS, \"Suggested users\"),\n (EMAIL, \"Email\"),\n (CONNECTORS, \"Connectors\"),\n (LISTS, \"Lists\"),\n (INBOX, \"Inbox\"),\n (IMPORTS, \"Imports\"),\n (IMPORT_TRIGGERED, \"Import triggered\"),\n (BROADCAST, \"Broadcasts\"),\n (MISC, \"Misc\"),\n ],\n widget=forms.CheckboxSelectMultiple,\n )\n tasks = forms.MultipleChoiceField(\n label=\"Tasks\", choices=[], widget=forms.CheckboxSelectMultiple\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n celery.loader.import_default_modules()\n self.fields[\"tasks\"].choices = sorted(\n [(k, k) for k in celery.tasks.keys() if not k.startswith(\"celery.\")]\n )\n\n\n@require_GET\n# pylint: disable=unused-argument\ndef celery_ping(request):\n \"\"\"Just tells you if Celery is on or not\"\"\"\n try:\n ping = celery.control.inspect().ping()\n if ping:\n return HttpResponse()\n # pylint: disable=broad-except\n except Exception:\n pass\n\n return HttpResponse(status=500)\n", "path": "bookwyrm/views/admin/celery_status.py"}]}
2,159
306
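
The BookWyrm fix above relabels the admin form choices; the underlying Redis key was always `broadcast`, singular. When a label and the real queue name drift apart, one way to check is to read the broker directly. A rough sketch follows; it assumes a Redis broker URL, and non-queue list keys (for example Celery's unacked bookkeeping) may also appear.

```python
# List candidate Celery queues by looking for LIST-typed keys on the
# broker, the same structure the admin view's r.llen() calls rely on.
import redis

r = redis.from_url("redis://localhost:6379/0")   # assumed broker URL

for key in sorted(r.scan_iter()):
    if r.type(key) == b"list":
        print(key.decode(), r.llen(key))
```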
gh_patches_debug_33749
rasdani/github-patches
git_diff
scverse__scanpy-2089
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use session_info instead of sinfo [`sinfo` has been replaced](https://pypi.org/project/sinfo/) with [`session_info`](https://gitlab.com/joelostblom/session_info), which is definitely a better name. We should switch over to using this. I think we'll be calling it like: `import session_info; session_info.show(dependencies=True, html=False, **extra_kwargs)` </issue> <code> [start of scanpy/logging.py] 1 """Logging and Profiling 2 """ 3 import io 4 import logging 5 import sys 6 from functools import update_wrapper, partial 7 from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET 8 from datetime import datetime, timedelta, timezone 9 from typing import Optional 10 11 import anndata.logging 12 from sinfo import sinfo 13 14 15 HINT = (INFO + DEBUG) // 2 16 logging.addLevelName(HINT, 'HINT') 17 18 19 class _RootLogger(logging.RootLogger): 20 def __init__(self, level): 21 super().__init__(level) 22 self.propagate = False 23 _RootLogger.manager = logging.Manager(self) 24 25 def log( 26 self, 27 level: int, 28 msg: str, 29 *, 30 extra: Optional[dict] = None, 31 time: datetime = None, 32 deep: Optional[str] = None, 33 ) -> datetime: 34 from . import settings 35 36 now = datetime.now(timezone.utc) 37 time_passed: timedelta = None if time is None else now - time 38 extra = { 39 **(extra or {}), 40 'deep': deep if settings.verbosity.level < level else None, 41 'time_passed': time_passed, 42 } 43 super().log(level, msg, extra=extra) 44 return now 45 46 def critical(self, msg, *, time=None, deep=None, extra=None) -> datetime: 47 return self.log(CRITICAL, msg, time=time, deep=deep, extra=extra) 48 49 def error(self, msg, *, time=None, deep=None, extra=None) -> datetime: 50 return self.log(ERROR, msg, time=time, deep=deep, extra=extra) 51 52 def warning(self, msg, *, time=None, deep=None, extra=None) -> datetime: 53 return self.log(WARNING, msg, time=time, deep=deep, extra=extra) 54 55 def info(self, msg, *, time=None, deep=None, extra=None) -> datetime: 56 return self.log(INFO, msg, time=time, deep=deep, extra=extra) 57 58 def hint(self, msg, *, time=None, deep=None, extra=None) -> datetime: 59 return self.log(HINT, msg, time=time, deep=deep, extra=extra) 60 61 def debug(self, msg, *, time=None, deep=None, extra=None) -> datetime: 62 return self.log(DEBUG, msg, time=time, deep=deep, extra=extra) 63 64 65 def _set_log_file(settings): 66 file = settings.logfile 67 name = settings.logpath 68 root = settings._root_logger 69 h = logging.StreamHandler(file) if name is None else logging.FileHandler(name) 70 h.setFormatter(_LogFormatter()) 71 h.setLevel(root.level) 72 if len(root.handlers) == 1: 73 root.removeHandler(root.handlers[0]) 74 elif len(root.handlers) > 1: 75 raise RuntimeError('Scanpy’s root logger somehow got more than one handler') 76 root.addHandler(h) 77 78 79 def _set_log_level(settings, level: int): 80 root = settings._root_logger 81 root.setLevel(level) 82 (h,) = root.handlers # may only be 1 83 h.setLevel(level) 84 85 86 class _LogFormatter(logging.Formatter): 87 def __init__( 88 self, fmt='{levelname}: {message}', datefmt='%Y-%m-%d %H:%M', style='{' 89 ): 90 super().__init__(fmt, datefmt, style) 91 92 def format(self, record: logging.LogRecord): 93 format_orig = self._style._fmt 94 if record.levelno == INFO: 95 self._style._fmt = '{message}' 96 elif record.levelno == HINT: 97 self._style._fmt = '--> {message}' 98 elif record.levelno == DEBUG: 99 self._style._fmt = ' {message}' 
100 if record.time_passed: 101 # strip microseconds 102 if record.time_passed.microseconds: 103 record.time_passed = timedelta( 104 seconds=int(record.time_passed.total_seconds()) 105 ) 106 if '{time_passed}' in record.msg: 107 record.msg = record.msg.replace( 108 '{time_passed}', str(record.time_passed) 109 ) 110 else: 111 self._style._fmt += ' ({time_passed})' 112 if record.deep: 113 record.msg = f'{record.msg}: {record.deep}' 114 result = logging.Formatter.format(self, record) 115 self._style._fmt = format_orig 116 return result 117 118 119 print_memory_usage = anndata.logging.print_memory_usage 120 get_memory_usage = anndata.logging.get_memory_usage 121 122 123 _DEPENDENCIES_NUMERICS = [ 124 'anndata', # anndata actually shouldn't, but as long as it's in development 125 'umap', 126 'numpy', 127 'scipy', 128 'pandas', 129 ('sklearn', 'scikit-learn'), 130 'statsmodels', 131 ('igraph', 'python-igraph'), 132 'louvain', 133 'leidenalg', 134 'pynndescent', 135 ] 136 137 138 def _versions_dependencies(dependencies): 139 # this is not the same as the requirements! 140 for mod in dependencies: 141 mod_name, dist_name = mod if isinstance(mod, tuple) else (mod, mod) 142 try: 143 imp = __import__(mod_name) 144 yield dist_name, imp.__version__ 145 except (ImportError, AttributeError): 146 pass 147 148 149 def print_header(*, file=None): 150 """\ 151 Versions that might influence the numerical results. 152 Matplotlib and Seaborn are excluded from this. 153 """ 154 155 modules = ['scanpy'] + _DEPENDENCIES_NUMERICS 156 print( 157 ' '.join(f'{mod}=={ver}' for mod, ver in _versions_dependencies(modules)), 158 file=file or sys.stdout, 159 ) 160 161 162 def print_versions(*, file=None): 163 """Print print versions of imported packages""" 164 if file is None: # Inform people about the behavior change 165 warning('If you miss a compact list, please try `print_header`!') 166 stdout = sys.stdout 167 try: 168 buf = sys.stdout = io.StringIO() 169 sinfo( 170 dependencies=True, 171 excludes=[ 172 'builtins', 173 'stdlib_list', 174 'importlib_metadata', 175 # Special module present if test coverage being calculated 176 # https://gitlab.com/joelostblom/sinfo/-/issues/10 177 "$coverage", 178 ], 179 ) 180 finally: 181 sys.stdout = stdout 182 output = buf.getvalue() 183 print(output, file=file) 184 185 186 def print_version_and_date(*, file=None): 187 """\ 188 Useful for starting a notebook so you see when you started working. 189 """ 190 from . import __version__ 191 192 if file is None: 193 file = sys.stdout 194 print( 195 f'Running Scanpy {__version__}, ' f'on {datetime.now():%Y-%m-%d %H:%M}.', 196 file=file, 197 ) 198 199 200 def _copy_docs_and_signature(fn): 201 return partial(update_wrapper, wrapped=fn, assigned=['__doc__', '__annotations__']) 202 203 204 def error( 205 msg: str, 206 *, 207 time: datetime = None, 208 deep: Optional[str] = None, 209 extra: Optional[dict] = None, 210 ) -> datetime: 211 """\ 212 Log message with specific level and return current time. 213 214 Parameters 215 ========== 216 msg 217 Message to display. 218 time 219 A time in the past. If this is passed, the time difference from then 220 to now is appended to `msg` as ` (HH:MM:SS)`. 221 If `msg` contains `{time_passed}`, the time difference is instead 222 inserted at that position. 223 deep 224 If the current verbosity is higher than the log function’s level, 225 this gets displayed as well 226 extra 227 Additional values you can specify in `msg` like `{time_passed}`. 
228 """ 229 from ._settings import settings 230 231 return settings._root_logger.error(msg, time=time, deep=deep, extra=extra) 232 233 234 @_copy_docs_and_signature(error) 235 def warning(msg, *, time=None, deep=None, extra=None) -> datetime: 236 from ._settings import settings 237 238 return settings._root_logger.warning(msg, time=time, deep=deep, extra=extra) 239 240 241 @_copy_docs_and_signature(error) 242 def info(msg, *, time=None, deep=None, extra=None) -> datetime: 243 from ._settings import settings 244 245 return settings._root_logger.info(msg, time=time, deep=deep, extra=extra) 246 247 248 @_copy_docs_and_signature(error) 249 def hint(msg, *, time=None, deep=None, extra=None) -> datetime: 250 from ._settings import settings 251 252 return settings._root_logger.hint(msg, time=time, deep=deep, extra=extra) 253 254 255 @_copy_docs_and_signature(error) 256 def debug(msg, *, time=None, deep=None, extra=None) -> datetime: 257 from ._settings import settings 258 259 return settings._root_logger.debug(msg, time=time, deep=deep, extra=extra) 260 [end of scanpy/logging.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scanpy/logging.py b/scanpy/logging.py --- a/scanpy/logging.py +++ b/scanpy/logging.py @@ -1,15 +1,14 @@ """Logging and Profiling """ -import io import logging import sys from functools import update_wrapper, partial -from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET +from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG from datetime import datetime, timedelta, timezone -from typing import Optional +from typing import Optional, IO +import warnings import anndata.logging -from sinfo import sinfo HINT = (INFO + DEBUG) // 2 @@ -159,28 +158,37 @@ ) -def print_versions(*, file=None): - """Print print versions of imported packages""" - if file is None: # Inform people about the behavior change - warning('If you miss a compact list, please try `print_header`!') - stdout = sys.stdout - try: - buf = sys.stdout = io.StringIO() - sinfo( +def print_versions(*, file: Optional[IO[str]] = None): + """\ + Print versions of imported packages, OS, and jupyter environment. + + For more options (including rich output) use `session_info.show` directly. + """ + import session_info + + if file is not None: + from contextlib import redirect_stdout + + warnings.warn( + "Passing argument 'file' to print_versions is deprecated, and will be " + "removed in a future version.", + FutureWarning, + ) + with redirect_stdout(file): + print_versions() + else: + session_info.show( dependencies=True, + html=False, excludes=[ 'builtins', 'stdlib_list', 'importlib_metadata', # Special module present if test coverage being calculated - # https://gitlab.com/joelostblom/sinfo/-/issues/10 + # https://gitlab.com/joelostblom/session_info/-/issues/10 "$coverage", ], ) - finally: - sys.stdout = stdout - output = buf.getvalue() - print(output, file=file) def print_version_and_date(*, file=None):
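The patch above keeps the old `file=` argument working by emitting a `FutureWarning` and re-entering the function with stdout redirected. A hedged, standalone sketch of that deprecation pattern (`report` and its output are placeholders, not the real `print_versions`):

```python
import warnings
from contextlib import redirect_stdout
from io import StringIO
from typing import IO, Optional


def report(*, file: Optional[IO[str]] = None) -> None:
    if file is not None:
        warnings.warn(
            "Passing 'file' to report is deprecated and will be removed "
            "in a future version.",
            FutureWarning,
        )
        with redirect_stdout(file):
            report()
        return
    print("package versions would be printed here")


buf = StringIO()
report(file=buf)              # warns once; output is captured in `buf`
print(buf.getvalue(), end="")
```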
{"golden_diff": "diff --git a/scanpy/logging.py b/scanpy/logging.py\n--- a/scanpy/logging.py\n+++ b/scanpy/logging.py\n@@ -1,15 +1,14 @@\n \"\"\"Logging and Profiling\n \"\"\"\n-import io\n import logging\n import sys\n from functools import update_wrapper, partial\n-from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET\n+from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG\n from datetime import datetime, timedelta, timezone\n-from typing import Optional\n+from typing import Optional, IO\n+import warnings\n \n import anndata.logging\n-from sinfo import sinfo\n \n \n HINT = (INFO + DEBUG) // 2\n@@ -159,28 +158,37 @@\n )\n \n \n-def print_versions(*, file=None):\n- \"\"\"Print print versions of imported packages\"\"\"\n- if file is None: # Inform people about the behavior change\n- warning('If you miss a compact list, please try `print_header`!')\n- stdout = sys.stdout\n- try:\n- buf = sys.stdout = io.StringIO()\n- sinfo(\n+def print_versions(*, file: Optional[IO[str]] = None):\n+ \"\"\"\\\n+ Print versions of imported packages, OS, and jupyter environment.\n+\n+ For more options (including rich output) use `session_info.show` directly.\n+ \"\"\"\n+ import session_info\n+\n+ if file is not None:\n+ from contextlib import redirect_stdout\n+\n+ warnings.warn(\n+ \"Passing argument 'file' to print_versions is deprecated, and will be \"\n+ \"removed in a future version.\",\n+ FutureWarning,\n+ )\n+ with redirect_stdout(file):\n+ print_versions()\n+ else:\n+ session_info.show(\n dependencies=True,\n+ html=False,\n excludes=[\n 'builtins',\n 'stdlib_list',\n 'importlib_metadata',\n # Special module present if test coverage being calculated\n- # https://gitlab.com/joelostblom/sinfo/-/issues/10\n+ # https://gitlab.com/joelostblom/session_info/-/issues/10\n \"$coverage\",\n ],\n )\n- finally:\n- sys.stdout = stdout\n- output = buf.getvalue()\n- print(output, file=file)\n \n \n def print_version_and_date(*, file=None):\n", "issue": "Use session_info instead of sinfo\n[`sinfo` has been replaced](https://pypi.org/project/sinfo/) with [`session_info`](https://gitlab.com/joelostblom/session_info), which is definitely a better name. We should switch over to using this.\r\n\r\nI think we'll be calling it like: `import session_info; session_info.show(dependencies=True, html=False, **extra_kwargs)`\n", "before_files": [{"content": "\"\"\"Logging and Profiling\n\"\"\"\nimport io\nimport logging\nimport sys\nfrom functools import update_wrapper, partial\nfrom logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Optional\n\nimport anndata.logging\nfrom sinfo import sinfo\n\n\nHINT = (INFO + DEBUG) // 2\nlogging.addLevelName(HINT, 'HINT')\n\n\nclass _RootLogger(logging.RootLogger):\n def __init__(self, level):\n super().__init__(level)\n self.propagate = False\n _RootLogger.manager = logging.Manager(self)\n\n def log(\n self,\n level: int,\n msg: str,\n *,\n extra: Optional[dict] = None,\n time: datetime = None,\n deep: Optional[str] = None,\n ) -> datetime:\n from . 
import settings\n\n now = datetime.now(timezone.utc)\n time_passed: timedelta = None if time is None else now - time\n extra = {\n **(extra or {}),\n 'deep': deep if settings.verbosity.level < level else None,\n 'time_passed': time_passed,\n }\n super().log(level, msg, extra=extra)\n return now\n\n def critical(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(CRITICAL, msg, time=time, deep=deep, extra=extra)\n\n def error(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(ERROR, msg, time=time, deep=deep, extra=extra)\n\n def warning(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(WARNING, msg, time=time, deep=deep, extra=extra)\n\n def info(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(INFO, msg, time=time, deep=deep, extra=extra)\n\n def hint(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(HINT, msg, time=time, deep=deep, extra=extra)\n\n def debug(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(DEBUG, msg, time=time, deep=deep, extra=extra)\n\n\ndef _set_log_file(settings):\n file = settings.logfile\n name = settings.logpath\n root = settings._root_logger\n h = logging.StreamHandler(file) if name is None else logging.FileHandler(name)\n h.setFormatter(_LogFormatter())\n h.setLevel(root.level)\n if len(root.handlers) == 1:\n root.removeHandler(root.handlers[0])\n elif len(root.handlers) > 1:\n raise RuntimeError('Scanpy\u2019s root logger somehow got more than one handler')\n root.addHandler(h)\n\n\ndef _set_log_level(settings, level: int):\n root = settings._root_logger\n root.setLevel(level)\n (h,) = root.handlers # may only be 1\n h.setLevel(level)\n\n\nclass _LogFormatter(logging.Formatter):\n def __init__(\n self, fmt='{levelname}: {message}', datefmt='%Y-%m-%d %H:%M', style='{'\n ):\n super().__init__(fmt, datefmt, style)\n\n def format(self, record: logging.LogRecord):\n format_orig = self._style._fmt\n if record.levelno == INFO:\n self._style._fmt = '{message}'\n elif record.levelno == HINT:\n self._style._fmt = '--> {message}'\n elif record.levelno == DEBUG:\n self._style._fmt = ' {message}'\n if record.time_passed:\n # strip microseconds\n if record.time_passed.microseconds:\n record.time_passed = timedelta(\n seconds=int(record.time_passed.total_seconds())\n )\n if '{time_passed}' in record.msg:\n record.msg = record.msg.replace(\n '{time_passed}', str(record.time_passed)\n )\n else:\n self._style._fmt += ' ({time_passed})'\n if record.deep:\n record.msg = f'{record.msg}: {record.deep}'\n result = logging.Formatter.format(self, record)\n self._style._fmt = format_orig\n return result\n\n\nprint_memory_usage = anndata.logging.print_memory_usage\nget_memory_usage = anndata.logging.get_memory_usage\n\n\n_DEPENDENCIES_NUMERICS = [\n 'anndata', # anndata actually shouldn't, but as long as it's in development\n 'umap',\n 'numpy',\n 'scipy',\n 'pandas',\n ('sklearn', 'scikit-learn'),\n 'statsmodels',\n ('igraph', 'python-igraph'),\n 'louvain',\n 'leidenalg',\n 'pynndescent',\n]\n\n\ndef _versions_dependencies(dependencies):\n # this is not the same as the requirements!\n for mod in dependencies:\n mod_name, dist_name = mod if isinstance(mod, tuple) else (mod, mod)\n try:\n imp = __import__(mod_name)\n yield dist_name, imp.__version__\n except (ImportError, AttributeError):\n pass\n\n\ndef print_header(*, file=None):\n \"\"\"\\\n Versions that might influence the numerical results.\n Matplotlib and 
Seaborn are excluded from this.\n \"\"\"\n\n modules = ['scanpy'] + _DEPENDENCIES_NUMERICS\n print(\n ' '.join(f'{mod}=={ver}' for mod, ver in _versions_dependencies(modules)),\n file=file or sys.stdout,\n )\n\n\ndef print_versions(*, file=None):\n \"\"\"Print print versions of imported packages\"\"\"\n if file is None: # Inform people about the behavior change\n warning('If you miss a compact list, please try `print_header`!')\n stdout = sys.stdout\n try:\n buf = sys.stdout = io.StringIO()\n sinfo(\n dependencies=True,\n excludes=[\n 'builtins',\n 'stdlib_list',\n 'importlib_metadata',\n # Special module present if test coverage being calculated\n # https://gitlab.com/joelostblom/sinfo/-/issues/10\n \"$coverage\",\n ],\n )\n finally:\n sys.stdout = stdout\n output = buf.getvalue()\n print(output, file=file)\n\n\ndef print_version_and_date(*, file=None):\n \"\"\"\\\n Useful for starting a notebook so you see when you started working.\n \"\"\"\n from . import __version__\n\n if file is None:\n file = sys.stdout\n print(\n f'Running Scanpy {__version__}, ' f'on {datetime.now():%Y-%m-%d %H:%M}.',\n file=file,\n )\n\n\ndef _copy_docs_and_signature(fn):\n return partial(update_wrapper, wrapped=fn, assigned=['__doc__', '__annotations__'])\n\n\ndef error(\n msg: str,\n *,\n time: datetime = None,\n deep: Optional[str] = None,\n extra: Optional[dict] = None,\n) -> datetime:\n \"\"\"\\\n Log message with specific level and return current time.\n\n Parameters\n ==========\n msg\n Message to display.\n time\n A time in the past. If this is passed, the time difference from then\n to now is appended to `msg` as ` (HH:MM:SS)`.\n If `msg` contains `{time_passed}`, the time difference is instead\n inserted at that position.\n deep\n If the current verbosity is higher than the log function\u2019s level,\n this gets displayed as well\n extra\n Additional values you can specify in `msg` like `{time_passed}`.\n \"\"\"\n from ._settings import settings\n\n return settings._root_logger.error(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef warning(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.warning(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef info(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.info(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef hint(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.hint(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef debug(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.debug(msg, time=time, deep=deep, extra=extra)\n", "path": "scanpy/logging.py"}]}
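As the issue quoted in the row above suggests, after the migration a compact report can also be produced by calling the third-party package directly; a short usage sketch (assumes the `session_info` package is installed):

```python
import session_info

# Plain-text listing of imported packages and their dependencies,
# mirroring what the patched print_versions() does internally.
session_info.show(dependencies=True, html=False)
```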
pyodide__pyodide-3485
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `pyodide_build.out_of_tree.venv.create_pyodide_venv` wrongly requires an Emscripten compiler See https://github.com/pyodide/pyodide/discussions/3462#discussioncomment-4710208 </issue> <code> [start of pyodide-build/pyodide_build/out_of_tree/venv.py] 1 import shutil 2 import subprocess 3 import sys 4 import textwrap 5 from pathlib import Path 6 from typing import Any 7 8 from ..common import ( 9 check_emscripten_version, 10 exit_with_stdio, 11 get_make_flag, 12 get_pyodide_root, 13 in_xbuildenv, 14 ) 15 from ..logger import logger 16 17 18 def check_result(result: subprocess.CompletedProcess[str], msg: str) -> None: 19 """Abort if the process returns a nonzero error code""" 20 if result.returncode != 0: 21 logger.error(msg) 22 exit_with_stdio(result) 23 24 25 def dedent(s: str) -> str: 26 return textwrap.dedent(s).strip() + "\n" 27 28 29 def get_pyversion() -> str: 30 return f"{sys.version_info.major}.{sys.version_info.minor}" 31 32 33 def check_host_python_version(session: Any) -> None: 34 pyodide_version = session.interpreter.version.partition(" ")[0].split(".")[:2] 35 sys_version = [str(sys.version_info.major), str(sys.version_info.minor)] 36 if pyodide_version == sys_version: 37 return 38 pyodide_version_fmt = ".".join(pyodide_version) 39 sys_version_fmt = ".".join(sys_version) 40 logger.stderr( 41 f"Expected host Python version to be {pyodide_version_fmt} but got version {sys_version_fmt}" 42 ) 43 sys.exit(1) 44 45 46 def pyodide_dist_dir() -> Path: 47 return get_pyodide_root() / "dist" 48 49 50 def create_pip_conf(venv_root: Path) -> None: 51 """Create pip.conf file in venv root 52 53 This file adds a few options that will always be used by pip install. 54 """ 55 if in_xbuildenv(): 56 # In the xbuildenv, we don't have the packages locally. We will include 57 # in the xbuildenv a PEP 503 index for the vendored Pyodide packages 58 # https://peps.python.org/pep-0503/ 59 repo = f'extra-index-url=file:{get_pyodide_root()/"pypa_index"}' 60 else: 61 # In the Pyodide development environment, the Pyodide dist directory 62 # should contain the needed wheels. find-links 63 repo = f"find-links={pyodide_dist_dir()}" 64 65 # Prevent attempts to install binary wheels from source. 66 # Maybe some day we can convince pip to invoke `pyodide build` as the build 67 # front end for wheels... 68 (venv_root / "pip.conf").write_text( 69 dedent( 70 f""" 71 [install] 72 only-binary=:all: 73 {repo} 74 """ 75 ) 76 ) 77 78 79 def get_pip_monkeypatch(venv_bin: Path) -> str: 80 """Monkey patch pip's environment to show info about Pyodide's environment. 81 82 The code returned is injected at the beginning of the pip script. 
83 """ 84 result = subprocess.run( 85 [ 86 venv_bin / "python", 87 "-c", 88 dedent( 89 """ 90 import os, sys, sysconfig, platform 91 print([ 92 os.name, 93 sys.platform, 94 sys.implementation._multiarch, 95 sysconfig.get_platform() 96 ]) 97 """ 98 ), 99 ], 100 capture_output=True, 101 encoding="utf8", 102 ) 103 check_result(result, "ERROR: failed to invoke Pyodide") 104 platform_data = result.stdout 105 sysconfigdata_dir = Path(get_make_flag("TARGETINSTALLDIR")) / "sysconfigdata" 106 107 return dedent( 108 f"""\ 109 import os 110 import sys 111 os_name, sys_platform, multiarch, host_platform = {platform_data} 112 os.name = os_name 113 sys.platform = sys_platform 114 sys.implementation._multiarch = multiarch 115 os.environ["_PYTHON_HOST_PLATFORM"] = host_platform 116 os.environ["_PYTHON_SYSCONFIGDATA_NAME"] = f'_sysconfigdata_{{sys.abiflags}}_{{sys.platform}}_{{sys.implementation._multiarch}}' 117 sys.path.append("{sysconfigdata_dir}") 118 import sysconfig 119 sysconfig.get_config_vars() 120 del os.environ["_PYTHON_SYSCONFIGDATA_NAME"] 121 """ 122 ) 123 124 125 def create_pip_script(venv_bin): 126 """Create pip and write it into the virtualenv bin folder.""" 127 # pip needs to run in the host Python not in Pyodide, so we'll use the host 128 # Python in the shebang. Use whichever Python was used to invoke 129 # pyodide venv. 130 host_python_path = venv_bin / f"python{get_pyversion()}-host" 131 host_python_path.symlink_to(sys.executable) 132 133 (venv_bin / "pip").write_text( 134 # Other than the shebang and the monkey patch, this is exactly what 135 # normal pip looks like. 136 f"#!{host_python_path} -s\n" 137 + get_pip_monkeypatch(venv_bin) 138 + dedent( 139 """ 140 import re 141 import sys 142 from pip._internal.cli.main import main 143 if __name__ == '__main__': 144 sys.argv[0] = re.sub(r'(-script\\.pyw|\\.exe)?$', '', sys.argv[0]) 145 sys.exit(main()) 146 """ 147 ) 148 ) 149 (venv_bin / "pip").chmod(0o777) 150 151 pyversion = get_pyversion() 152 other_pips = [ 153 venv_bin / "pip3", 154 venv_bin / f"pip{pyversion}", 155 venv_bin / f"pip-{pyversion}", 156 ] 157 158 for pip in other_pips: 159 pip.unlink() 160 pip.symlink_to(venv_bin / "pip") 161 162 163 def create_pyodide_script(venv_bin: Path) -> None: 164 """Write pyodide cli script into the virtualenv bin folder""" 165 import os 166 167 # Temporarily restore us to the environment that 'pyodide venv' was 168 # invoked in 169 PATH = os.environ["PATH"] 170 PYODIDE_ROOT = os.environ["PYODIDE_ROOT"] 171 172 original_pyodide_cli = shutil.which("pyodide") 173 if original_pyodide_cli is None: 174 raise RuntimeError("ERROR: pyodide cli not found") 175 176 pyodide_path = venv_bin / "pyodide" 177 pyodide_path.write_text( 178 dedent( 179 f""" 180 #!/bin/sh 181 PATH='{PATH}' PYODIDE_ROOT='{PYODIDE_ROOT}' exec {original_pyodide_cli} "$@" 182 """ 183 ) 184 ) 185 pyodide_path.chmod(0o777) 186 187 188 def install_stdlib(venv_bin: Path) -> None: 189 """Install micropip and all unvendored stdlib modules""" 190 # Micropip we could install with pip hypothetically, but because we use 191 # `--extra-index-url` it would install the pypi version which we don't want. 192 193 # Other stuff we need to load with loadPackage 194 # TODO: Also load all shared libs. 
195 to_load = ["micropip"] 196 result = subprocess.run( 197 [ 198 venv_bin / "python", 199 "-c", 200 dedent( 201 f""" 202 from _pyodide._importhook import UNVENDORED_STDLIBS_AND_TEST; 203 from pyodide_js import loadPackage; 204 from pyodide_js._api import repodata_packages 205 shared_libs = [pkgname for (pkgname,pkg) in repodata_packages.object_entries() if getattr(pkg, "shared_library", False)] 206 207 to_load = [*UNVENDORED_STDLIBS_AND_TEST, *shared_libs, *{to_load!r}] 208 loadPackage(to_load); 209 """ 210 ), 211 ], 212 capture_output=True, 213 encoding="utf8", 214 ) 215 check_result(result, "ERROR: failed to install unvendored stdlib modules") 216 217 218 def create_pyodide_venv(dest: Path) -> None: 219 """Create a Pyodide virtualenv and store it into dest""" 220 logger.info(f"Creating Pyodide virtualenv at {dest}") 221 from virtualenv import session_via_cli 222 223 if dest.exists(): 224 logger.error(f"ERROR: dest directory '{dest}' already exists") 225 sys.exit(1) 226 227 check_emscripten_version() 228 229 interp_path = pyodide_dist_dir() / "python" 230 session = session_via_cli(["--no-wheel", "-p", str(interp_path), str(dest)]) 231 check_host_python_version(session) 232 233 try: 234 session.run() 235 venv_root = Path(session.creator.dest).absolute() 236 venv_bin = venv_root / "bin" 237 238 logger.info("... Configuring virtualenv") 239 create_pip_conf(venv_root) 240 create_pip_script(venv_bin) 241 create_pyodide_script(venv_bin) 242 logger.info("... Installing standard library") 243 install_stdlib(venv_bin) 244 except (Exception, KeyboardInterrupt, SystemExit): 245 shutil.rmtree(session.creator.dest) 246 raise 247 248 logger.success("Successfully created Pyodide virtual environment!") 249 [end of pyodide-build/pyodide_build/out_of_tree/venv.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
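One detail worth pulling out of the listing above: `create_pip_conf` pins pip's behaviour per-virtualenv by dropping a `pip.conf` next to it. A hedged, standalone sketch of that idea (paths and names are made up for illustration, not pyodide-build's real layout):

```python
import tempfile
from pathlib import Path
from textwrap import dedent


def write_pip_conf(venv_root: Path, wheel_dir: Path) -> None:
    # only-binary=:all: refuses source builds; find-links points pip at local wheels.
    (venv_root / "pip.conf").write_text(
        dedent(
            f"""\
            [install]
            only-binary=:all:
            find-links={wheel_dir}
            """
        )
    )


venv_root = Path(tempfile.mkdtemp())
write_pip_conf(venv_root, venv_root / "dist")
print((venv_root / "pip.conf").read_text())
```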
diff --git a/pyodide-build/pyodide_build/out_of_tree/venv.py b/pyodide-build/pyodide_build/out_of_tree/venv.py --- a/pyodide-build/pyodide_build/out_of_tree/venv.py +++ b/pyodide-build/pyodide_build/out_of_tree/venv.py @@ -5,13 +5,7 @@ from pathlib import Path from typing import Any -from ..common import ( - check_emscripten_version, - exit_with_stdio, - get_make_flag, - get_pyodide_root, - in_xbuildenv, -) +from ..common import exit_with_stdio, get_make_flag, get_pyodide_root, in_xbuildenv from ..logger import logger @@ -224,8 +218,6 @@ logger.error(f"ERROR: dest directory '{dest}' already exists") sys.exit(1) - check_emscripten_version() - interp_path = pyodide_dist_dir() / "python" session = session_via_cli(["--no-wheel", "-p", str(interp_path), str(dest)]) check_host_python_version(session)
{"golden_diff": "diff --git a/pyodide-build/pyodide_build/out_of_tree/venv.py b/pyodide-build/pyodide_build/out_of_tree/venv.py\n--- a/pyodide-build/pyodide_build/out_of_tree/venv.py\n+++ b/pyodide-build/pyodide_build/out_of_tree/venv.py\n@@ -5,13 +5,7 @@\n from pathlib import Path\n from typing import Any\n \n-from ..common import (\n- check_emscripten_version,\n- exit_with_stdio,\n- get_make_flag,\n- get_pyodide_root,\n- in_xbuildenv,\n-)\n+from ..common import exit_with_stdio, get_make_flag, get_pyodide_root, in_xbuildenv\n from ..logger import logger\n \n \n@@ -224,8 +218,6 @@\n logger.error(f\"ERROR: dest directory '{dest}' already exists\")\n sys.exit(1)\n \n- check_emscripten_version()\n-\n interp_path = pyodide_dist_dir() / \"python\"\n session = session_via_cli([\"--no-wheel\", \"-p\", str(interp_path), str(dest)])\n check_host_python_version(session)\n", "issue": "`pyodide_build.out_of_tree.venv.create_pyodide_venv` wrongly requires an Emscripten compiler\nSee https://github.com/pyodide/pyodide/discussions/3462#discussioncomment-4710208\n", "before_files": [{"content": "import shutil\nimport subprocess\nimport sys\nimport textwrap\nfrom pathlib import Path\nfrom typing import Any\n\nfrom ..common import (\n check_emscripten_version,\n exit_with_stdio,\n get_make_flag,\n get_pyodide_root,\n in_xbuildenv,\n)\nfrom ..logger import logger\n\n\ndef check_result(result: subprocess.CompletedProcess[str], msg: str) -> None:\n \"\"\"Abort if the process returns a nonzero error code\"\"\"\n if result.returncode != 0:\n logger.error(msg)\n exit_with_stdio(result)\n\n\ndef dedent(s: str) -> str:\n return textwrap.dedent(s).strip() + \"\\n\"\n\n\ndef get_pyversion() -> str:\n return f\"{sys.version_info.major}.{sys.version_info.minor}\"\n\n\ndef check_host_python_version(session: Any) -> None:\n pyodide_version = session.interpreter.version.partition(\" \")[0].split(\".\")[:2]\n sys_version = [str(sys.version_info.major), str(sys.version_info.minor)]\n if pyodide_version == sys_version:\n return\n pyodide_version_fmt = \".\".join(pyodide_version)\n sys_version_fmt = \".\".join(sys_version)\n logger.stderr(\n f\"Expected host Python version to be {pyodide_version_fmt} but got version {sys_version_fmt}\"\n )\n sys.exit(1)\n\n\ndef pyodide_dist_dir() -> Path:\n return get_pyodide_root() / \"dist\"\n\n\ndef create_pip_conf(venv_root: Path) -> None:\n \"\"\"Create pip.conf file in venv root\n\n This file adds a few options that will always be used by pip install.\n \"\"\"\n if in_xbuildenv():\n # In the xbuildenv, we don't have the packages locally. We will include\n # in the xbuildenv a PEP 503 index for the vendored Pyodide packages\n # https://peps.python.org/pep-0503/\n repo = f'extra-index-url=file:{get_pyodide_root()/\"pypa_index\"}'\n else:\n # In the Pyodide development environment, the Pyodide dist directory\n # should contain the needed wheels. 
find-links\n repo = f\"find-links={pyodide_dist_dir()}\"\n\n # Prevent attempts to install binary wheels from source.\n # Maybe some day we can convince pip to invoke `pyodide build` as the build\n # front end for wheels...\n (venv_root / \"pip.conf\").write_text(\n dedent(\n f\"\"\"\n [install]\n only-binary=:all:\n {repo}\n \"\"\"\n )\n )\n\n\ndef get_pip_monkeypatch(venv_bin: Path) -> str:\n \"\"\"Monkey patch pip's environment to show info about Pyodide's environment.\n\n The code returned is injected at the beginning of the pip script.\n \"\"\"\n result = subprocess.run(\n [\n venv_bin / \"python\",\n \"-c\",\n dedent(\n \"\"\"\n import os, sys, sysconfig, platform\n print([\n os.name,\n sys.platform,\n sys.implementation._multiarch,\n sysconfig.get_platform()\n ])\n \"\"\"\n ),\n ],\n capture_output=True,\n encoding=\"utf8\",\n )\n check_result(result, \"ERROR: failed to invoke Pyodide\")\n platform_data = result.stdout\n sysconfigdata_dir = Path(get_make_flag(\"TARGETINSTALLDIR\")) / \"sysconfigdata\"\n\n return dedent(\n f\"\"\"\\\n import os\n import sys\n os_name, sys_platform, multiarch, host_platform = {platform_data}\n os.name = os_name\n sys.platform = sys_platform\n sys.implementation._multiarch = multiarch\n os.environ[\"_PYTHON_HOST_PLATFORM\"] = host_platform\n os.environ[\"_PYTHON_SYSCONFIGDATA_NAME\"] = f'_sysconfigdata_{{sys.abiflags}}_{{sys.platform}}_{{sys.implementation._multiarch}}'\n sys.path.append(\"{sysconfigdata_dir}\")\n import sysconfig\n sysconfig.get_config_vars()\n del os.environ[\"_PYTHON_SYSCONFIGDATA_NAME\"]\n \"\"\"\n )\n\n\ndef create_pip_script(venv_bin):\n \"\"\"Create pip and write it into the virtualenv bin folder.\"\"\"\n # pip needs to run in the host Python not in Pyodide, so we'll use the host\n # Python in the shebang. 
Use whichever Python was used to invoke\n # pyodide venv.\n host_python_path = venv_bin / f\"python{get_pyversion()}-host\"\n host_python_path.symlink_to(sys.executable)\n\n (venv_bin / \"pip\").write_text(\n # Other than the shebang and the monkey patch, this is exactly what\n # normal pip looks like.\n f\"#!{host_python_path} -s\\n\"\n + get_pip_monkeypatch(venv_bin)\n + dedent(\n \"\"\"\n import re\n import sys\n from pip._internal.cli.main import main\n if __name__ == '__main__':\n sys.argv[0] = re.sub(r'(-script\\\\.pyw|\\\\.exe)?$', '', sys.argv[0])\n sys.exit(main())\n \"\"\"\n )\n )\n (venv_bin / \"pip\").chmod(0o777)\n\n pyversion = get_pyversion()\n other_pips = [\n venv_bin / \"pip3\",\n venv_bin / f\"pip{pyversion}\",\n venv_bin / f\"pip-{pyversion}\",\n ]\n\n for pip in other_pips:\n pip.unlink()\n pip.symlink_to(venv_bin / \"pip\")\n\n\ndef create_pyodide_script(venv_bin: Path) -> None:\n \"\"\"Write pyodide cli script into the virtualenv bin folder\"\"\"\n import os\n\n # Temporarily restore us to the environment that 'pyodide venv' was\n # invoked in\n PATH = os.environ[\"PATH\"]\n PYODIDE_ROOT = os.environ[\"PYODIDE_ROOT\"]\n\n original_pyodide_cli = shutil.which(\"pyodide\")\n if original_pyodide_cli is None:\n raise RuntimeError(\"ERROR: pyodide cli not found\")\n\n pyodide_path = venv_bin / \"pyodide\"\n pyodide_path.write_text(\n dedent(\n f\"\"\"\n #!/bin/sh\n PATH='{PATH}' PYODIDE_ROOT='{PYODIDE_ROOT}' exec {original_pyodide_cli} \"$@\"\n \"\"\"\n )\n )\n pyodide_path.chmod(0o777)\n\n\ndef install_stdlib(venv_bin: Path) -> None:\n \"\"\"Install micropip and all unvendored stdlib modules\"\"\"\n # Micropip we could install with pip hypothetically, but because we use\n # `--extra-index-url` it would install the pypi version which we don't want.\n\n # Other stuff we need to load with loadPackage\n # TODO: Also load all shared libs.\n to_load = [\"micropip\"]\n result = subprocess.run(\n [\n venv_bin / \"python\",\n \"-c\",\n dedent(\n f\"\"\"\n from _pyodide._importhook import UNVENDORED_STDLIBS_AND_TEST;\n from pyodide_js import loadPackage;\n from pyodide_js._api import repodata_packages\n shared_libs = [pkgname for (pkgname,pkg) in repodata_packages.object_entries() if getattr(pkg, \"shared_library\", False)]\n\n to_load = [*UNVENDORED_STDLIBS_AND_TEST, *shared_libs, *{to_load!r}]\n loadPackage(to_load);\n \"\"\"\n ),\n ],\n capture_output=True,\n encoding=\"utf8\",\n )\n check_result(result, \"ERROR: failed to install unvendored stdlib modules\")\n\n\ndef create_pyodide_venv(dest: Path) -> None:\n \"\"\"Create a Pyodide virtualenv and store it into dest\"\"\"\n logger.info(f\"Creating Pyodide virtualenv at {dest}\")\n from virtualenv import session_via_cli\n\n if dest.exists():\n logger.error(f\"ERROR: dest directory '{dest}' already exists\")\n sys.exit(1)\n\n check_emscripten_version()\n\n interp_path = pyodide_dist_dir() / \"python\"\n session = session_via_cli([\"--no-wheel\", \"-p\", str(interp_path), str(dest)])\n check_host_python_version(session)\n\n try:\n session.run()\n venv_root = Path(session.creator.dest).absolute()\n venv_bin = venv_root / \"bin\"\n\n logger.info(\"... Configuring virtualenv\")\n create_pip_conf(venv_root)\n create_pip_script(venv_bin)\n create_pyodide_script(venv_bin)\n logger.info(\"... 
Installing standard library\")\n install_stdlib(venv_bin)\n except (Exception, KeyboardInterrupt, SystemExit):\n shutil.rmtree(session.creator.dest)\n raise\n\n logger.success(\"Successfully created Pyodide virtual environment!\")\n", "path": "pyodide-build/pyodide_build/out_of_tree/venv.py"}]}
qtile__qtile-2710
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CurrentLayout widget show wrong layout hello everyone , when the system boots up **"CurrentLayout"** widget shows the **max** layout as the current layout which is wrong. I have set the **monadtall** as my default layout but when I restart/reload the qtile with shortcut keys it shows the correct current layout. My layout configuration --> ``` layout_theme = {"border_width": 4, "margin": 8, "border_focus": "#d79921", "border_normal": "#1D2330" } layouts = [ layout.Max(), # layout.Matrix(**layout_theme), layout.MonadTall(**layout_theme), layout.MonadWide(**layout_theme), layout.Floating( border_focus='#d79921', border_width=2 ), layout.TreeTab( font="Ubuntu", fontsize=12, sections=["FIRST", "SECOND", "THIRD", "FOURTH"], section_fontsize=10, border_width=2, bg_color="#1c1f24", active_bg="#d75F5F", active_fg="#000000", inactive_bg="#a89984", inactive_fg="#1c1f24", padding_left=0, padding_x=0, padding_y=5, section_top=10, section_bottom=20, level_shift=8, vspace=3, panel_width=200 ), # layout.VerticalTile(**layout_theme), ] ``` ![2021-08-21_07-58](https://user-images.githubusercontent.com/22582725/130308195-988c9280-a83c-45aa-9417-41a05eefec41.png) Am doing something wrong ? qtile version -- 0.18.1.dev0+g8e7ecc0a.d20210719 os - Arch </issue> <code> [start of libqtile/widget/currentlayout.py] 1 # -*- coding: utf-8 -*- 2 # Copyright (c) 2011 Florian Mounier 3 # Copyright (c) 2011 Kenji_Takahashi 4 # Copyright (c) 2012 roger 5 # Copyright (c) 2012, 2014 Tycho Andersen 6 # Copyright (c) 2012 Maximilian Köhl 7 # Copyright (c) 2013 Craig Barnes 8 # Copyright (c) 2014 Sean Vig 9 # Copyright (c) 2014 Adi Sieker 10 # 11 # Permission is hereby granted, free of charge, to any person obtaining a copy 12 # of this software and associated documentation files (the "Software"), to deal 13 # in the Software without restriction, including without limitation the rights 14 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 15 # copies of the Software, and to permit persons to whom the Software is 16 # furnished to do so, subject to the following conditions: 17 # 18 # The above copyright notice and this permission notice shall be included in 19 # all copies or substantial portions of the Software. 20 # 21 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 22 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 23 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 24 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 25 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 26 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 27 # SOFTWARE. 28 import os 29 30 import cairocffi 31 32 from libqtile import bar, hook 33 from libqtile.log_utils import logger 34 from libqtile.widget import base 35 36 37 class CurrentLayout(base._TextBox): 38 """ 39 Display the name of the current layout of the current group of the screen, 40 the bar containing the widget, is on. 
41 """ 42 orientations = base.ORIENTATION_HORIZONTAL 43 44 def __init__(self, width=bar.CALCULATED, **config): 45 base._TextBox.__init__(self, "", width, **config) 46 47 def _configure(self, qtile, bar): 48 base._TextBox._configure(self, qtile, bar) 49 self.text = self.bar.screen.group.layouts[0].name 50 self.setup_hooks() 51 52 self.add_callbacks({ 53 'Button1': qtile.cmd_next_layout, 54 'Button2': qtile.cmd_prev_layout, 55 }) 56 57 def setup_hooks(self): 58 def hook_response(layout, group): 59 if group.screen is not None and group.screen == self.bar.screen: 60 self.text = layout.name 61 self.bar.draw() 62 hook.subscribe.layout_change(hook_response) 63 64 65 class CurrentLayoutIcon(base._TextBox): 66 """ 67 Display the icon representing the current layout of the 68 current group of the screen on which the bar containing the widget is. 69 70 If you are using custom layouts, a default icon with question mark 71 will be displayed for them. If you want to use custom icon for your own 72 layout, for example, `FooGrid`, then create a file named 73 "layout-foogrid.png" and place it in `~/.icons` directory. You can as well 74 use other directories, but then you need to specify those directories 75 in `custom_icon_paths` argument for this plugin. 76 77 The order of icon search is: 78 79 - dirs in `custom_icon_paths` config argument 80 - `~/.icons` 81 - built-in qtile icons 82 """ 83 orientations = base.ORIENTATION_HORIZONTAL 84 85 defaults = [ 86 ( 87 'scale', 88 1, 89 'Scale factor relative to the bar height. ' 90 'Defaults to 1' 91 ), 92 ( 93 'custom_icon_paths', 94 [], 95 'List of folders where to search icons before' 96 'using built-in icons or icons in ~/.icons dir. ' 97 'This can also be used to provide' 98 'missing icons for custom layouts. ' 99 'Defaults to empty list.' 100 ) 101 ] 102 103 def __init__(self, **config): 104 base._TextBox.__init__(self, "", **config) 105 self.add_defaults(CurrentLayoutIcon.defaults) 106 self.scale = 1.0 / self.scale 107 108 self.length_type = bar.STATIC 109 self.length = 0 110 111 def _configure(self, qtile, bar): 112 base._TextBox._configure(self, qtile, bar) 113 self.text = self.bar.screen.group.layouts[0].name 114 self.current_layout = self.text 115 self.icons_loaded = False 116 self.icon_paths = [] 117 self.surfaces = {} 118 self._update_icon_paths() 119 self._setup_images() 120 self._setup_hooks() 121 122 self.add_callbacks({ 123 'Button1': qtile.cmd_next_layout, 124 'Button2': qtile.cmd_prev_layout, 125 }) 126 127 def _setup_hooks(self): 128 """ 129 Listens for layout change and performs a redraw when it occurs. 130 """ 131 def hook_response(layout, group): 132 if group.screen is not None and group.screen == self.bar.screen: 133 self.current_layout = layout.name 134 self.bar.draw() 135 hook.subscribe.layout_change(hook_response) 136 137 def draw(self): 138 if self.icons_loaded: 139 try: 140 surface = self.surfaces[self.current_layout] 141 except KeyError: 142 logger.error('No icon for layout {}'.format( 143 self.current_layout 144 )) 145 else: 146 self.drawer.clear(self.background or self.bar.background) 147 self.drawer.ctx.set_source(surface) 148 self.drawer.ctx.paint() 149 self.drawer.draw(offsetx=self.offset, width=self.length) 150 else: 151 # Fallback to text 152 self.text = self.current_layout[0].upper() 153 base._TextBox.draw(self) 154 155 def _get_layout_names(self): 156 """ 157 Returns the list of lowercased strings for each available layout name. 
158 """ 159 return [layout.__class__.__name__.lower() for layout in self.qtile.config.layouts] 160 161 def _update_icon_paths(self): 162 self.icon_paths = [] 163 164 # We allow user to override icon search path 165 self.icon_paths.extend(self.custom_icon_paths) 166 167 # We also look in ~/.icons/ 168 self.icon_paths.append(os.path.expanduser('~/.icons')) 169 170 # Default icons are in libqtile/resources/layout-icons. 171 # If using default config without any custom icons, 172 # this path will be used. 173 root = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2]) 174 self.icon_paths.append(os.path.join(root, 'resources', 'layout-icons')) 175 176 def find_icon_file_path(self, layout_name): 177 icon_filename = 'layout-{}.png'.format(layout_name) 178 for icon_path in self.icon_paths: 179 icon_file_path = os.path.join(icon_path, icon_filename) 180 if os.path.isfile(icon_file_path): 181 return icon_file_path 182 183 def _setup_images(self): 184 """ 185 Loads layout icons. 186 """ 187 for layout_name in self._get_layout_names(): 188 icon_file_path = self.find_icon_file_path(layout_name) 189 if icon_file_path is None: 190 logger.warning('No icon found for layout "{}"'.format(layout_name)) 191 icon_file_path = self.find_icon_file_path('unknown') 192 193 try: 194 img = cairocffi.ImageSurface.create_from_png(icon_file_path) 195 except (cairocffi.Error, IOError) as e: 196 # Icon file is guaranteed to exist at this point. 197 # If this exception happens, it means the icon file contains 198 # an invalid image or is not readable. 199 self.icons_loaded = False 200 logger.exception( 201 'Failed to load icon from file "{}", ' 202 'error was: {}'.format(icon_file_path, e.message) 203 ) 204 return 205 206 input_width = img.get_width() 207 input_height = img.get_height() 208 209 sp = input_height / (self.bar.height - 1) 210 211 width = input_width / sp 212 if width > self.length: 213 self.length = int(width) + self.actual_padding * 2 214 215 imgpat = cairocffi.SurfacePattern(img) 216 217 scaler = cairocffi.Matrix() 218 219 scaler.scale(sp, sp) 220 scaler.scale(self.scale, self.scale) 221 factor = (1 - 1 / self.scale) / 2 222 scaler.translate(-width * factor, -width * factor) 223 scaler.translate(self.actual_padding * -1, 0) 224 imgpat.set_matrix(scaler) 225 226 imgpat.set_filter(cairocffi.FILTER_BEST) 227 self.surfaces[layout_name] = imgpat 228 229 self.icons_loaded = True 230 [end of libqtile/widget/currentlayout.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/widget/currentlayout.py b/libqtile/widget/currentlayout.py --- a/libqtile/widget/currentlayout.py +++ b/libqtile/widget/currentlayout.py @@ -46,7 +46,8 @@ def _configure(self, qtile, bar): base._TextBox._configure(self, qtile, bar) - self.text = self.bar.screen.group.layouts[0].name + layout_id = self.bar.screen.group.current_layout + self.text = self.bar.screen.group.layouts[layout_id].name self.setup_hooks() self.add_callbacks({ @@ -110,7 +111,8 @@ def _configure(self, qtile, bar): base._TextBox._configure(self, qtile, bar) - self.text = self.bar.screen.group.layouts[0].name + layout_id = self.bar.screen.group.current_layout + self.text = self.bar.screen.group.layouts[layout_id].name self.current_layout = self.text self.icons_loaded = False self.icon_paths = []
{"golden_diff": "diff --git a/libqtile/widget/currentlayout.py b/libqtile/widget/currentlayout.py\n--- a/libqtile/widget/currentlayout.py\n+++ b/libqtile/widget/currentlayout.py\n@@ -46,7 +46,8 @@\n \n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n- self.text = self.bar.screen.group.layouts[0].name\n+ layout_id = self.bar.screen.group.current_layout\n+ self.text = self.bar.screen.group.layouts[layout_id].name\n self.setup_hooks()\n \n self.add_callbacks({\n@@ -110,7 +111,8 @@\n \n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n- self.text = self.bar.screen.group.layouts[0].name\n+ layout_id = self.bar.screen.group.current_layout\n+ self.text = self.bar.screen.group.layouts[layout_id].name\n self.current_layout = self.text\n self.icons_loaded = False\n self.icon_paths = []\n", "issue": "CurrentLayout widget show wrong layout\nhello everyone , \r\n\r\nwhen the system boots up **\"CurrentLayout\"** widget shows the **max** layout as the current layout which is wrong. I have set the **monadtall** as my default layout but when I restart/reload the qtile with shortcut keys it shows the correct current layout. \r\n\r\nMy layout configuration --> \r\n\r\n```\r\nlayout_theme = {\"border_width\": 4,\r\n \"margin\": 8,\r\n \"border_focus\": \"#d79921\",\r\n \"border_normal\": \"#1D2330\"\r\n }\r\n\r\nlayouts = [\r\n layout.Max(),\r\n # layout.Matrix(**layout_theme),\r\n layout.MonadTall(**layout_theme),\r\n layout.MonadWide(**layout_theme),\r\n layout.Floating(\r\n border_focus='#d79921',\r\n border_width=2\r\n ),\r\n layout.TreeTab(\r\n font=\"Ubuntu\",\r\n fontsize=12,\r\n sections=[\"FIRST\", \"SECOND\", \"THIRD\", \"FOURTH\"],\r\n section_fontsize=10,\r\n border_width=2,\r\n bg_color=\"#1c1f24\",\r\n active_bg=\"#d75F5F\",\r\n active_fg=\"#000000\",\r\n inactive_bg=\"#a89984\",\r\n inactive_fg=\"#1c1f24\",\r\n padding_left=0,\r\n padding_x=0,\r\n padding_y=5,\r\n section_top=10,\r\n section_bottom=20,\r\n level_shift=8,\r\n vspace=3,\r\n panel_width=200\r\n ),\r\n # layout.VerticalTile(**layout_theme),\r\n]\r\n\r\n```\r\n\r\n![2021-08-21_07-58](https://user-images.githubusercontent.com/22582725/130308195-988c9280-a83c-45aa-9417-41a05eefec41.png)\r\n\r\nAm doing something wrong ?\r\n\r\nqtile version -- 0.18.1.dev0+g8e7ecc0a.d20210719\r\nos - Arch \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2011 Florian Mounier\n# Copyright (c) 2011 Kenji_Takahashi\n# Copyright (c) 2012 roger\n# Copyright (c) 2012, 2014 Tycho Andersen\n# Copyright (c) 2012 Maximilian K\u00f6hl\n# Copyright (c) 2013 Craig Barnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\n\nimport cairocffi\n\nfrom libqtile import bar, hook\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass CurrentLayout(base._TextBox):\n \"\"\"\n Display the name of the current layout of the current group of the screen,\n the bar containing the widget, is on.\n \"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n\n def __init__(self, width=bar.CALCULATED, **config):\n base._TextBox.__init__(self, \"\", width, **config)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n self.text = self.bar.screen.group.layouts[0].name\n self.setup_hooks()\n\n self.add_callbacks({\n 'Button1': qtile.cmd_next_layout,\n 'Button2': qtile.cmd_prev_layout,\n })\n\n def setup_hooks(self):\n def hook_response(layout, group):\n if group.screen is not None and group.screen == self.bar.screen:\n self.text = layout.name\n self.bar.draw()\n hook.subscribe.layout_change(hook_response)\n\n\nclass CurrentLayoutIcon(base._TextBox):\n \"\"\"\n Display the icon representing the current layout of the\n current group of the screen on which the bar containing the widget is.\n\n If you are using custom layouts, a default icon with question mark\n will be displayed for them. If you want to use custom icon for your own\n layout, for example, `FooGrid`, then create a file named\n \"layout-foogrid.png\" and place it in `~/.icons` directory. You can as well\n use other directories, but then you need to specify those directories\n in `custom_icon_paths` argument for this plugin.\n\n The order of icon search is:\n\n - dirs in `custom_icon_paths` config argument\n - `~/.icons`\n - built-in qtile icons\n \"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n\n defaults = [\n (\n 'scale',\n 1,\n 'Scale factor relative to the bar height. '\n 'Defaults to 1'\n ),\n (\n 'custom_icon_paths',\n [],\n 'List of folders where to search icons before'\n 'using built-in icons or icons in ~/.icons dir. '\n 'This can also be used to provide'\n 'missing icons for custom layouts. 
'\n 'Defaults to empty list.'\n )\n ]\n\n def __init__(self, **config):\n base._TextBox.__init__(self, \"\", **config)\n self.add_defaults(CurrentLayoutIcon.defaults)\n self.scale = 1.0 / self.scale\n\n self.length_type = bar.STATIC\n self.length = 0\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n self.text = self.bar.screen.group.layouts[0].name\n self.current_layout = self.text\n self.icons_loaded = False\n self.icon_paths = []\n self.surfaces = {}\n self._update_icon_paths()\n self._setup_images()\n self._setup_hooks()\n\n self.add_callbacks({\n 'Button1': qtile.cmd_next_layout,\n 'Button2': qtile.cmd_prev_layout,\n })\n\n def _setup_hooks(self):\n \"\"\"\n Listens for layout change and performs a redraw when it occurs.\n \"\"\"\n def hook_response(layout, group):\n if group.screen is not None and group.screen == self.bar.screen:\n self.current_layout = layout.name\n self.bar.draw()\n hook.subscribe.layout_change(hook_response)\n\n def draw(self):\n if self.icons_loaded:\n try:\n surface = self.surfaces[self.current_layout]\n except KeyError:\n logger.error('No icon for layout {}'.format(\n self.current_layout\n ))\n else:\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.ctx.set_source(surface)\n self.drawer.ctx.paint()\n self.drawer.draw(offsetx=self.offset, width=self.length)\n else:\n # Fallback to text\n self.text = self.current_layout[0].upper()\n base._TextBox.draw(self)\n\n def _get_layout_names(self):\n \"\"\"\n Returns the list of lowercased strings for each available layout name.\n \"\"\"\n return [layout.__class__.__name__.lower() for layout in self.qtile.config.layouts]\n\n def _update_icon_paths(self):\n self.icon_paths = []\n\n # We allow user to override icon search path\n self.icon_paths.extend(self.custom_icon_paths)\n\n # We also look in ~/.icons/\n self.icon_paths.append(os.path.expanduser('~/.icons'))\n\n # Default icons are in libqtile/resources/layout-icons.\n # If using default config without any custom icons,\n # this path will be used.\n root = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])\n self.icon_paths.append(os.path.join(root, 'resources', 'layout-icons'))\n\n def find_icon_file_path(self, layout_name):\n icon_filename = 'layout-{}.png'.format(layout_name)\n for icon_path in self.icon_paths:\n icon_file_path = os.path.join(icon_path, icon_filename)\n if os.path.isfile(icon_file_path):\n return icon_file_path\n\n def _setup_images(self):\n \"\"\"\n Loads layout icons.\n \"\"\"\n for layout_name in self._get_layout_names():\n icon_file_path = self.find_icon_file_path(layout_name)\n if icon_file_path is None:\n logger.warning('No icon found for layout \"{}\"'.format(layout_name))\n icon_file_path = self.find_icon_file_path('unknown')\n\n try:\n img = cairocffi.ImageSurface.create_from_png(icon_file_path)\n except (cairocffi.Error, IOError) as e:\n # Icon file is guaranteed to exist at this point.\n # If this exception happens, it means the icon file contains\n # an invalid image or is not readable.\n self.icons_loaded = False\n logger.exception(\n 'Failed to load icon from file \"{}\", '\n 'error was: {}'.format(icon_file_path, e.message)\n )\n return\n\n input_width = img.get_width()\n input_height = img.get_height()\n\n sp = input_height / (self.bar.height - 1)\n\n width = input_width / sp\n if width > self.length:\n self.length = int(width) + self.actual_padding * 2\n\n imgpat = cairocffi.SurfacePattern(img)\n\n scaler = cairocffi.Matrix()\n\n scaler.scale(sp, sp)\n 
scaler.scale(self.scale, self.scale)\n factor = (1 - 1 / self.scale) / 2\n scaler.translate(-width * factor, -width * factor)\n scaler.translate(self.actual_padding * -1, 0)\n imgpat.set_matrix(scaler)\n\n imgpat.set_filter(cairocffi.FILTER_BEST)\n self.surfaces[layout_name] = imgpat\n\n self.icons_loaded = True\n", "path": "libqtile/widget/currentlayout.py"}]}
voxel51__fiftyone-3317
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [INSTALL] Some `setup.py` dependencies are not used. ### System information - **OS Platform and Distribution** (e.g., Linux Ubuntu 16.04): Ubuntu 22.04 - **Python version** (`python --version`): 3.9.17 - **FiftyOne version** (`fiftyone --version`): 0.21 - **FiftyOne installed from** (pip or source): pip/poetry ### Describe the problem My main issue was related to `eventlet` library, so I started looking where it's used. When I didn't find a place where it's used I decided to go through `setup.py` to find other unused ones. Here's the list with comments: - `eventlet` - not used in the project but causes issues when it's installed; for example, `FastAPI` crashes during file downloading with a traceback pointing to a file descriptor used by a `socket` library (which is basically `asyncio` internals), and that makes sense because `eventlet` is just a hack to make sync code async. Also I saw the same issues with `s3fs` library. - `future` - this library is for Python2/3 compatibility; not sure if this is needed since the minimum version of Python this project supports is 3.7. - `kaleido` - `kaleido` after installation exports the same symbol `plotly` which I believe is being overwritten with the `plotly` library. **Question**: Am I missing something or these dependencies are really not used in the project? If so is it possible to remove them? (I can make a PR) </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 """ 3 Installs FiftyOne. 4 5 | Copyright 2017-2023, Voxel51, Inc. 6 | `voxel51.com <https://voxel51.com/>`_ 7 | 8 """ 9 try: 10 from importlib import metadata 11 except ImportError: 12 import importlib_metadata as metadata 13 14 import os 15 import re 16 from setuptools import setup, find_packages 17 18 19 VERSION = "0.21.5" 20 21 22 def get_version(): 23 if "RELEASE_VERSION" in os.environ: 24 version = os.environ["RELEASE_VERSION"] 25 if not version.startswith(VERSION): 26 raise ValueError( 27 "Release version does not match version: %s and %s" 28 % (version, VERSION) 29 ) 30 return version 31 32 return VERSION 33 34 35 INSTALL_REQUIRES = [ 36 # third-party packages 37 "aiofiles", 38 "argcomplete", 39 "boto3", 40 "cachetools", 41 "dacite>=1.6.0,<1.8.0", 42 "Deprecated", 43 "eventlet", 44 "ftfy", 45 "future", 46 "hypercorn>=0.13.2", 47 "importlib-metadata; python_version<'3.8'", 48 "Jinja2>=3", 49 "kaleido", 50 "matplotlib", 51 "mongoengine==0.24.2", 52 "motor>=2.5", 53 "numpy", 54 "packaging", 55 "pandas", 56 "Pillow>=6.2", 57 "plotly>=4.14", 58 "pprintpp", 59 "psutil", 60 "pymongo>=3.12", 61 "pytz", 62 "PyYAML", 63 "regex", 64 "retrying", 65 "scikit-learn", 66 "scikit-image", 67 "setuptools", 68 "sseclient-py>=1.7.2,<2", 69 "sse-starlette>=0.10.3,<1", 70 "starlette>=0.24.0", 71 "strawberry-graphql==0.138.1", 72 "tabulate", 73 "xmltodict", 74 "universal-analytics-python3>=1.0.1,<2", 75 # internal packages 76 "fiftyone-brain>=0.13,<0.14", 77 "fiftyone-db>=0.4,<0.5", 78 "voxel51-eta>=0.10,<0.11", 79 ] 80 81 82 CHOOSE_INSTALL_REQUIRES = [ 83 ( 84 ( 85 "opencv-python", 86 "opencv-contrib-python", 87 "opencv-contrib-python-headless", 88 ), 89 "opencv-python-headless", 90 ) 91 ] 92 93 94 def choose_requirement(mains, secondary): 95 chosen = secondary 96 for main in mains: 97 try: 98 name = re.split(r"[!<>=]", main)[0] 99 metadata.version(name) 100 chosen = main 101 break 102 except metadata.PackageNotFoundError: 103 pass 104 105 return str(chosen) 106 107 108 def 
get_install_requirements(install_requires, choose_install_requires): 109 for mains, secondary in choose_install_requires: 110 install_requires.append(choose_requirement(mains, secondary)) 111 112 return install_requires 113 114 115 EXTRAS_REQUIREMENTS = {"desktop": ["fiftyone-desktop>=0.28.2,<0.29"]} 116 117 118 with open("README.md", "r") as fh: 119 long_description = fh.read() 120 121 122 setup( 123 name="fiftyone", 124 version=get_version(), 125 description=( 126 "FiftyOne: the open-source tool for building high-quality datasets " 127 "and computer vision models" 128 ), 129 author="Voxel51, Inc.", 130 author_email="[email protected]", 131 url="https://github.com/voxel51/fiftyone", 132 extras_require=EXTRAS_REQUIREMENTS, 133 license="Apache", 134 long_description=long_description, 135 long_description_content_type="text/markdown", 136 packages=find_packages( 137 exclude=["app", "eta", "package", "requirements", "tests", "tools"] 138 ) 139 + ["fiftyone.recipes", "fiftyone.tutorials"], 140 package_dir={ 141 "fiftyone.recipes": "docs/source/recipes", 142 "fiftyone.tutorials": "docs/source/tutorials", 143 }, 144 install_requires=get_install_requirements( 145 INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES 146 ), 147 include_package_data=True, 148 classifiers=[ 149 "Development Status :: 4 - Beta", 150 "Intended Audience :: Developers", 151 "Intended Audience :: Science/Research", 152 "License :: OSI Approved :: Apache Software License", 153 "Topic :: Scientific/Engineering :: Artificial Intelligence", 154 "Topic :: Scientific/Engineering :: Image Processing", 155 "Topic :: Scientific/Engineering :: Image Recognition", 156 "Topic :: Scientific/Engineering :: Information Analysis", 157 "Topic :: Scientific/Engineering :: Visualization", 158 "Operating System :: MacOS :: MacOS X", 159 "Operating System :: POSIX :: Linux", 160 "Operating System :: Microsoft :: Windows", 161 "Programming Language :: Python :: 3", 162 "Programming Language :: Python :: 3.7", 163 "Programming Language :: Python :: 3.8", 164 "Programming Language :: Python :: 3.9", 165 "Programming Language :: Python :: 3.10", 166 ], 167 entry_points={"console_scripts": ["fiftyone=fiftyone.core.cli:main"]}, 168 python_requires=">=3.7", 169 ) 170 [end of setup.py] [start of app/packages/looker-3d/load_kiti.py] 1 import fiftyone as fo 2 import fiftyone.zoo as foz 3 import os 4 5 # dataset = foz.load_zoo_dataset("kitti", split="train") 6 7 dataset = fo.load_dataset("kitti-copy") 8 9 dataset.add_sample_field("pcd_filepath", fo.StringField) 10 11 # firstSample = dataset.first() 12 13 # dataDir = os.path.dirname(firstSample.filepath) 14 # dataDirParent = os.path.dirname(dataDir) 15 # pointsDir = os.path.join(dataDirParent, 'pcds') 16 # print(pointsDir) 17 18 # for sample in dataset.iter_samples(progress=True): 19 # base = os.path.basename(sample.filepath) 20 # filename = os.path.splitext(base)[0] 21 # pcd_filename = filename + '.bin.pcd' 22 # pcd_filepath = os.path.join(pointsDir, pcd_filename) 23 # sample['pcd_filepath'] = pcd_filepath 24 # sample.save() 25 [end of app/packages/looker-3d/load_kiti.py] [start of app/packages/looker-3d/load_kitti_labels.py] 1 import fiftyone as fo 2 import fiftyone.zoo as foz 3 import os 4 5 # dataset = foz.load_zoo_dataset("kitti", split="train") 6 7 dataset = fo.load_dataset("kitti-pcd") 8 9 # dataset.add_sample_field('pcd_filepath', fo.StringField) 10 11 firstSample = dataset.first() 12 13 print(firstSample) 14 15 # dataDir = os.path.dirname(firstSample.filepath) 16 # dataDirParent = os.path.dirname(dataDir) 
17 # pointsDir = os.path.join(dataDirParent, 'pcds') 18 # print(pointsDir) 19 20 # for sample in dataset.iter_samples(progress=True): 21 # base = os.path.basename(sample.filepath) 22 # filename = os.path.splitext(base)[0] 23 # pcd_filename = filename + '.bin.pcd' 24 # pcd_filepath = os.path.join(pointsDir, pcd_filename) 25 # sample['pcd_filepath'] = pcd_filepath 26 # sample.save() 27 [end of app/packages/looker-3d/load_kitti_labels.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/packages/looker-3d/load_kiti.py b/app/packages/looker-3d/load_kiti.py deleted file mode 100644 --- a/app/packages/looker-3d/load_kiti.py +++ /dev/null @@ -1,24 +0,0 @@ -import fiftyone as fo -import fiftyone.zoo as foz -import os - -# dataset = foz.load_zoo_dataset("kitti", split="train") - -dataset = fo.load_dataset("kitti-copy") - -dataset.add_sample_field("pcd_filepath", fo.StringField) - -# firstSample = dataset.first() - -# dataDir = os.path.dirname(firstSample.filepath) -# dataDirParent = os.path.dirname(dataDir) -# pointsDir = os.path.join(dataDirParent, 'pcds') -# print(pointsDir) - -# for sample in dataset.iter_samples(progress=True): -# base = os.path.basename(sample.filepath) -# filename = os.path.splitext(base)[0] -# pcd_filename = filename + '.bin.pcd' -# pcd_filepath = os.path.join(pointsDir, pcd_filename) -# sample['pcd_filepath'] = pcd_filepath -# sample.save() diff --git a/app/packages/looker-3d/load_kitti_labels.py b/app/packages/looker-3d/load_kitti_labels.py deleted file mode 100644 --- a/app/packages/looker-3d/load_kitti_labels.py +++ /dev/null @@ -1,26 +0,0 @@ -import fiftyone as fo -import fiftyone.zoo as foz -import os - -# dataset = foz.load_zoo_dataset("kitti", split="train") - -dataset = fo.load_dataset("kitti-pcd") - -# dataset.add_sample_field('pcd_filepath', fo.StringField) - -firstSample = dataset.first() - -print(firstSample) - -# dataDir = os.path.dirname(firstSample.filepath) -# dataDirParent = os.path.dirname(dataDir) -# pointsDir = os.path.join(dataDirParent, 'pcds') -# print(pointsDir) - -# for sample in dataset.iter_samples(progress=True): -# base = os.path.basename(sample.filepath) -# filename = os.path.splitext(base)[0] -# pcd_filename = filename + '.bin.pcd' -# pcd_filepath = os.path.join(pointsDir, pcd_filename) -# sample['pcd_filepath'] = pcd_filepath -# sample.save() diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -40,12 +40,12 @@ "cachetools", "dacite>=1.6.0,<1.8.0", "Deprecated", - "eventlet", "ftfy", - "future", "hypercorn>=0.13.2", "importlib-metadata; python_version<'3.8'", "Jinja2>=3", + # kaleido indirectly required by plotly for image export + # https://plotly.com/python/static-image-export/ "kaleido", "matplotlib", "mongoengine==0.24.2",
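A quick way to sanity-check which of the flagged packages are actually imported anywhere in the code base before trimming `install_requires` is to scan the source tree for import statements. The sketch below is a rough standalone illustration, not part of the repository or of the patch; the source directory name and the suspect list are assumptions taken from the issue text.

```python
import ast
import pathlib

# Packages the issue flags as possibly unused.
SUSPECTS = {"eventlet", "future", "kaleido"}


def imported_top_level_names(src_dir="fiftyone"):
    """Collect the top-level module names imported by any .py file under src_dir."""
    names = set()
    for path in pathlib.Path(src_dir).rglob("*.py"):
        tree = ast.parse(path.read_text(encoding="utf-8"), filename=str(path))
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                names.update(alias.name.split(".")[0] for alias in node.names)
            elif isinstance(node, ast.ImportFrom) and node.module:
                names.add(node.module.split(".")[0])
    return names


# Caveat: a package such as kaleido can be required without being imported by this
# code base (plotly uses it for static image export), which is why the patch keeps
# kaleido and only adds a comment documenting the indirect requirement.
print("Declared but never imported:", sorted(SUSPECTS - imported_top_level_names()))
```

This only catches direct imports, so dependencies pulled in for side effects or optional features still need a manual check, as the kaleido case shows.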
{"golden_diff": "diff --git a/app/packages/looker-3d/load_kiti.py b/app/packages/looker-3d/load_kiti.py\ndeleted file mode 100644\n--- a/app/packages/looker-3d/load_kiti.py\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-import fiftyone as fo\n-import fiftyone.zoo as foz\n-import os\n-\n-# dataset = foz.load_zoo_dataset(\"kitti\", split=\"train\")\n-\n-dataset = fo.load_dataset(\"kitti-copy\")\n-\n-dataset.add_sample_field(\"pcd_filepath\", fo.StringField)\n-\n-# firstSample = dataset.first()\n-\n-# dataDir = os.path.dirname(firstSample.filepath)\n-# dataDirParent = os.path.dirname(dataDir)\n-# pointsDir = os.path.join(dataDirParent, 'pcds')\n-# print(pointsDir)\n-\n-# for sample in dataset.iter_samples(progress=True):\n-# base = os.path.basename(sample.filepath)\n-# filename = os.path.splitext(base)[0]\n-# pcd_filename = filename + '.bin.pcd'\n-# pcd_filepath = os.path.join(pointsDir, pcd_filename)\n-# sample['pcd_filepath'] = pcd_filepath\n-# sample.save()\ndiff --git a/app/packages/looker-3d/load_kitti_labels.py b/app/packages/looker-3d/load_kitti_labels.py\ndeleted file mode 100644\n--- a/app/packages/looker-3d/load_kitti_labels.py\n+++ /dev/null\n@@ -1,26 +0,0 @@\n-import fiftyone as fo\n-import fiftyone.zoo as foz\n-import os\n-\n-# dataset = foz.load_zoo_dataset(\"kitti\", split=\"train\")\n-\n-dataset = fo.load_dataset(\"kitti-pcd\")\n-\n-# dataset.add_sample_field('pcd_filepath', fo.StringField)\n-\n-firstSample = dataset.first()\n-\n-print(firstSample)\n-\n-# dataDir = os.path.dirname(firstSample.filepath)\n-# dataDirParent = os.path.dirname(dataDir)\n-# pointsDir = os.path.join(dataDirParent, 'pcds')\n-# print(pointsDir)\n-\n-# for sample in dataset.iter_samples(progress=True):\n-# base = os.path.basename(sample.filepath)\n-# filename = os.path.splitext(base)[0]\n-# pcd_filename = filename + '.bin.pcd'\n-# pcd_filepath = os.path.join(pointsDir, pcd_filename)\n-# sample['pcd_filepath'] = pcd_filepath\n-# sample.save()\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,12 +40,12 @@\n \"cachetools\",\n \"dacite>=1.6.0,<1.8.0\",\n \"Deprecated\",\n- \"eventlet\",\n \"ftfy\",\n- \"future\",\n \"hypercorn>=0.13.2\",\n \"importlib-metadata; python_version<'3.8'\",\n \"Jinja2>=3\",\n+ # kaleido indirectly required by plotly for image export\n+ # https://plotly.com/python/static-image-export/\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.24.2\",\n", "issue": "[INSTALL] Some `setup.py` dependencies are not used.\n### System information\r\n\r\n- **OS Platform and Distribution** (e.g., Linux Ubuntu 16.04): Ubuntu 22.04\r\n- **Python version** (`python --version`): 3.9.17\r\n- **FiftyOne version** (`fiftyone --version`): 0.21\r\n- **FiftyOne installed from** (pip or source): pip/poetry\r\n\r\n### Describe the problem\r\n\r\nMy main issue was related to `eventlet` library, so I started looking where it's used. When I didn't find a place where it's used I decided to go through `setup.py` to find other unused ones. Here's the list with comments:\r\n\r\n- `eventlet` - not used in the project but causes issues when it's installed; for example, `FastAPI` crashes during file downloading with a traceback pointing to a file descriptor used by a `socket` library (which is basically `asyncio` internals), and that makes sense because `eventlet` is just a hack to make sync code async. 
Also I saw the same issues with `s3fs` library.\r\n- `future` - this library is for Python2/3 compatibility; not sure if this is needed since the minimum version of Python this project supports is 3.7.\r\n- `kaleido` - `kaleido` after installation exports the same symbol `plotly` which I believe is being overwritten with the `plotly` library.\r\n\r\n**Question**: Am I missing something or these dependencies are really not used in the project? If so is it possible to remove them? (I can make a PR)\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\ntry:\n from importlib import metadata\nexcept ImportError:\n import importlib_metadata as metadata\n\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n\nVERSION = \"0.21.5\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nINSTALL_REQUIRES = [\n # third-party packages\n \"aiofiles\",\n \"argcomplete\",\n \"boto3\",\n \"cachetools\",\n \"dacite>=1.6.0,<1.8.0\",\n \"Deprecated\",\n \"eventlet\",\n \"ftfy\",\n \"future\",\n \"hypercorn>=0.13.2\",\n \"importlib-metadata; python_version<'3.8'\",\n \"Jinja2>=3\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.24.2\",\n \"motor>=2.5\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.12\",\n \"pytz\",\n \"PyYAML\",\n \"regex\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"sseclient-py>=1.7.2,<2\",\n \"sse-starlette>=0.10.3,<1\",\n \"starlette>=0.24.0\",\n \"strawberry-graphql==0.138.1\",\n \"tabulate\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"fiftyone-brain>=0.13,<0.14\",\n \"fiftyone-db>=0.4,<0.5\",\n \"voxel51-eta>=0.10,<0.11\",\n]\n\n\nCHOOSE_INSTALL_REQUIRES = [\n (\n (\n \"opencv-python\",\n \"opencv-contrib-python\",\n \"opencv-contrib-python-headless\",\n ),\n \"opencv-python-headless\",\n )\n]\n\n\ndef choose_requirement(mains, secondary):\n chosen = secondary\n for main in mains:\n try:\n name = re.split(r\"[!<>=]\", main)[0]\n metadata.version(name)\n chosen = main\n break\n except metadata.PackageNotFoundError:\n pass\n\n return str(chosen)\n\n\ndef get_install_requirements(install_requires, choose_install_requires):\n for mains, secondary in choose_install_requires:\n install_requires.append(choose_requirement(mains, secondary))\n\n return install_requires\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.28.2,<0.29\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\"app\", \"eta\", \"package\", \"requirements\", \"tests\", \"tools\"]\n )\n + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": 
\"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n install_requires=get_install_requirements(\n INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES\n ),\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.7\",\n)\n", "path": "setup.py"}, {"content": "import fiftyone as fo\nimport fiftyone.zoo as foz\nimport os\n\n# dataset = foz.load_zoo_dataset(\"kitti\", split=\"train\")\n\ndataset = fo.load_dataset(\"kitti-copy\")\n\ndataset.add_sample_field(\"pcd_filepath\", fo.StringField)\n\n# firstSample = dataset.first()\n\n# dataDir = os.path.dirname(firstSample.filepath)\n# dataDirParent = os.path.dirname(dataDir)\n# pointsDir = os.path.join(dataDirParent, 'pcds')\n# print(pointsDir)\n\n# for sample in dataset.iter_samples(progress=True):\n# base = os.path.basename(sample.filepath)\n# filename = os.path.splitext(base)[0]\n# pcd_filename = filename + '.bin.pcd'\n# pcd_filepath = os.path.join(pointsDir, pcd_filename)\n# sample['pcd_filepath'] = pcd_filepath\n# sample.save()\n", "path": "app/packages/looker-3d/load_kiti.py"}, {"content": "import fiftyone as fo\nimport fiftyone.zoo as foz\nimport os\n\n# dataset = foz.load_zoo_dataset(\"kitti\", split=\"train\")\n\ndataset = fo.load_dataset(\"kitti-pcd\")\n\n# dataset.add_sample_field('pcd_filepath', fo.StringField)\n\nfirstSample = dataset.first()\n\nprint(firstSample)\n\n# dataDir = os.path.dirname(firstSample.filepath)\n# dataDirParent = os.path.dirname(dataDir)\n# pointsDir = os.path.join(dataDirParent, 'pcds')\n# print(pointsDir)\n\n# for sample in dataset.iter_samples(progress=True):\n# base = os.path.basename(sample.filepath)\n# filename = os.path.splitext(base)[0]\n# pcd_filename = filename + '.bin.pcd'\n# pcd_filepath = os.path.join(pointsDir, pcd_filename)\n# sample['pcd_filepath'] = pcd_filepath\n# sample.save()\n", "path": "app/packages/looker-3d/load_kitti_labels.py"}]}
gh_patches_debug_24836
rasdani/github-patches
git_diff
sktime__sktime-4036
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] LSTM deep learning estimators failing CI on windows Since recently, a failure of two deep learning estimators has been appearing on windows CI: ``` FAILED sktime/tests/test_all_estimators.py::TestAllEstimators::test_methods_have_no_side_effects[MLPClassifier-1-ClassifierFitPredictMultivariate-predict] FAILED sktime/tests/test_all_estimators.py::TestAllEstimators::test_methods_have_no_side_effects[LSTMFCNClassifier-0-ClassifierFitPredictMultivariate-predict] ``` Note that the failure appears to be only on python 3.9, but this is due to the matrix design which spreads estimators across version/OS combinations. In theory, it could be a 3.9 specific failure, but I think that is less likely than windows specifity (although it may be worth to test that by turning the `matrixdesign` flag off in the CI). </issue> <code> [start of sktime/classification/deep_learning/lstmfcn.py] 1 # -*- coding: utf-8 -*- 2 """LongShort Term Memory Fully Convolutional Network (LSTM-FCN).""" 3 __author__ = ["jnrusson1", "solen0id"] 4 5 __all__ = ["LSTMFCNClassifier"] 6 7 from sklearn.utils import check_random_state 8 9 from sktime.classification.deep_learning.base import BaseDeepClassifier 10 from sktime.networks.lstmfcn import LSTMFCNNetwork 11 from sktime.utils.validation._dependencies import _check_dl_dependencies 12 13 _check_dl_dependencies(severity="warning") 14 15 16 class LSTMFCNClassifier(BaseDeepClassifier): 17 """ 18 19 Implementation of LSTMFCNClassifier from Karim et al (2019) [1]. 20 21 Overview 22 -------- 23 Combines an LSTM arm with a CNN arm. Optionally uses an attention mechanism in the 24 LSTM which the author indicates provides improved performance. 25 26 27 Parameters 28 ---------- 29 n_epochs: int, default=2000 30 the number of epochs to train the model 31 batch_size: int, default=128 32 the number of samples per gradient update. 33 dropout: float, default=0.8 34 controls dropout rate of LSTM layer 35 kernel_sizes: list of ints, default=[8, 5, 3] 36 specifying the length of the 1D convolution windows 37 filter_sizes: int, list of ints, default=[128, 256, 128] 38 size of filter for each conv layer 39 lstm_size: int, default=8 40 output dimension for LSTM layer 41 attention: boolean, default=False 42 If True, uses custom attention LSTM layer 43 callbacks: keras callbacks, default=ReduceLRonPlateau 44 Keras callbacks to use such as learning rate reduction or saving best model 45 based on validation error 46 verbose: 'auto', 0, 1, or 2. Verbosity mode. 47 0 = silent, 1 = progress bar, 2 = one line per epoch. 48 'auto' defaults to 1 for most cases, but 2 when used with 49 `ParameterServerStrategy`. Note that the progress bar is not 50 particularly useful when logged to a file, so verbose=2 is 51 recommended when not running interactively (eg, in a production 52 environment). 53 random_state : int or None, default=None 54 Seed for random, integer. 55 56 57 Notes 58 ----- 59 Ported from sktime-dl source code 60 https://github.com/sktime/sktime-dl/blob/master/sktime_dl/classification/_lstmfcn.py 61 62 References 63 ---------- 64 .. [1] Karim et al. 
Multivariate LSTM-FCNs for Time Series Classification, 2019 65 https://arxiv.org/pdf/1801.04503.pdf 66 67 """ 68 69 _tags = {"python_dependencies": "tensorflow"} 70 71 def __init__( 72 self, 73 n_epochs=100, 74 batch_size=128, 75 dropout=0.8, 76 kernel_sizes=(8, 5, 3), 77 filter_sizes=(128, 256, 128), 78 lstm_size=8, 79 attention=False, 80 callbacks=None, 81 random_state=None, 82 verbose=0, 83 ): 84 85 super(LSTMFCNClassifier, self).__init__() 86 87 # calced in fit 88 self.classes_ = None 89 self.input_shape = None 90 self.model_ = None 91 self.history = None 92 93 # predefined 94 self.n_epochs = n_epochs 95 self.batch_size = batch_size 96 self.kernel_sizes = kernel_sizes 97 self.filter_sizes = filter_sizes 98 self.lstm_size = lstm_size 99 self.dropout = dropout 100 self.attention = attention 101 102 self.callbacks = callbacks 103 self.random_state = random_state 104 self.verbose = verbose 105 106 self._network = LSTMFCNNetwork( 107 kernel_sizes=self.kernel_sizes, 108 filter_sizes=self.filter_sizes, 109 random_state=self.random_state, 110 lstm_size=self.lstm_size, 111 dropout=self.dropout, 112 attention=self.attention, 113 ) 114 self._is_fitted = False 115 116 def build_model(self, input_shape, n_classes, **kwargs): 117 """ 118 Construct a compiled, un-trained, keras model that is ready for training. 119 120 ---------- 121 input_shape : tuple 122 The shape of the data fed into the input layer 123 n_classes: int 124 The number of classes, which shall become the size of the output 125 layer 126 Returns 127 ------- 128 output : a compiled Keras Model 129 """ 130 import tensorflow as tf 131 from tensorflow import keras 132 133 tf.random.set_seed(self.random_state) 134 135 input_layers, output_layer = self._network.build_network(input_shape, **kwargs) 136 137 output_layer = keras.layers.Dense(units=n_classes, activation="softmax")( 138 output_layer 139 ) 140 141 model = keras.models.Model(inputs=input_layers, outputs=output_layer) 142 143 model.compile( 144 loss="categorical_crossentropy", 145 optimizer="adam", 146 metrics=["accuracy"], 147 ) 148 149 if self.callbacks is None: 150 self._callbacks = [] 151 152 return model 153 154 def _fit(self, X, y): 155 """ 156 Fit the classifier on the training set (X, y). 157 158 ---------- 159 X : a nested pd.Dataframe, or (if input_checks=False) array-like of 160 shape = (n_instances, series_length, n_dimensions) 161 The training input samples. If a 2D array-like is passed, 162 n_dimensions is assumed to be 1. 163 y : array-like, shape = [n_instances] 164 The training data class labels. 165 input_checks : boolean 166 whether to check the X and y parameters 167 validation_X : a nested pd.Dataframe, or array-like of shape = 168 (n_instances, series_length, n_dimensions) 169 The validation samples. If a 2D array-like is passed, 170 n_dimensions is assumed to be 1. 171 Unless strictly defined by the user via callbacks (such as 172 EarlyStopping), the presence or state of the validation 173 data does not alter training in any way. Predictions at each epoch 174 are stored in the model's fit history. 175 validation_y : array-like, shape = [n_instances] 176 The validation class labels. 177 178 Returns 179 ------- 180 self : object 181 """ 182 check_random_state(self.random_state) 183 184 y_onehot = self.convert_y_to_keras(y) 185 186 # Remove? 187 # Transpose to conform to Keras input style. 
188 X = X.transpose(0, 2, 1) 189 190 # ignore the number of instances, X.shape[0], 191 # just want the shape of each instance 192 self.input_shape = X.shape[1:] 193 194 self.model_ = self.build_model(self.input_shape, self.n_classes_) 195 196 if self.verbose: 197 self.model_.summary() 198 199 self.history = self.model_.fit( 200 X, 201 y_onehot, 202 batch_size=self.batch_size, 203 epochs=self.n_epochs, 204 verbose=self.verbose, 205 callbacks=self._callbacks, 206 ) 207 208 self._is_fitted = True 209 210 return self 211 212 @classmethod 213 def get_test_params(cls, parameter_set="default"): 214 """Return testing parameter settings for the estimator. 215 216 Parameters 217 ---------- 218 parameter_set : str, default="default" 219 Name of the set of test parameters to return, for use in tests. If no 220 special parameters are defined for a value, will return `"default"` set. 221 For classifiers, a "default" set of parameters should be provided for 222 general testing, and a "results_comparison" set for comparing against 223 previously recorded results if the general set does not produce suitable 224 probabilities to compare against. 225 226 Returns 227 ------- 228 params : dict or list of dict, default={} 229 Parameters to create testing instances of the class. 230 Each dict are parameters to construct an "interesting" test instance, i.e., 231 `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance. 232 `create_test_instance` uses the first (or only) dictionary in `params`. 233 """ 234 param1 = { 235 "n_epochs": 50, 236 } 237 238 param2 = { 239 "n_epochs": 100, 240 } 241 242 return [param1, param2] 243 [end of sktime/classification/deep_learning/lstmfcn.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sktime/classification/deep_learning/lstmfcn.py b/sktime/classification/deep_learning/lstmfcn.py --- a/sktime/classification/deep_learning/lstmfcn.py +++ b/sktime/classification/deep_learning/lstmfcn.py @@ -70,7 +70,7 @@ def __init__( self, - n_epochs=100, + n_epochs=2000, batch_size=128, dropout=0.8, kernel_sizes=(8, 5, 3), @@ -84,7 +84,6 @@ super(LSTMFCNClassifier, self).__init__() - # calced in fit self.classes_ = None self.input_shape = None self.model_ = None @@ -232,11 +231,19 @@ `create_test_instance` uses the first (or only) dictionary in `params`. """ param1 = { - "n_epochs": 50, + "n_epochs": 25, + "batch_size": 4, + "kernel_sizes": (3, 2, 1), + "filter_sizes": (2, 4, 2), } param2 = { - "n_epochs": 100, + "n_epochs": 5, + "batch_size": 2, + "kernel_sizes": (3, 2, 1), + "filter_sizes": (2, 4, 2), + "lstm_size": 2, + "attention": True, } return [param1, param2]
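The substantive change for CI is that `get_test_params` now returns much smaller configurations (tiny kernels, filters and batch sizes, plus an attention variant), while the constructor default is restored to the documented `n_epochs=2000`. A rough standalone sketch of exercising one of the reduced configurations is shown below; the synthetic data shapes and labels are illustrative assumptions, and TensorFlow must be installed.

```python
import numpy as np

from sktime.classification.deep_learning.lstmfcn import LSTMFCNClassifier

# Tiny synthetic multivariate panel: 8 series, 2 channels, length 12.
rng = np.random.default_rng(0)
X = rng.normal(size=(8, 2, 12))
y = np.array([0, 1] * 4)

# Roughly the reduced settings introduced by the patch above.
clf = LSTMFCNClassifier(
    n_epochs=5,
    batch_size=2,
    kernel_sizes=(3, 2, 1),
    filter_sizes=(2, 4, 2),
    lstm_size=2,
    random_state=0,
)
clf.fit(X, y)
print(clf.predict(X))
```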
{"golden_diff": "diff --git a/sktime/classification/deep_learning/lstmfcn.py b/sktime/classification/deep_learning/lstmfcn.py\n--- a/sktime/classification/deep_learning/lstmfcn.py\n+++ b/sktime/classification/deep_learning/lstmfcn.py\n@@ -70,7 +70,7 @@\n \n def __init__(\n self,\n- n_epochs=100,\n+ n_epochs=2000,\n batch_size=128,\n dropout=0.8,\n kernel_sizes=(8, 5, 3),\n@@ -84,7 +84,6 @@\n \n super(LSTMFCNClassifier, self).__init__()\n \n- # calced in fit\n self.classes_ = None\n self.input_shape = None\n self.model_ = None\n@@ -232,11 +231,19 @@\n `create_test_instance` uses the first (or only) dictionary in `params`.\n \"\"\"\n param1 = {\n- \"n_epochs\": 50,\n+ \"n_epochs\": 25,\n+ \"batch_size\": 4,\n+ \"kernel_sizes\": (3, 2, 1),\n+ \"filter_sizes\": (2, 4, 2),\n }\n \n param2 = {\n- \"n_epochs\": 100,\n+ \"n_epochs\": 5,\n+ \"batch_size\": 2,\n+ \"kernel_sizes\": (3, 2, 1),\n+ \"filter_sizes\": (2, 4, 2),\n+ \"lstm_size\": 2,\n+ \"attention\": True,\n }\n \n return [param1, param2]\n", "issue": "[BUG] LSTM deep learning estimators failing CI on windows\nSince recently, a failure of two deep learning estimators has been appearing on windows CI:\r\n```\r\nFAILED sktime/tests/test_all_estimators.py::TestAllEstimators::test_methods_have_no_side_effects[MLPClassifier-1-ClassifierFitPredictMultivariate-predict]\r\nFAILED sktime/tests/test_all_estimators.py::TestAllEstimators::test_methods_have_no_side_effects[LSTMFCNClassifier-0-ClassifierFitPredictMultivariate-predict]\r\n```\r\n\r\nNote that the failure appears to be only on python 3.9, but this is due to the matrix design which spreads estimators across version/OS combinations.\r\n\r\nIn theory, it could be a 3.9 specific failure, but I think that is less likely than windows specifity (although it may be worth to test that by turning the `matrixdesign` flag off in the CI).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"LongShort Term Memory Fully Convolutional Network (LSTM-FCN).\"\"\"\n__author__ = [\"jnrusson1\", \"solen0id\"]\n\n__all__ = [\"LSTMFCNClassifier\"]\n\nfrom sklearn.utils import check_random_state\n\nfrom sktime.classification.deep_learning.base import BaseDeepClassifier\nfrom sktime.networks.lstmfcn import LSTMFCNNetwork\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\n\n_check_dl_dependencies(severity=\"warning\")\n\n\nclass LSTMFCNClassifier(BaseDeepClassifier):\n \"\"\"\n\n Implementation of LSTMFCNClassifier from Karim et al (2019) [1].\n\n Overview\n --------\n Combines an LSTM arm with a CNN arm. Optionally uses an attention mechanism in the\n LSTM which the author indicates provides improved performance.\n\n\n Parameters\n ----------\n n_epochs: int, default=2000\n the number of epochs to train the model\n batch_size: int, default=128\n the number of samples per gradient update.\n dropout: float, default=0.8\n controls dropout rate of LSTM layer\n kernel_sizes: list of ints, default=[8, 5, 3]\n specifying the length of the 1D convolution windows\n filter_sizes: int, list of ints, default=[128, 256, 128]\n size of filter for each conv layer\n lstm_size: int, default=8\n output dimension for LSTM layer\n attention: boolean, default=False\n If True, uses custom attention LSTM layer\n callbacks: keras callbacks, default=ReduceLRonPlateau\n Keras callbacks to use such as learning rate reduction or saving best model\n based on validation error\n verbose: 'auto', 0, 1, or 2. 
Verbosity mode.\n 0 = silent, 1 = progress bar, 2 = one line per epoch.\n 'auto' defaults to 1 for most cases, but 2 when used with\n `ParameterServerStrategy`. Note that the progress bar is not\n particularly useful when logged to a file, so verbose=2 is\n recommended when not running interactively (eg, in a production\n environment).\n random_state : int or None, default=None\n Seed for random, integer.\n\n\n Notes\n -----\n Ported from sktime-dl source code\n https://github.com/sktime/sktime-dl/blob/master/sktime_dl/classification/_lstmfcn.py\n\n References\n ----------\n .. [1] Karim et al. Multivariate LSTM-FCNs for Time Series Classification, 2019\n https://arxiv.org/pdf/1801.04503.pdf\n\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n def __init__(\n self,\n n_epochs=100,\n batch_size=128,\n dropout=0.8,\n kernel_sizes=(8, 5, 3),\n filter_sizes=(128, 256, 128),\n lstm_size=8,\n attention=False,\n callbacks=None,\n random_state=None,\n verbose=0,\n ):\n\n super(LSTMFCNClassifier, self).__init__()\n\n # calced in fit\n self.classes_ = None\n self.input_shape = None\n self.model_ = None\n self.history = None\n\n # predefined\n self.n_epochs = n_epochs\n self.batch_size = batch_size\n self.kernel_sizes = kernel_sizes\n self.filter_sizes = filter_sizes\n self.lstm_size = lstm_size\n self.dropout = dropout\n self.attention = attention\n\n self.callbacks = callbacks\n self.random_state = random_state\n self.verbose = verbose\n\n self._network = LSTMFCNNetwork(\n kernel_sizes=self.kernel_sizes,\n filter_sizes=self.filter_sizes,\n random_state=self.random_state,\n lstm_size=self.lstm_size,\n dropout=self.dropout,\n attention=self.attention,\n )\n self._is_fitted = False\n\n def build_model(self, input_shape, n_classes, **kwargs):\n \"\"\"\n Construct a compiled, un-trained, keras model that is ready for training.\n\n ----------\n input_shape : tuple\n The shape of the data fed into the input layer\n n_classes: int\n The number of classes, which shall become the size of the output\n layer\n Returns\n -------\n output : a compiled Keras Model\n \"\"\"\n import tensorflow as tf\n from tensorflow import keras\n\n tf.random.set_seed(self.random_state)\n\n input_layers, output_layer = self._network.build_network(input_shape, **kwargs)\n\n output_layer = keras.layers.Dense(units=n_classes, activation=\"softmax\")(\n output_layer\n )\n\n model = keras.models.Model(inputs=input_layers, outputs=output_layer)\n\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=[\"accuracy\"],\n )\n\n if self.callbacks is None:\n self._callbacks = []\n\n return model\n\n def _fit(self, X, y):\n \"\"\"\n Fit the classifier on the training set (X, y).\n\n ----------\n X : a nested pd.Dataframe, or (if input_checks=False) array-like of\n shape = (n_instances, series_length, n_dimensions)\n The training input samples. If a 2D array-like is passed,\n n_dimensions is assumed to be 1.\n y : array-like, shape = [n_instances]\n The training data class labels.\n input_checks : boolean\n whether to check the X and y parameters\n validation_X : a nested pd.Dataframe, or array-like of shape =\n (n_instances, series_length, n_dimensions)\n The validation samples. If a 2D array-like is passed,\n n_dimensions is assumed to be 1.\n Unless strictly defined by the user via callbacks (such as\n EarlyStopping), the presence or state of the validation\n data does not alter training in any way. 
Predictions at each epoch\n are stored in the model's fit history.\n validation_y : array-like, shape = [n_instances]\n The validation class labels.\n\n Returns\n -------\n self : object\n \"\"\"\n check_random_state(self.random_state)\n\n y_onehot = self.convert_y_to_keras(y)\n\n # Remove?\n # Transpose to conform to Keras input style.\n X = X.transpose(0, 2, 1)\n\n # ignore the number of instances, X.shape[0],\n # just want the shape of each instance\n self.input_shape = X.shape[1:]\n\n self.model_ = self.build_model(self.input_shape, self.n_classes_)\n\n if self.verbose:\n self.model_.summary()\n\n self.history = self.model_.fit(\n X,\n y_onehot,\n batch_size=self.batch_size,\n epochs=self.n_epochs,\n verbose=self.verbose,\n callbacks=self._callbacks,\n )\n\n self._is_fitted = True\n\n return self\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n For classifiers, a \"default\" set of parameters should be provided for\n general testing, and a \"results_comparison\" set for comparing against\n previously recorded results if the general set does not produce suitable\n probabilities to compare against.\n\n Returns\n -------\n params : dict or list of dict, default={}\n Parameters to create testing instances of the class.\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`.\n \"\"\"\n param1 = {\n \"n_epochs\": 50,\n }\n\n param2 = {\n \"n_epochs\": 100,\n }\n\n return [param1, param2]\n", "path": "sktime/classification/deep_learning/lstmfcn.py"}]}
gh_patches_debug_26026
python-discord__site-513
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unable to parse the GitHub repository metadata from response! Sentry Issue: [SITE-P](https://sentry.io/organizations/python-discord/issues/2093966668/?referrer=github_integration) ``` Unable to parse the GitHub repository metadata from response! ``` This is caused by us hitting github ratelimits, as we're not authenticating with the API. </issue> <code> [start of pydis_site/constants.py] 1 import os 2 3 GIT_SHA = os.environ.get("GIT_SHA", "development") 4 [end of pydis_site/constants.py] [start of pydis_site/apps/home/views/home.py] 1 import logging 2 from typing import Dict, List 3 4 import requests 5 from django.core.handlers.wsgi import WSGIRequest 6 from django.http import HttpResponse 7 from django.shortcuts import render 8 from django.utils import timezone 9 from django.views import View 10 11 from pydis_site.apps.home.models import RepositoryMetadata 12 13 log = logging.getLogger(__name__) 14 15 16 class HomeView(View): 17 """The main landing page for the website.""" 18 19 github_api = "https://api.github.com/users/python-discord/repos?per_page=100" 20 repository_cache_ttl = 3600 21 22 # Which of our GitHub repos should be displayed on the front page, and in which order? 23 repos = [ 24 "python-discord/site", 25 "python-discord/bot", 26 "python-discord/snekbox", 27 "python-discord/sir-lancebot", 28 "python-discord/metricity", 29 "python-discord/django-simple-bulma", 30 ] 31 32 def __init__(self): 33 """Clean up stale RepositoryMetadata.""" 34 RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete() 35 36 def _get_api_data(self) -> Dict[str, Dict[str, str]]: 37 """ 38 Call the GitHub API and get information about our repos. 39 40 If we're unable to get that info for any reason, return an empty dict. 41 """ 42 repo_dict = {} 43 44 # Fetch the data from the GitHub API 45 api_data: List[dict] = requests.get(self.github_api).json() 46 47 # Process the API data into our dict 48 for repo in api_data: 49 try: 50 full_name = repo["full_name"] 51 52 if full_name in self.repos: 53 repo_dict[full_name] = { 54 "full_name": repo["full_name"], 55 "description": repo["description"], 56 "language": repo["language"], 57 "forks_count": repo["forks_count"], 58 "stargazers_count": repo["stargazers_count"], 59 } 60 # Something is not right about the API data we got back from GitHub. 61 except (TypeError, ConnectionError, KeyError) as e: 62 log.error( 63 "Unable to parse the GitHub repository metadata from response!", 64 extra={ 65 'api_data': api_data, 66 'error': e 67 } 68 ) 69 continue 70 71 return repo_dict 72 73 def _get_repo_data(self) -> List[RepositoryMetadata]: 74 """Build a list of RepositoryMetadata objects that we can use to populate the front page.""" 75 database_repositories = [] 76 77 # First, let's see if we have any metadata cached. 78 cached_data = RepositoryMetadata.objects.all() 79 80 # If we don't, we have to create some! 81 if not cached_data: 82 83 # Try to get new data from the API. If it fails, we'll return an empty list. 84 # In this case, we simply don't display our projects on the site. 85 api_repositories = self._get_api_data() 86 87 # Create all the repodata records in the database. 
88 for api_data in api_repositories.values(): 89 repo_data = RepositoryMetadata( 90 repo_name=api_data["full_name"], 91 description=api_data["description"], 92 forks=api_data["forks_count"], 93 stargazers=api_data["stargazers_count"], 94 language=api_data["language"], 95 ) 96 97 repo_data.save() 98 database_repositories.append(repo_data) 99 100 return database_repositories 101 102 # If the data is stale, we should refresh it. 103 if (timezone.now() - cached_data[0].last_updated).seconds > self.repository_cache_ttl: 104 # Try to get new data from the API. If it fails, return the cached data. 105 api_repositories = self._get_api_data() 106 107 if not api_repositories: 108 return RepositoryMetadata.objects.all() 109 110 # Update or create all RepoData objects in self.repos 111 for repo_name, api_data in api_repositories.items(): 112 try: 113 repo_data = RepositoryMetadata.objects.get(repo_name=repo_name) 114 repo_data.description = api_data["description"] 115 repo_data.language = api_data["language"] 116 repo_data.forks = api_data["forks_count"] 117 repo_data.stargazers = api_data["stargazers_count"] 118 except RepositoryMetadata.DoesNotExist: 119 repo_data = RepositoryMetadata( 120 repo_name=api_data["full_name"], 121 description=api_data["description"], 122 forks=api_data["forks_count"], 123 stargazers=api_data["stargazers_count"], 124 language=api_data["language"], 125 ) 126 repo_data.save() 127 database_repositories.append(repo_data) 128 return database_repositories 129 130 # Otherwise, if the data is fresher than 2 minutes old, we should just return it. 131 else: 132 return RepositoryMetadata.objects.all() 133 134 def get(self, request: WSGIRequest) -> HttpResponse: 135 """Collect repo data and render the homepage view.""" 136 repo_data = self._get_repo_data() 137 return render(request, "home/index.html", {"repo_data": repo_data}) 138 139 140 def timeline(request: WSGIRequest) -> HttpResponse: 141 """Render timeline view.""" 142 return render(request, 'home/timeline.html') 143 [end of pydis_site/apps/home/views/home.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pydis_site/apps/home/views/home.py b/pydis_site/apps/home/views/home.py --- a/pydis_site/apps/home/views/home.py +++ b/pydis_site/apps/home/views/home.py @@ -9,6 +9,7 @@ from django.views import View from pydis_site.apps.home.models import RepositoryMetadata +from pydis_site.constants import GITHUB_TOKEN log = logging.getLogger(__name__) @@ -18,6 +19,7 @@ github_api = "https://api.github.com/users/python-discord/repos?per_page=100" repository_cache_ttl = 3600 + headers = {"Authorization": f"token {GITHUB_TOKEN}"} # Which of our GitHub repos should be displayed on the front page, and in which order? repos = [ @@ -42,7 +44,7 @@ repo_dict = {} # Fetch the data from the GitHub API - api_data: List[dict] = requests.get(self.github_api).json() + api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json() # Process the API data into our dict for repo in api_data: diff --git a/pydis_site/constants.py b/pydis_site/constants.py --- a/pydis_site/constants.py +++ b/pydis_site/constants.py @@ -1,3 +1,4 @@ import os GIT_SHA = os.environ.get("GIT_SHA", "development") +GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
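The fix reads a personal access token from a `GITHUB_TOKEN` environment variable and attaches it to every repository-metadata request, which moves the client from GitHub's 60-requests-per-hour unauthenticated limit to the much larger authenticated quota. The snippet below is a small standalone way to confirm which quota a given token actually receives; it is an illustrative script, not part of the site code.

```python
import os

import requests

token = os.environ.get("GITHUB_TOKEN")
headers = {"Authorization": f"token {token}"} if token else {}

# /rate_limit is free to call and reports the quota for the supplied credentials.
response = requests.get("https://api.github.com/rate_limit", headers=headers)
core = response.json()["resources"]["core"]
print(f"limit={core['limit']} remaining={core['remaining']}")
```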
{"golden_diff": "diff --git a/pydis_site/apps/home/views/home.py b/pydis_site/apps/home/views/home.py\n--- a/pydis_site/apps/home/views/home.py\n+++ b/pydis_site/apps/home/views/home.py\n@@ -9,6 +9,7 @@\n from django.views import View\n \n from pydis_site.apps.home.models import RepositoryMetadata\n+from pydis_site.constants import GITHUB_TOKEN\n \n log = logging.getLogger(__name__)\n \n@@ -18,6 +19,7 @@\n \n github_api = \"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n+ headers = {\"Authorization\": f\"token {GITHUB_TOKEN}\"}\n \n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n@@ -42,7 +44,7 @@\n repo_dict = {}\n \n # Fetch the data from the GitHub API\n- api_data: List[dict] = requests.get(self.github_api).json()\n+ api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()\n \n # Process the API data into our dict\n for repo in api_data:\ndiff --git a/pydis_site/constants.py b/pydis_site/constants.py\n--- a/pydis_site/constants.py\n+++ b/pydis_site/constants.py\n@@ -1,3 +1,4 @@\n import os\n \n GIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\n+GITHUB_TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n", "issue": "Unable to parse the GitHub repository metadata from response!\nSentry Issue: [SITE-P](https://sentry.io/organizations/python-discord/issues/2093966668/?referrer=github_integration)\n\n```\nUnable to parse the GitHub repository metadata from response!\n```\n\nThis is caused by us hitting github ratelimits, as we're not authenticating with the API.\n", "before_files": [{"content": "import os\n\nGIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\n", "path": "pydis_site/constants.py"}, {"content": "import logging\nfrom typing import Dict, List\n\nimport requests\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.views import View\n\nfrom pydis_site.apps.home.models import RepositoryMetadata\n\nlog = logging.getLogger(__name__)\n\n\nclass HomeView(View):\n \"\"\"The main landing page for the website.\"\"\"\n\n github_api = \"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n\n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n \"python-discord/site\",\n \"python-discord/bot\",\n \"python-discord/snekbox\",\n \"python-discord/sir-lancebot\",\n \"python-discord/metricity\",\n \"python-discord/django-simple-bulma\",\n ]\n\n def __init__(self):\n \"\"\"Clean up stale RepositoryMetadata.\"\"\"\n RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()\n\n def _get_api_data(self) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Call the GitHub API and get information about our repos.\n\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n\n # Fetch the data from the GitHub API\n api_data: List[dict] = requests.get(self.github_api).json()\n\n # Process the API data into our dict\n for repo in api_data:\n try:\n full_name = repo[\"full_name\"]\n\n if full_name in self.repos:\n repo_dict[full_name] = {\n \"full_name\": repo[\"full_name\"],\n \"description\": repo[\"description\"],\n \"language\": repo[\"language\"],\n \"forks_count\": repo[\"forks_count\"],\n \"stargazers_count\": repo[\"stargazers_count\"],\n }\n # Something is not right about the API data we got back from GitHub.\n except 
(TypeError, ConnectionError, KeyError) as e:\n log.error(\n \"Unable to parse the GitHub repository metadata from response!\",\n extra={\n 'api_data': api_data,\n 'error': e\n }\n )\n continue\n\n return repo_dict\n\n def _get_repo_data(self) -> List[RepositoryMetadata]:\n \"\"\"Build a list of RepositoryMetadata objects that we can use to populate the front page.\"\"\"\n database_repositories = []\n\n # First, let's see if we have any metadata cached.\n cached_data = RepositoryMetadata.objects.all()\n\n # If we don't, we have to create some!\n if not cached_data:\n\n # Try to get new data from the API. If it fails, we'll return an empty list.\n # In this case, we simply don't display our projects on the site.\n api_repositories = self._get_api_data()\n\n # Create all the repodata records in the database.\n for api_data in api_repositories.values():\n repo_data = RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n\n repo_data.save()\n database_repositories.append(repo_data)\n\n return database_repositories\n\n # If the data is stale, we should refresh it.\n if (timezone.now() - cached_data[0].last_updated).seconds > self.repository_cache_ttl:\n # Try to get new data from the API. If it fails, return the cached data.\n api_repositories = self._get_api_data()\n\n if not api_repositories:\n return RepositoryMetadata.objects.all()\n\n # Update or create all RepoData objects in self.repos\n for repo_name, api_data in api_repositories.items():\n try:\n repo_data = RepositoryMetadata.objects.get(repo_name=repo_name)\n repo_data.description = api_data[\"description\"]\n repo_data.language = api_data[\"language\"]\n repo_data.forks = api_data[\"forks_count\"]\n repo_data.stargazers = api_data[\"stargazers_count\"]\n except RepositoryMetadata.DoesNotExist:\n repo_data = RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n repo_data.save()\n database_repositories.append(repo_data)\n return database_repositories\n\n # Otherwise, if the data is fresher than 2 minutes old, we should just return it.\n else:\n return RepositoryMetadata.objects.all()\n\n def get(self, request: WSGIRequest) -> HttpResponse:\n \"\"\"Collect repo data and render the homepage view.\"\"\"\n repo_data = self._get_repo_data()\n return render(request, \"home/index.html\", {\"repo_data\": repo_data})\n\n\ndef timeline(request: WSGIRequest) -> HttpResponse:\n \"\"\"Render timeline view.\"\"\"\n return render(request, 'home/timeline.html')\n", "path": "pydis_site/apps/home/views/home.py"}]}
gh_patches_debug_3554
microsoft__torchgeo-2000
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Auto download fails for FireRisk ### Description Auto download fails for the FireRisk dataset hosted on Google Drive. Warning and error: ```bash /home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py:260: UserWarning: We detected some HTML elements in the downloaded file. This most likely means that the dow nload triggered an unhandled API response by GDrive. Please report this to torchvision at https://github.com/pytorch/vision/issues including the response: <!DOCTYPE html><html><head><title>Google Drive - Virus scan warning</title><meta http-equiv="content-type" content="text/html; charset=utf-8"/><style nonce="Udd3l48zF0spb_ikIDzQdw">.goog-link-button{position:rel ative;color:#15c;text-decoration:underline;cursor:pointer}.goog-link-button-disabled{color:#ccc;text-decoration:none;cursor:default}body{color:#222;font:normal 13px/1.4 arial,sans-serif;margin:0}.grecaptcha-badg e{visibility:hidden}.uc-main{padding-top:50px;text-align:center}#uc-dl-icon{display:inline-block;margin-top:16px;padding-right:1em;vertical-align:top}#uc-text{display:inline-block;max-width:68ex;text-align:left}.uc-error-caption,.uc-warning-caption{color:#222;font-size:16px}#uc-download-link{text-decoration:none}.uc-name-size a{color:#15c;text-decoration:none}.uc-name-size a:visited{color:#61c;text-decoration:none}.uc-name-size a:active{color:#d14836;text-decoration:none}.uc-footer{color:#777;font-size:11px;padding-bottom:5ex;padding-top:5ex;text-align:center}.uc-footer a{color:#15c}.uc-footer a:visited{color:#61c}.uc-footer a:active{color:#d14836}.uc-footer-divider{color:#ccc;width:100%}.goog-inline-block{position:relative;display:-moz-inline-box;display:inline-block}* html .goog-inline-block{display:inline}*:first-child+html .goog-inline-block{display:inline}sentinel{}</style><link rel="icon" href="//ssl.gstatic.com/docs/doclist/images/drive_2022q3_32dp.png"/></head><body><div class="uc-main"><div id="uc-dl-icon" class="image-container"><div class="drive-sprite-aux-download-file"></div></div><div id="uc-text"><p class="uc-warning-caption">Google Drive can't scan this file for viruses.</p><p class="uc-warning-subcaption"><span class="uc-name-size"><a href="/open?id=1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP">FireRisk.zip</a> (14G)</span> is too large for Google to scan for viruses. 
Would you still like to download this file?</p><form id="download-form" action="https://drive.usercontent.google.com/download" method="get"><input type="submit" id="uc-download-link" class="goog-inline-block jfk-button jfk-button-action" value="Download anyway"/><input type="hidden" name="id" value="1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP"><input type="hidden" name="export" value="download"><input type="hidden" name="confirm" value="t"><input type="hidden" name="uuid" value="c4203717-b28d-4640-8d59-e9f5d88a2120"></form></div></div><div class="uc-footer"><hr class="uc-footer-divider"></div></body></html> warnings.warn( Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/jb/code/torchgeo/slip/datasets/firerisk.py", line 25, in __init__ super().__init__(root=root, split=split, download=download, checksum=checksum) File "/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py", line 94, in __init__ self._verify() File "/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py", line 126, in _verify self._download() File "/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py", line 131, in _download download_url( File "/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py", line 139, in download_url return download_file_from_google_drive(file_id, root, filename, md5) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py", line 268, in download_file_from_google_drive raise RuntimeError( RuntimeError: The MD5 checksum of the download file /data/labeleff/datasets/firerisk/FireRisk.zip does not match the one on record.Please delete the file and try again. If the issue persists, please report this to torchvision at https://github.com/pytorch/vision/issues. ``` ### Steps to reproduce ```python from torchgeo.datasets import FireRisk dataset = FireRisk(download=True, checksum=True) ``` ### Version 0.5.1 </issue> <code> [start of torchgeo/datasets/fire_risk.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 """FireRisk dataset.""" 5 6 import os 7 from collections.abc import Callable 8 from typing import cast 9 10 import matplotlib.pyplot as plt 11 from matplotlib.figure import Figure 12 from torch import Tensor 13 14 from .geo import NonGeoClassificationDataset 15 from .utils import DatasetNotFoundError, download_url, extract_archive 16 17 18 class FireRisk(NonGeoClassificationDataset): 19 """FireRisk dataset. 20 21 The `FireRisk <https://github.com/CharmonyShen/FireRisk>`__ 22 dataset is a dataset for remote sensing fire risk classification. 23 24 Dataset features: 25 26 * 91,872 images with 1 m per pixel resolution (320x320 px) 27 * 70,331 and 21,541 train and val images, respectively 28 * three spectral bands - RGB 29 * 7 fire risk classes 30 * images extracted from NAIP tiles 31 32 Dataset format: 33 34 * images are three-channel pngs 35 36 Dataset classes: 37 38 0. high 39 1. low 40 2. moderate 41 3. non-burnable 42 4. very_high 43 5. very_low 44 6. water 45 46 If you use this dataset in your research, please cite the following paper: 47 48 * https://arxiv.org/abs/2303.07035 49 50 .. 
versionadded:: 0.5 51 """ 52 53 url = "https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP" 54 md5 = "a77b9a100d51167992ae8c51d26198a6" 55 filename = "FireRisk.zip" 56 directory = "FireRisk" 57 splits = ["train", "val"] 58 classes = [ 59 "High", 60 "Low", 61 "Moderate", 62 "Non-burnable", 63 "Very_High", 64 "Very_Low", 65 "Water", 66 ] 67 68 def __init__( 69 self, 70 root: str = "data", 71 split: str = "train", 72 transforms: Callable[[dict[str, Tensor]], dict[str, Tensor]] | None = None, 73 download: bool = False, 74 checksum: bool = False, 75 ) -> None: 76 """Initialize a new FireRisk dataset instance. 77 78 Args: 79 root: root directory where dataset can be found 80 split: one of "train" or "val" 81 transforms: a function/transform that takes input sample and its target as 82 entry and returns a transformed version 83 download: if True, download dataset and store it in the root directory 84 checksum: if True, check the MD5 of the downloaded files (may be slow) 85 86 Raises: 87 AssertionError: if ``split`` argument is invalid 88 DatasetNotFoundError: If dataset is not found and *download* is False. 89 """ 90 assert split in self.splits 91 self.root = root 92 self.split = split 93 self.download = download 94 self.checksum = checksum 95 self._verify() 96 97 super().__init__( 98 root=os.path.join(root, self.directory, self.split), transforms=transforms 99 ) 100 101 def _verify(self) -> None: 102 """Verify the integrity of the dataset.""" 103 # Check if the files already exist 104 path = os.path.join(self.root, self.directory) 105 if os.path.exists(path): 106 return 107 108 # Check if zip file already exists (if so then extract) 109 filepath = os.path.join(self.root, self.filename) 110 if os.path.exists(filepath): 111 self._extract() 112 return 113 114 # Check if the user requested to download the dataset 115 if not self.download: 116 raise DatasetNotFoundError(self) 117 118 # Download and extract the dataset 119 self._download() 120 self._extract() 121 122 def _download(self) -> None: 123 """Download the dataset.""" 124 download_url( 125 self.url, 126 self.root, 127 filename=self.filename, 128 md5=self.md5 if self.checksum else None, 129 ) 130 131 def _extract(self) -> None: 132 """Extract the dataset.""" 133 filepath = os.path.join(self.root, self.filename) 134 extract_archive(filepath) 135 136 def plot( 137 self, 138 sample: dict[str, Tensor], 139 show_titles: bool = True, 140 suptitle: str | None = None, 141 ) -> Figure: 142 """Plot a sample from the dataset. 
143 144 Args: 145 sample: a sample returned by :meth:`NonGeoClassificationDataset.__getitem__` 146 show_titles: flag indicating whether to show titles above each panel 147 suptitle: optional string to use as a suptitle 148 149 Returns: 150 a matplotlib Figure with the rendered sample 151 """ 152 image = sample["image"].permute((1, 2, 0)).numpy() 153 label = cast(int, sample["label"].item()) 154 label_class = self.classes[label] 155 156 showing_predictions = "prediction" in sample 157 if showing_predictions: 158 prediction = cast(int, sample["prediction"].item()) 159 prediction_class = self.classes[prediction] 160 161 fig, ax = plt.subplots(figsize=(4, 4)) 162 ax.imshow(image) 163 ax.axis("off") 164 if show_titles: 165 title = f"Label: {label_class}" 166 if showing_predictions: 167 title += f"\nPrediction: {prediction_class}" 168 ax.set_title(title) 169 170 if suptitle is not None: 171 plt.suptitle(suptitle) 172 return fig 173 [end of torchgeo/datasets/fire_risk.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchgeo/datasets/fire_risk.py b/torchgeo/datasets/fire_risk.py --- a/torchgeo/datasets/fire_risk.py +++ b/torchgeo/datasets/fire_risk.py @@ -50,7 +50,7 @@ .. versionadded:: 0.5 """ - url = "https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP" + url = "https://hf.co/datasets/torchgeo/fire_risk/resolve/e6046a04350c6f1ab4ad791fb3a40bf8940be269/FireRisk.zip" md5 = "a77b9a100d51167992ae8c51d26198a6" filename = "FireRisk.zip" directory = "FireRisk"
{"golden_diff": "diff --git a/torchgeo/datasets/fire_risk.py b/torchgeo/datasets/fire_risk.py\n--- a/torchgeo/datasets/fire_risk.py\n+++ b/torchgeo/datasets/fire_risk.py\n@@ -50,7 +50,7 @@\n .. versionadded:: 0.5\n \"\"\"\n \n- url = \"https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\"\n+ url = \"https://hf.co/datasets/torchgeo/fire_risk/resolve/e6046a04350c6f1ab4ad791fb3a40bf8940be269/FireRisk.zip\"\n md5 = \"a77b9a100d51167992ae8c51d26198a6\"\n filename = \"FireRisk.zip\"\n directory = \"FireRisk\"\n", "issue": "Auto download fails for FireRisk\n### Description\n\nAuto download fails for the FireRisk dataset hosted on Google Drive.\r\n\r\nWarning and error:\r\n```bash\r\n/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py:260: UserWarning: We detected some HTML elements in the downloaded file. This most likely means that the dow\r\nnload triggered an unhandled API response by GDrive. Please report this to torchvision at https://github.com/pytorch/vision/issues including the response:\r\n\r\n<!DOCTYPE html><html><head><title>Google Drive - Virus scan warning</title><meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\"/><style nonce=\"Udd3l48zF0spb_ikIDzQdw\">.goog-link-button{position:rel\r\native;color:#15c;text-decoration:underline;cursor:pointer}.goog-link-button-disabled{color:#ccc;text-decoration:none;cursor:default}body{color:#222;font:normal 13px/1.4 arial,sans-serif;margin:0}.grecaptcha-badg\r\ne{visibility:hidden}.uc-main{padding-top:50px;text-align:center}#uc-dl-icon{display:inline-block;margin-top:16px;padding-right:1em;vertical-align:top}#uc-text{display:inline-block;max-width:68ex;text-align:left}.uc-error-caption,.uc-warning-caption{color:#222;font-size:16px}#uc-download-link{text-decoration:none}.uc-name-size a{color:#15c;text-decoration:none}.uc-name-size a:visited{color:#61c;text-decoration:none}.uc-name-size a:active{color:#d14836;text-decoration:none}.uc-footer{color:#777;font-size:11px;padding-bottom:5ex;padding-top:5ex;text-align:center}.uc-footer a{color:#15c}.uc-footer a:visited{color:#61c}.uc-footer a:active{color:#d14836}.uc-footer-divider{color:#ccc;width:100%}.goog-inline-block{position:relative;display:-moz-inline-box;display:inline-block}* html .goog-inline-block{display:inline}*:first-child+html .goog-inline-block{display:inline}sentinel{}</style><link rel=\"icon\" href=\"//ssl.gstatic.com/docs/doclist/images/drive_2022q3_32dp.png\"/></head><body><div class=\"uc-main\"><div id=\"uc-dl-icon\" class=\"image-container\"><div class=\"drive-sprite-aux-download-file\"></div></div><div id=\"uc-text\"><p class=\"uc-warning-caption\">Google Drive can't scan this file for viruses.</p><p class=\"uc-warning-subcaption\"><span class=\"uc-name-size\"><a href=\"/open?id=1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\">FireRisk.zip</a> (14G)</span> is too large for Google to scan for viruses. 
Would you still like to download this file?</p><form id=\"download-form\" action=\"https://drive.usercontent.google.com/download\" method=\"get\"><input type=\"submit\" id=\"uc-download-link\" class=\"goog-inline-block jfk-button jfk-button-action\" value=\"Download anyway\"/><input type=\"hidden\" name=\"id\" value=\"1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\"><input type=\"hidden\" name=\"export\" value=\"download\"><input type=\"hidden\" name=\"confirm\" value=\"t\"><input type=\"hidden\" name=\"uuid\" value=\"c4203717-b28d-4640-8d59-e9f5d88a2120\"></form></div></div><div class=\"uc-footer\"><hr class=\"uc-footer-divider\"></div></body></html>\r\n warnings.warn(\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/jb/code/torchgeo/slip/datasets/firerisk.py\", line 25, in __init__\r\n super().__init__(root=root, split=split, download=download, checksum=checksum)\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\", line 94, in __init__\r\n self._verify()\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\", line 126, in _verify\r\n self._download()\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\", line 131, in _download\r\n download_url(\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py\", line 139, in download_url\r\n return download_file_from_google_drive(file_id, root, filename, md5)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py\", line 268, in download_file_from_google_drive\r\n raise RuntimeError(\r\nRuntimeError: The MD5 checksum of the download file /data/labeleff/datasets/firerisk/FireRisk.zip does not match the one on record.Please delete the file and try again. If the issue persists, please report this to torchvision at https://github.com/pytorch/vision/issues.\r\n```\n\n### Steps to reproduce\n\n```python\r\nfrom torchgeo.datasets import FireRisk\r\ndataset = FireRisk(download=True, checksum=True)\r\n```\n\n### Version\n\n0.5.1\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"FireRisk dataset.\"\"\"\n\nimport os\nfrom collections.abc import Callable\nfrom typing import cast\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoClassificationDataset\nfrom .utils import DatasetNotFoundError, download_url, extract_archive\n\n\nclass FireRisk(NonGeoClassificationDataset):\n \"\"\"FireRisk dataset.\n\n The `FireRisk <https://github.com/CharmonyShen/FireRisk>`__\n dataset is a dataset for remote sensing fire risk classification.\n\n Dataset features:\n\n * 91,872 images with 1 m per pixel resolution (320x320 px)\n * 70,331 and 21,541 train and val images, respectively\n * three spectral bands - RGB\n * 7 fire risk classes\n * images extracted from NAIP tiles\n\n Dataset format:\n\n * images are three-channel pngs\n\n Dataset classes:\n\n 0. high\n 1. low\n 2. moderate\n 3. non-burnable\n 4. very_high\n 5. very_low\n 6. water\n\n If you use this dataset in your research, please cite the following paper:\n\n * https://arxiv.org/abs/2303.07035\n\n .. 
versionadded:: 0.5\n \"\"\"\n\n url = \"https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\"\n md5 = \"a77b9a100d51167992ae8c51d26198a6\"\n filename = \"FireRisk.zip\"\n directory = \"FireRisk\"\n splits = [\"train\", \"val\"]\n classes = [\n \"High\",\n \"Low\",\n \"Moderate\",\n \"Non-burnable\",\n \"Very_High\",\n \"Very_Low\",\n \"Water\",\n ]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n transforms: Callable[[dict[str, Tensor]], dict[str, Tensor]] | None = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new FireRisk dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: one of \"train\" or \"val\"\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 of the downloaded files (may be slow)\n\n Raises:\n AssertionError: if ``split`` argument is invalid\n DatasetNotFoundError: If dataset is not found and *download* is False.\n \"\"\"\n assert split in self.splits\n self.root = root\n self.split = split\n self.download = download\n self.checksum = checksum\n self._verify()\n\n super().__init__(\n root=os.path.join(root, self.directory, self.split), transforms=transforms\n )\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\"\"\"\n # Check if the files already exist\n path = os.path.join(self.root, self.directory)\n if os.path.exists(path):\n return\n\n # Check if zip file already exists (if so then extract)\n filepath = os.path.join(self.root, self.filename)\n if os.path.exists(filepath):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise DatasetNotFoundError(self)\n\n # Download and extract the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n download_url(\n self.url,\n self.root,\n filename=self.filename,\n md5=self.md5 if self.checksum else None,\n )\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n filepath = os.path.join(self.root, self.filename)\n extract_archive(filepath)\n\n def plot(\n self,\n sample: dict[str, Tensor],\n show_titles: bool = True,\n suptitle: str | None = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`NonGeoClassificationDataset.__getitem__`\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n image = sample[\"image\"].permute((1, 2, 0)).numpy()\n label = cast(int, sample[\"label\"].item())\n label_class = self.classes[label]\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n prediction = cast(int, sample[\"prediction\"].item())\n prediction_class = self.classes[prediction]\n\n fig, ax = plt.subplots(figsize=(4, 4))\n ax.imshow(image)\n ax.axis(\"off\")\n if show_titles:\n title = f\"Label: {label_class}\"\n if showing_predictions:\n title += f\"\\nPrediction: {prediction_class}\"\n ax.set_title(title)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n return fig\n", "path": "torchgeo/datasets/fire_risk.py"}]}
3,478
221
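The FireRisk record above resolves the Drive failure by pointing `url` at a plain HTTPS archive, so the MD5 check runs against the real zip rather than Google Drive's virus-scan HTML page. A minimal standard-library sketch of that download-and-verify step follows; the URL and checksum are copied from the patched record, while the helper name and the `__main__` guard are illustrative only (the archive is roughly 14 GB, so the call is shown rather than meant to be run casually).

```python
import hashlib
import urllib.request


def download_and_verify(url: str, dest: str, md5: str, chunk_size: int = 1 << 20) -> None:
    """Stream ``url`` to ``dest`` and fail loudly if the MD5 does not match."""
    digest = hashlib.md5()
    with urllib.request.urlopen(url) as response, open(dest, "wb") as out:
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            out.write(chunk)
            digest.update(chunk)
    if digest.hexdigest() != md5:
        # This is exactly the failure mode in the issue: the saved "zip" is an
        # HTML interstitial page, so its checksum can never match the archive.
        raise RuntimeError(
            f"MD5 mismatch for {dest}: got {digest.hexdigest()}, expected {md5}"
        )


if __name__ == "__main__":
    download_and_verify(
        "https://hf.co/datasets/torchgeo/fire_risk/resolve/"
        "e6046a04350c6f1ab4ad791fb3a40bf8940be269/FireRisk.zip",
        "FireRisk.zip",
        "a77b9a100d51167992ae8c51d26198a6",
    )
```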
gh_patches_debug_37959
rasdani/github-patches
git_diff
openvinotoolkit__datumaro-371
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Import ImageNet dataset ### Steps to reproduce problem 1. Download and extract ImageNet dataset for image classification: [link](https://www.kaggle.com/c/imagenet-object-localization-challenge/data); 2. Add the loaded dataset into a Datumaro project; 3. Run `datum info`. ### Current behaviour ImageNet dataset has ~1.2m images, but in the `info` output we can see that imported dataset has only 69647, and also these images have wrong labels. ### Expected behaviour Correct import. ### Environment `git log -1`: 7e35c8 </issue> <code> [start of datumaro/plugins/imagenet_format.py] 1 # Copyright (C) 2020 Intel Corporation 2 # 3 # SPDX-License-Identifier: MIT 4 5 import logging as log 6 import os 7 import os.path as osp 8 9 from datumaro.components.converter import Converter 10 from datumaro.components.extractor import ( 11 AnnotationType, DatasetItem, Importer, Label, LabelCategories, 12 SourceExtractor, 13 ) 14 from datumaro.util.image import find_images 15 16 17 class ImagenetPath: 18 IMAGE_DIR_NO_LABEL = 'no_label' 19 20 21 class ImagenetExtractor(SourceExtractor): 22 def __init__(self, path, subset=None): 23 assert osp.isdir(path), path 24 super().__init__(subset=subset) 25 26 self._categories = self._load_categories(path) 27 self._items = list(self._load_items(path).values()) 28 29 def _load_categories(self, path): 30 label_cat = LabelCategories() 31 for dirname in sorted(os.listdir(path)): 32 if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL: 33 label_cat.add(dirname) 34 return { AnnotationType.label: label_cat } 35 36 def _load_items(self, path): 37 items = {} 38 39 for image_path in find_images(path, recursive=True, max_depth=1): 40 label = osp.basename(osp.dirname(image_path)) 41 image_name = osp.splitext(osp.basename(image_path))[0] 42 if image_name.startswith(label + '_'): 43 image_name = image_name[len(label) + 1:] 44 45 item = items.get(image_name) 46 if item is None: 47 item = DatasetItem(id=image_name, subset=self._subset, 48 image=image_path) 49 items[image_name] = item 50 annotations = item.annotations 51 52 if label != ImagenetPath.IMAGE_DIR_NO_LABEL: 53 label = self._categories[AnnotationType.label].find(label)[0] 54 annotations.append(Label(label=label)) 55 56 return items 57 58 59 class ImagenetImporter(Importer): 60 @classmethod 61 def find_sources(cls, path): 62 if not osp.isdir(path): 63 return [] 64 return [{ 'url': path, 'format': 'imagenet' }] 65 66 67 class ImagenetConverter(Converter): 68 DEFAULT_IMAGE_EXT = '.jpg' 69 70 def apply(self): 71 if 1 < len(self._extractor.subsets()): 72 log.warning("ImageNet format only supports exporting a single " 73 "subset, subset information will not be used.") 74 75 subset_dir = self._save_dir 76 extractor = self._extractor 77 labels = {} 78 for item in self._extractor: 79 labels = set(p.label for p in item.annotations 80 if p.type == AnnotationType.label) 81 82 for label in labels: 83 label_name = extractor.categories()[AnnotationType.label][label].name 84 self._save_image(item, osp.join(subset_dir, label_name, 85 '%s_%s' % (label_name, self._make_image_filename(item)))) 86 87 if not labels: 88 self._save_image(item, osp.join(subset_dir, 89 ImagenetPath.IMAGE_DIR_NO_LABEL, 90 ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \ 91 self._make_image_filename(item))) 92 [end of datumaro/plugins/imagenet_format.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git 
apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/datumaro/plugins/imagenet_format.py b/datumaro/plugins/imagenet_format.py --- a/datumaro/plugins/imagenet_format.py +++ b/datumaro/plugins/imagenet_format.py @@ -39,14 +39,13 @@ for image_path in find_images(path, recursive=True, max_depth=1): label = osp.basename(osp.dirname(image_path)) image_name = osp.splitext(osp.basename(image_path))[0] - if image_name.startswith(label + '_'): - image_name = image_name[len(label) + 1:] - item = items.get(image_name) + item_id = osp.join(label, image_name) + item = items.get(item_id) if item is None: - item = DatasetItem(id=image_name, subset=self._subset, + item = DatasetItem(id=item_id, subset=self._subset, image=image_path) - items[image_name] = item + items[item_id] = item annotations = item.annotations if label != ImagenetPath.IMAGE_DIR_NO_LABEL: @@ -68,6 +67,13 @@ DEFAULT_IMAGE_EXT = '.jpg' def apply(self): + + def _get_dir_name(id_parts, label_name): + if 1 < len(id_parts) and id_parts[0] == label_name: + return '' + else: + return label_name + if 1 < len(self._extractor.subsets()): log.warning("ImageNet format only supports exporting a single " "subset, subset information will not be used.") @@ -76,16 +82,15 @@ extractor = self._extractor labels = {} for item in self._extractor: + id_parts = item.id.split('/') labels = set(p.label for p in item.annotations if p.type == AnnotationType.label) for label in labels: label_name = extractor.categories()[AnnotationType.label][label].name - self._save_image(item, osp.join(subset_dir, label_name, - '%s_%s' % (label_name, self._make_image_filename(item)))) + self._save_image(item, subdir=osp.join(subset_dir, + _get_dir_name(id_parts, label_name))) if not labels: - self._save_image(item, osp.join(subset_dir, - ImagenetPath.IMAGE_DIR_NO_LABEL, - ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \ - self._make_image_filename(item))) + self._save_image(item, subdir=osp.join(subset_dir, + _get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))
{"golden_diff": "diff --git a/datumaro/plugins/imagenet_format.py b/datumaro/plugins/imagenet_format.py\n--- a/datumaro/plugins/imagenet_format.py\n+++ b/datumaro/plugins/imagenet_format.py\n@@ -39,14 +39,13 @@\n for image_path in find_images(path, recursive=True, max_depth=1):\n label = osp.basename(osp.dirname(image_path))\n image_name = osp.splitext(osp.basename(image_path))[0]\n- if image_name.startswith(label + '_'):\n- image_name = image_name[len(label) + 1:]\n \n- item = items.get(image_name)\n+ item_id = osp.join(label, image_name)\n+ item = items.get(item_id)\n if item is None:\n- item = DatasetItem(id=image_name, subset=self._subset,\n+ item = DatasetItem(id=item_id, subset=self._subset,\n image=image_path)\n- items[image_name] = item\n+ items[item_id] = item\n annotations = item.annotations\n \n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\n@@ -68,6 +67,13 @@\n DEFAULT_IMAGE_EXT = '.jpg'\n \n def apply(self):\n+\n+ def _get_dir_name(id_parts, label_name):\n+ if 1 < len(id_parts) and id_parts[0] == label_name:\n+ return ''\n+ else:\n+ return label_name\n+\n if 1 < len(self._extractor.subsets()):\n log.warning(\"ImageNet format only supports exporting a single \"\n \"subset, subset information will not be used.\")\n@@ -76,16 +82,15 @@\n extractor = self._extractor\n labels = {}\n for item in self._extractor:\n+ id_parts = item.id.split('/')\n labels = set(p.label for p in item.annotations\n if p.type == AnnotationType.label)\n \n for label in labels:\n label_name = extractor.categories()[AnnotationType.label][label].name\n- self._save_image(item, osp.join(subset_dir, label_name,\n- '%s_%s' % (label_name, self._make_image_filename(item))))\n+ self._save_image(item, subdir=osp.join(subset_dir,\n+ _get_dir_name(id_parts, label_name)))\n \n if not labels:\n- self._save_image(item, osp.join(subset_dir,\n- ImagenetPath.IMAGE_DIR_NO_LABEL,\n- ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \\\n- self._make_image_filename(item)))\n+ self._save_image(item, subdir=osp.join(subset_dir,\n+ _get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))\n", "issue": "Import ImageNet dataset\n### Steps to reproduce problem\r\n1. Download and extract ImageNet dataset for image classification: [link](https://www.kaggle.com/c/imagenet-object-localization-challenge/data);\r\n2. Add the loaded dataset into a Datumaro project;\r\n3. 
Run `datum info`.\r\n\r\n### Current behaviour\r\nImageNet dataset has ~1.2m images, but in the `info` output we can see that imported dataset has only 69647, and also these images have wrong labels.\r\n\r\n### Expected behaviour\r\nCorrect import.\r\n\r\n### Environment\r\n`git log -1`: 7e35c8\n", "before_files": [{"content": "# Copyright (C) 2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport logging as log\nimport os\nimport os.path as osp\n\nfrom datumaro.components.converter import Converter\nfrom datumaro.components.extractor import (\n AnnotationType, DatasetItem, Importer, Label, LabelCategories,\n SourceExtractor,\n)\nfrom datumaro.util.image import find_images\n\n\nclass ImagenetPath:\n IMAGE_DIR_NO_LABEL = 'no_label'\n\n\nclass ImagenetExtractor(SourceExtractor):\n def __init__(self, path, subset=None):\n assert osp.isdir(path), path\n super().__init__(subset=subset)\n\n self._categories = self._load_categories(path)\n self._items = list(self._load_items(path).values())\n\n def _load_categories(self, path):\n label_cat = LabelCategories()\n for dirname in sorted(os.listdir(path)):\n if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL:\n label_cat.add(dirname)\n return { AnnotationType.label: label_cat }\n\n def _load_items(self, path):\n items = {}\n\n for image_path in find_images(path, recursive=True, max_depth=1):\n label = osp.basename(osp.dirname(image_path))\n image_name = osp.splitext(osp.basename(image_path))[0]\n if image_name.startswith(label + '_'):\n image_name = image_name[len(label) + 1:]\n\n item = items.get(image_name)\n if item is None:\n item = DatasetItem(id=image_name, subset=self._subset,\n image=image_path)\n items[image_name] = item\n annotations = item.annotations\n\n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\n label = self._categories[AnnotationType.label].find(label)[0]\n annotations.append(Label(label=label))\n\n return items\n\n\nclass ImagenetImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n if not osp.isdir(path):\n return []\n return [{ 'url': path, 'format': 'imagenet' }]\n\n\nclass ImagenetConverter(Converter):\n DEFAULT_IMAGE_EXT = '.jpg'\n\n def apply(self):\n if 1 < len(self._extractor.subsets()):\n log.warning(\"ImageNet format only supports exporting a single \"\n \"subset, subset information will not be used.\")\n\n subset_dir = self._save_dir\n extractor = self._extractor\n labels = {}\n for item in self._extractor:\n labels = set(p.label for p in item.annotations\n if p.type == AnnotationType.label)\n\n for label in labels:\n label_name = extractor.categories()[AnnotationType.label][label].name\n self._save_image(item, osp.join(subset_dir, label_name,\n '%s_%s' % (label_name, self._make_image_filename(item))))\n\n if not labels:\n self._save_image(item, osp.join(subset_dir,\n ImagenetPath.IMAGE_DIR_NO_LABEL,\n ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \\\n self._make_image_filename(item)))\n", "path": "datumaro/plugins/imagenet_format.py"}]}
1,506
590
gh_patches_debug_4923
rasdani/github-patches
git_diff
qutebrowser__qutebrowser-2852
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Crash with web-history-max-items and no items in the history When running `qutebrowser --debug --temp-basedir --backend webengine -s completion web-history-max-items 1000` and immediately pressing `o`, this happens: ``` 12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: "SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)" 12:21:36 DEBUG sql sql:run:99 Running SQL query: "SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)" 12:21:36 DEBUG sql sql:run:102 query bindings: {':limit': 1000} 12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: "SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\' or title LIKE :pat escape '\') AND last_atime >= ORDER BY last_atime DESC" 12:21:36 DEBUG completion debug:__exit__:264 Starting url completion took 0.003652 seconds. 12:21:36 ERROR misc crashsignal:exception_hook:205 Uncaught exception Traceback (most recent call last): File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py", line 236, in _update_completion model = func(*args) File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/urlmodel.py", line 70, in url hist_cat = histcategory.HistoryCategory(delete_func=_delete_history) File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py", line 54, in __init__ ]), forward_only=False) File "/home/florian/proj/qutebrowser/git/qutebrowser/misc/sql.py", line 83, in __init__ querystr, self.lastError().text())) qutebrowser.misc.sql.SqlException: Failed to prepare query "SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\' or title LIKE :pat escape '\') AND last_atime >= ORDER BY last_atime DESC": "near "ORDER": syntax error Unable to execute statement" ``` cc @rcorre </issue> <code> [start of qutebrowser/completion/models/histcategory.py] 1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: 2 3 # Copyright 2017 Ryan Roden-Corrent (rcorre) <[email protected]> 4 # 5 # This file is part of qutebrowser. 6 # 7 # qutebrowser is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # qutebrowser is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. 
19 20 """A completion category that queries the SQL History store.""" 21 22 import re 23 24 from PyQt5.QtSql import QSqlQueryModel 25 26 from qutebrowser.misc import sql 27 from qutebrowser.utils import debug 28 from qutebrowser.commands import cmdexc 29 from qutebrowser.config import config 30 31 32 class HistoryCategory(QSqlQueryModel): 33 34 """A completion category that queries the SQL History store.""" 35 36 def __init__(self, *, delete_func=None, parent=None): 37 """Create a new History completion category.""" 38 super().__init__(parent=parent) 39 self.name = "History" 40 41 # replace ' in timestamp-format to avoid breaking the query 42 timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')" 43 .format(config.get('completion', 'timestamp-format') 44 .replace("'", "`"))) 45 46 self._query = sql.Query(' '.join([ 47 "SELECT url, title, {}".format(timefmt), 48 "FROM CompletionHistory", 49 # the incoming pattern will have literal % and _ escaped with '\' 50 # we need to tell sql to treat '\' as an escape character 51 "WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\')", 52 self._atime_expr(), 53 "ORDER BY last_atime DESC", 54 ]), forward_only=False) 55 56 # advertise that this model filters by URL and title 57 self.columns_to_filter = [0, 1] 58 self.delete_func = delete_func 59 60 def _atime_expr(self): 61 """If max_items is set, return an expression to limit the query.""" 62 max_items = config.get('completion', 'web-history-max-items') 63 # HistoryCategory should not be added to the completion in that case. 64 assert max_items != 0 65 66 if max_items < 0: 67 return '' 68 69 min_atime = sql.Query(' '.join([ 70 'SELECT min(last_atime) FROM', 71 '(SELECT last_atime FROM CompletionHistory', 72 'ORDER BY last_atime DESC LIMIT :limit)', 73 ])).run(limit=max_items).value() 74 75 return "AND last_atime >= {}".format(min_atime) 76 77 def set_pattern(self, pattern): 78 """Set the pattern used to filter results. 79 80 Args: 81 pattern: string pattern to filter by. 82 """ 83 # escape to treat a user input % or _ as a literal, not a wildcard 84 pattern = pattern.replace('%', '\\%') 85 pattern = pattern.replace('_', '\\_') 86 # treat spaces as wildcards to match any of the typed words 87 pattern = re.sub(r' +', '%', pattern) 88 pattern = '%{}%'.format(pattern) 89 with debug.log_time('sql', 'Running completion query'): 90 self._query.run(pat=pattern) 91 self.setQuery(self._query) 92 93 def delete_cur_item(self, index): 94 """Delete the row at the given index.""" 95 if not self.delete_func: 96 raise cmdexc.CommandError("Cannot delete this item.") 97 data = [self.data(index.sibling(index.row(), i)) 98 for i in range(self.columnCount())] 99 self.delete_func(data) 100 # re-run query to reload updated table 101 with debug.log_time('sql', 'Re-running completion query post-delete'): 102 self._query.run() 103 self.setQuery(self._query) 104 [end of qutebrowser/completion/models/histcategory.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py --- a/qutebrowser/completion/models/histcategory.py +++ b/qutebrowser/completion/models/histcategory.py @@ -72,6 +72,10 @@ 'ORDER BY last_atime DESC LIMIT :limit)', ])).run(limit=max_items).value() + if not min_atime: + # if there are no history items, min_atime may be '' (issue #2849) + return '' + return "AND last_atime >= {}".format(min_atime) def set_pattern(self, pattern):
{"golden_diff": "diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py\n--- a/qutebrowser/completion/models/histcategory.py\n+++ b/qutebrowser/completion/models/histcategory.py\n@@ -72,6 +72,10 @@\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n \n+ if not min_atime:\n+ # if there are no history items, min_atime may be '' (issue #2849)\n+ return ''\n+\n return \"AND last_atime >= {}\".format(min_atime)\n \n def set_pattern(self, pattern):\n", "issue": "Crash with web-history-max-items and no items in the history\nWhen running `qutebrowser --debug --temp-basedir --backend webengine -s completion web-history-max-items 1000` and immediately pressing `o`, this happens:\r\n\r\n```\r\n12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: \"SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)\"\r\n12:21:36 DEBUG sql sql:run:99 Running SQL query: \"SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)\"\r\n12:21:36 DEBUG sql sql:run:102 query bindings: {':limit': 1000}\r\n12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: \"SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\') AND last_atime >= ORDER BY last_atime DESC\"\r\n12:21:36 DEBUG completion debug:__exit__:264 Starting url completion took 0.003652 seconds.\r\n12:21:36 ERROR misc crashsignal:exception_hook:205 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py\", line 236, in _update_completion\r\n model = func(*args)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/urlmodel.py\", line 70, in url\r\n hist_cat = histcategory.HistoryCategory(delete_func=_delete_history)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py\", line 54, in __init__\r\n ]), forward_only=False)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/misc/sql.py\", line 83, in __init__\r\n querystr, self.lastError().text()))\r\nqutebrowser.misc.sql.SqlException: Failed to prepare query \"SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\') AND last_atime >= ORDER BY last_atime DESC\": \"near \"ORDER\": syntax error Unable to execute statement\"\r\n```\r\n\r\ncc @rcorre \n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A completion category that queries the SQL History store.\"\"\"\n\nimport re\n\nfrom PyQt5.QtSql import QSqlQueryModel\n\nfrom qutebrowser.misc import sql\nfrom qutebrowser.utils import debug\nfrom qutebrowser.commands import cmdexc\nfrom qutebrowser.config import config\n\n\nclass HistoryCategory(QSqlQueryModel):\n\n \"\"\"A completion category that queries the SQL History store.\"\"\"\n\n def __init__(self, *, delete_func=None, parent=None):\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n\n # replace ' in timestamp-format to avoid breaking the query\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(config.get('completion', 'timestamp-format')\n .replace(\"'\", \"`\")))\n\n self._query = sql.Query(' '.join([\n \"SELECT url, title, {}\".format(timefmt),\n \"FROM CompletionHistory\",\n # the incoming pattern will have literal % and _ escaped with '\\'\n # we need to tell sql to treat '\\' as an escape character\n \"WHERE (url LIKE :pat escape '\\\\' or title LIKE :pat escape '\\\\')\",\n self._atime_expr(),\n \"ORDER BY last_atime DESC\",\n ]), forward_only=False)\n\n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n self.delete_func = delete_func\n\n def _atime_expr(self):\n \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n max_items = config.get('completion', 'web-history-max-items')\n # HistoryCategory should not be added to the completion in that case.\n assert max_items != 0\n\n if max_items < 0:\n return ''\n\n min_atime = sql.Query(' '.join([\n 'SELECT min(last_atime) FROM',\n '(SELECT last_atime FROM CompletionHistory',\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n\n return \"AND last_atime >= {}\".format(min_atime)\n\n def set_pattern(self, pattern):\n \"\"\"Set the pattern used to filter results.\n\n Args:\n pattern: string pattern to filter by.\n \"\"\"\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n # treat spaces as wildcards to match any of the typed words\n pattern = re.sub(r' +', '%', pattern)\n pattern = '%{}%'.format(pattern)\n with debug.log_time('sql', 'Running completion query'):\n self._query.run(pat=pattern)\n self.setQuery(self._query)\n\n def delete_cur_item(self, index):\n \"\"\"Delete the row at the given index.\"\"\"\n if not self.delete_func:\n raise cmdexc.CommandError(\"Cannot delete this item.\")\n data = [self.data(index.sibling(index.row(), i))\n for i in range(self.columnCount())]\n self.delete_func(data)\n # re-run query to reload updated table\n with debug.log_time('sql', 'Re-running completion query post-delete'):\n self._query.run()\n self.setQuery(self._query)\n", "path": "qutebrowser/completion/models/histcategory.py"}]}
2,238
148
gh_patches_debug_32333
rasdani/github-patches
git_diff
airctic__icevision-1091
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Typo lightning -> lighting in Albumentations helper There are several instances in the codebase with the typo `lightning` when the intended term is `lighting` https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35 https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57 https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74 Typo lightning -> lighting in Albumentations helper There are several instances in the codebase with the typo `lightning` when the intended term is `lighting` https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35 https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57 https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74 </issue> <code> [start of icevision/tfms/albumentations/albumentations_helpers.py] 1 __all__ = ["aug_tfms", "resize", "resize_and_pad", "get_size_without_padding"] 2 3 import albumentations as A 4 5 from icevision.imports import * 6 from icevision.core import * 7 8 9 def resize(size, ratio_resize=A.LongestMaxSize): 10 return ratio_resize(size) if isinstance(size, int) else A.Resize(*size[::-1]) 11 12 13 def resize_and_pad( 14 size: Union[int, Tuple[int, int]], 15 pad: A.DualTransform = partial( 16 A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104] 17 ), 18 ): 19 width, height = (size, size) if isinstance(size, int) else size 20 return [resize(size), pad(min_height=height, min_width=width)] 21 22 23 def aug_tfms( 24 size: Union[int, Tuple[int, int]], 25 presize: Optional[Union[int, Tuple[int, int]]] = None, 26 horizontal_flip: Optional[A.HorizontalFlip] = A.HorizontalFlip(), 27 shift_scale_rotate: Optional[A.ShiftScaleRotate] = A.ShiftScaleRotate( 28 rotate_limit=15, 29 ), 30 rgb_shift: Optional[A.RGBShift] = A.RGBShift( 31 r_shift_limit=10, 32 g_shift_limit=10, 33 b_shift_limit=10, 34 ), 35 lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(), 36 blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)), 37 crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5), 38 pad: Optional[A.DualTransform] = partial( 39 A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104] 40 ), 41 ) -> List[A.BasicTransform]: 42 """Collection of useful augmentation transforms. 43 44 # Arguments 45 size: The final size of the image. If an `int` is given, the maximum size of 46 the image is rescaled, maintaing aspect ratio. If a `tuple` is given, 47 the image is rescaled to have that exact size (width, height). 48 presize: Rescale the image before applying other transfroms. If `None` this 49 transform is not applied. First introduced by fastai,this technique is 50 explained in their book in [this](https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb) 51 chapter (tip: search for "Presizing"). 52 horizontal_flip: Flip around the y-axis. If `None` this transform is not applied. 53 shift_scale_rotate: Randomly shift, scale, and rotate. 
If `None` this transform 54 is not applied. 55 rgb_shift: Randomly shift values for each channel of RGB image. If `None` this 56 transform is not applied. 57 lightning: Randomly changes Brightness and Contrast. If `None` this transform 58 is not applied. 59 blur: Randomly blur the image. If `None` this transform is not applied. 60 crop_fn: Randomly crop the image. If `None` this transform is not applied. 61 Use `partial` to saturate other parameters of the class. 62 pad: Pad the image to `size`, squaring the image if `size` is an `int`. 63 If `None` this transform is not applied. Use `partial` to sature other 64 parameters of the class. 65 66 # Returns 67 A list of albumentations transforms. 68 """ 69 70 width, height = (size, size) if isinstance(size, int) else size 71 72 tfms = [] 73 tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None] 74 tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur] 75 # Resize as the last transforms to reduce the number of artificial artifacts created 76 if crop_fn is not None: 77 crop = crop_fn(height=height, width=width) 78 tfms += [A.OneOrOther(crop, resize(size), p=crop.p)] 79 else: 80 tfms += [resize(size)] 81 tfms += [pad(min_height=height, min_width=width) if pad is not None else None] 82 83 tfms = [tfm for tfm in tfms if tfm is not None] 84 85 return tfms 86 87 88 def get_size_without_padding( 89 tfms_list: List[Any], before_tfm_img: PIL.Image.Image, height: int, width: int 90 ) -> Tuple[int, int]: 91 """ 92 Infer the height and width of the pre-processed image after removing padding. 93 94 Parameters 95 ---------- 96 tfms_list: list of albumentations transforms applied to the `before_tfm_img` image 97 before passing it to the model for inference. 98 before_tfm_img: original image before being pre-processed for inference. 99 height: height of output image from icevision `predict` function. 100 width: width of output image from icevision `predict` function. 101 102 Returns 103 ------- 104 height and width of the image coming out of the inference pipeline, after removing padding 105 """ 106 if get_transform(tfms_list, "Pad") is not None: 107 before_pad_h, before_pad_w, _ = np.array(before_tfm_img).shape 108 109 t = get_transform(tfms_list, "SmallestMaxSize") 110 if t is not None: 111 presize = t.max_size 112 height, width = func_max_size(before_pad_h, before_pad_w, presize, min) 113 114 t = get_transform(tfms_list, "LongestMaxSize") 115 if t is not None: 116 size = t.max_size 117 height, width = func_max_size(before_pad_h, before_pad_w, size, max) 118 119 return height, width 120 121 122 def py3round(number: float) -> int: 123 """ 124 Unified rounding in all python versions. Used by albumentations. 125 126 Parameters 127 ---------- 128 number: float to round. 129 130 Returns 131 ------- 132 Rounded number 133 """ 134 if abs(round(number) - number) == 0.5: 135 return int(2.0 * round(number / 2.0)) 136 137 return int(round(number)) 138 139 140 def func_max_size( 141 height: int, width: int, max_size: int, func: Callable[[int, int], int] 142 ) -> Tuple[int, int]: 143 """ 144 Calculate rescaled height and width of the image in question wrt to a specific size. 145 146 Parameters 147 ---------- 148 height: height of the image in question. 149 width: width of the image in question. 150 max_size: size wrt the image needs to be rescaled (resized). 151 func: min/max. Whether to compare max_size to the smallest/longest of the image dims. 
152 153 Returns 154 ------- 155 Rescaled height and width 156 """ 157 scale = max_size / float(func(width, height)) 158 159 if scale != 1.0: 160 height, width = tuple(py3round(dim * scale) for dim in (height, width)) 161 return height, width 162 163 164 def get_transform(tfms_list: List[Any], t: str) -> Any: 165 """ 166 Extract transform `t` from `tfms_list`. 167 168 Parameters 169 ---------- 170 tfms_list: list of albumentations transforms. 171 t: name (str) of the transform to look for and return from within `tfms_list`. 172 173 Returns 174 ------- 175 The `t` transform if found inside `tfms_list`, otherwise None. 176 """ 177 for el in tfms_list: 178 if t in str(type(el)): 179 return el 180 return None 181 [end of icevision/tfms/albumentations/albumentations_helpers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/icevision/tfms/albumentations/albumentations_helpers.py b/icevision/tfms/albumentations/albumentations_helpers.py --- a/icevision/tfms/albumentations/albumentations_helpers.py +++ b/icevision/tfms/albumentations/albumentations_helpers.py @@ -32,7 +32,7 @@ g_shift_limit=10, b_shift_limit=10, ), - lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(), + lighting: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(), blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)), crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5), pad: Optional[A.DualTransform] = partial( @@ -54,7 +54,7 @@ is not applied. rgb_shift: Randomly shift values for each channel of RGB image. If `None` this transform is not applied. - lightning: Randomly changes Brightness and Contrast. If `None` this transform + lighting: Randomly changes Brightness and Contrast. If `None` this transform is not applied. blur: Randomly blur the image. If `None` this transform is not applied. crop_fn: Randomly crop the image. If `None` this transform is not applied. @@ -71,7 +71,7 @@ tfms = [] tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None] - tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur] + tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lighting, blur] # Resize as the last transforms to reduce the number of artificial artifacts created if crop_fn is not None: crop = crop_fn(height=height, width=width)
{"golden_diff": "diff --git a/icevision/tfms/albumentations/albumentations_helpers.py b/icevision/tfms/albumentations/albumentations_helpers.py\n--- a/icevision/tfms/albumentations/albumentations_helpers.py\n+++ b/icevision/tfms/albumentations/albumentations_helpers.py\n@@ -32,7 +32,7 @@\n g_shift_limit=10,\n b_shift_limit=10,\n ),\n- lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\n+ lighting: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\n blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),\n crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),\n pad: Optional[A.DualTransform] = partial(\n@@ -54,7 +54,7 @@\n is not applied.\n rgb_shift: Randomly shift values for each channel of RGB image. If `None` this\n transform is not applied.\n- lightning: Randomly changes Brightness and Contrast. If `None` this transform\n+ lighting: Randomly changes Brightness and Contrast. If `None` this transform\n is not applied.\n blur: Randomly blur the image. If `None` this transform is not applied.\n crop_fn: Randomly crop the image. If `None` this transform is not applied.\n@@ -71,7 +71,7 @@\n \n tfms = []\n tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]\n- tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]\n+ tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lighting, blur]\n # Resize as the last transforms to reduce the number of artificial artifacts created\n if crop_fn is not None:\n crop = crop_fn(height=height, width=width)\n", "issue": "Typo lightning -> lighting in Albumentations helper\nThere are several instances in the codebase with the typo `lightning` when the intended term is `lighting`\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74\r\n\r\n\nTypo lightning -> lighting in Albumentations helper\nThere are several instances in the codebase with the typo `lightning` when the intended term is `lighting`\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74\r\n\r\n\n", "before_files": [{"content": "__all__ = [\"aug_tfms\", \"resize\", \"resize_and_pad\", \"get_size_without_padding\"]\n\nimport albumentations as A\n\nfrom icevision.imports import *\nfrom icevision.core import *\n\n\ndef resize(size, ratio_resize=A.LongestMaxSize):\n return ratio_resize(size) if isinstance(size, int) else A.Resize(*size[::-1])\n\n\ndef resize_and_pad(\n size: Union[int, Tuple[int, int]],\n pad: A.DualTransform = partial(\n A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]\n ),\n):\n width, height = (size, size) if isinstance(size, int) else size\n return [resize(size), pad(min_height=height, min_width=width)]\n\n\ndef aug_tfms(\n size: Union[int, Tuple[int, int]],\n 
presize: Optional[Union[int, Tuple[int, int]]] = None,\n horizontal_flip: Optional[A.HorizontalFlip] = A.HorizontalFlip(),\n shift_scale_rotate: Optional[A.ShiftScaleRotate] = A.ShiftScaleRotate(\n rotate_limit=15,\n ),\n rgb_shift: Optional[A.RGBShift] = A.RGBShift(\n r_shift_limit=10,\n g_shift_limit=10,\n b_shift_limit=10,\n ),\n lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\n blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),\n crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),\n pad: Optional[A.DualTransform] = partial(\n A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]\n ),\n) -> List[A.BasicTransform]:\n \"\"\"Collection of useful augmentation transforms.\n\n # Arguments\n size: The final size of the image. If an `int` is given, the maximum size of\n the image is rescaled, maintaing aspect ratio. If a `tuple` is given,\n the image is rescaled to have that exact size (width, height).\n presize: Rescale the image before applying other transfroms. If `None` this\n transform is not applied. First introduced by fastai,this technique is\n explained in their book in [this](https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb)\n chapter (tip: search for \"Presizing\").\n horizontal_flip: Flip around the y-axis. If `None` this transform is not applied.\n shift_scale_rotate: Randomly shift, scale, and rotate. If `None` this transform\n is not applied.\n rgb_shift: Randomly shift values for each channel of RGB image. If `None` this\n transform is not applied.\n lightning: Randomly changes Brightness and Contrast. If `None` this transform\n is not applied.\n blur: Randomly blur the image. If `None` this transform is not applied.\n crop_fn: Randomly crop the image. If `None` this transform is not applied.\n Use `partial` to saturate other parameters of the class.\n pad: Pad the image to `size`, squaring the image if `size` is an `int`.\n If `None` this transform is not applied. 
Use `partial` to sature other\n parameters of the class.\n\n # Returns\n A list of albumentations transforms.\n \"\"\"\n\n width, height = (size, size) if isinstance(size, int) else size\n\n tfms = []\n tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]\n tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]\n # Resize as the last transforms to reduce the number of artificial artifacts created\n if crop_fn is not None:\n crop = crop_fn(height=height, width=width)\n tfms += [A.OneOrOther(crop, resize(size), p=crop.p)]\n else:\n tfms += [resize(size)]\n tfms += [pad(min_height=height, min_width=width) if pad is not None else None]\n\n tfms = [tfm for tfm in tfms if tfm is not None]\n\n return tfms\n\n\ndef get_size_without_padding(\n tfms_list: List[Any], before_tfm_img: PIL.Image.Image, height: int, width: int\n) -> Tuple[int, int]:\n \"\"\"\n Infer the height and width of the pre-processed image after removing padding.\n\n Parameters\n ----------\n tfms_list: list of albumentations transforms applied to the `before_tfm_img` image\n before passing it to the model for inference.\n before_tfm_img: original image before being pre-processed for inference.\n height: height of output image from icevision `predict` function.\n width: width of output image from icevision `predict` function.\n\n Returns\n -------\n height and width of the image coming out of the inference pipeline, after removing padding\n \"\"\"\n if get_transform(tfms_list, \"Pad\") is not None:\n before_pad_h, before_pad_w, _ = np.array(before_tfm_img).shape\n\n t = get_transform(tfms_list, \"SmallestMaxSize\")\n if t is not None:\n presize = t.max_size\n height, width = func_max_size(before_pad_h, before_pad_w, presize, min)\n\n t = get_transform(tfms_list, \"LongestMaxSize\")\n if t is not None:\n size = t.max_size\n height, width = func_max_size(before_pad_h, before_pad_w, size, max)\n\n return height, width\n\n\ndef py3round(number: float) -> int:\n \"\"\"\n Unified rounding in all python versions. Used by albumentations.\n\n Parameters\n ----------\n number: float to round.\n\n Returns\n -------\n Rounded number\n \"\"\"\n if abs(round(number) - number) == 0.5:\n return int(2.0 * round(number / 2.0))\n\n return int(round(number))\n\n\ndef func_max_size(\n height: int, width: int, max_size: int, func: Callable[[int, int], int]\n) -> Tuple[int, int]:\n \"\"\"\n Calculate rescaled height and width of the image in question wrt to a specific size.\n\n Parameters\n ----------\n height: height of the image in question.\n width: width of the image in question.\n max_size: size wrt the image needs to be rescaled (resized).\n func: min/max. Whether to compare max_size to the smallest/longest of the image dims.\n\n Returns\n -------\n Rescaled height and width\n \"\"\"\n scale = max_size / float(func(width, height))\n\n if scale != 1.0:\n height, width = tuple(py3round(dim * scale) for dim in (height, width))\n return height, width\n\n\ndef get_transform(tfms_list: List[Any], t: str) -> Any:\n \"\"\"\n Extract transform `t` from `tfms_list`.\n\n Parameters\n ----------\n tfms_list: list of albumentations transforms.\n t: name (str) of the transform to look for and return from within `tfms_list`.\n\n Returns\n -------\n The `t` transform if found inside `tfms_list`, otherwise None.\n \"\"\"\n for el in tfms_list:\n if t in str(type(el)):\n return el\n return None\n", "path": "icevision/tfms/albumentations/albumentations_helpers.py"}]}
3,162
447
gh_patches_debug_6977
rasdani/github-patches
git_diff
systemd__mkosi-2315
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `sh` unavailable in sandbox on non `usr`-merged systems mkosi's sandbox [mounts `/usr:/usr`](https://github.com/systemd/mkosi/blob/14b07c74d5b3a8b0f44d9a5c733f7902c7859417/mkosi/sandbox.py#L88) if no ToolsTree is specified. On NixOS, which is not usr-merged, this causes problems when programs have runtime dependencies on absolute paths in `(/usr)/bin`. This is the case for `apt-get install`, that tries to find an `sh` at runtime in `/usr/bin` or `/bin`. That fails on NixOS, due to only `/usr` being mounted, while `sh` is in `/bin/sh`. Two possible workarounds that I have in mind: 1. (Preferred) Find a way to tell `apt-get` which shell to use. `dpkg` [respects the `SHELL` environment variable](https://man7.org/linux/man-pages/man1/dpkg.1.html), but I couldn't find a similar option for `apt-get`. If there would be such an option, we should simply tell `apt-get` to use the `sh` from the Nix store that's already mounted. 2. Loosen the sandbox and mount `/bin(/sh)` into the environment. If you have any input regarding the first proposed solution, let me know. Otherwise, I'd be happy to implement the second solution too. Cheers, Moritz </issue> <code> [start of mkosi/sandbox.py] 1 # SPDX-License-Identifier: LGPL-2.1+ 2 import enum 3 import logging 4 import os 5 import uuid 6 from collections.abc import Sequence 7 from pathlib import Path 8 from typing import Optional 9 10 from mkosi.types import PathString 11 from mkosi.util import INVOKING_USER, flatten, one_zero 12 13 14 # https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h 15 class Capability(enum.Enum): 16 CAP_NET_ADMIN = 12 17 18 19 def have_effective_cap(capability: Capability) -> bool: 20 for line in Path("/proc/self/status").read_text().splitlines(): 21 if line.startswith("CapEff:"): 22 hexcap = line.removeprefix("CapEff:").strip() 23 break 24 else: 25 logging.warning(f"\"CapEff:\" not found in /proc/self/status, assuming we don't have {capability}") 26 return False 27 28 return (int(hexcap, 16) & (1 << capability.value)) != 0 29 30 31 def finalize_passwd_mounts(root: Path) -> list[PathString]: 32 """ 33 If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we 34 run the command, to make sure that the command we run uses user/group information from the apivfs 35 directory instead of from the host. 36 """ 37 options: list[PathString] = [] 38 39 for f in ("passwd", "group", "shadow", "gshadow"): 40 options += ["--ro-bind-try", root / "etc" / f, f"/etc/{f}"] 41 42 return options 43 44 45 def finalize_crypto_mounts(tools: Path = Path("/")) -> list[PathString]: 46 mounts = [ 47 (tools / subdir, Path("/") / subdir) 48 for subdir in ( 49 Path("etc/pki"), 50 Path("etc/ssl"), 51 Path("etc/crypto-policies"), 52 Path("etc/ca-certificates"), 53 Path("etc/pacman.d/gnupg"), 54 Path("var/lib/ca-certificates"), 55 ) 56 if (tools / subdir).exists() 57 ] 58 59 return flatten( 60 ["--ro-bind", src, target] 61 for src, target 62 in sorted(set(mounts), key=lambda s: s[1]) 63 ) 64 65 66 def sandbox_cmd( 67 *, 68 network: bool = False, 69 devices: bool = False, 70 scripts: Optional[Path] = None, 71 tools: Path = Path("/"), 72 relaxed: bool = False, 73 options: Sequence[PathString] = (), 74 ) -> list[PathString]: 75 cmdline: list[PathString] = [] 76 77 if not relaxed: 78 # We want to use an empty subdirectory in the host's /var/tmp as the sandbox's /var/tmp. 
To make sure it only 79 # gets created when we run the sandboxed command and cleaned up when the sandboxed command exits, we create it 80 # using shell. 81 vartmp = f"/var/tmp/mkosi-var-tmp-{uuid.uuid4().hex[:16]}" 82 cmdline += ["sh", "-c", f"trap 'rm -rf {vartmp}' EXIT && mkdir --mode 1777 {vartmp} && $0 \"$@\""] 83 else: 84 vartmp = None 85 86 cmdline += [ 87 "bwrap", 88 "--ro-bind", tools / "usr", "/usr", 89 *(["--unshare-net"] if not network and have_effective_cap(Capability.CAP_NET_ADMIN) else []), 90 "--die-with-parent", 91 "--proc", "/proc", 92 "--setenv", "SYSTEMD_OFFLINE", one_zero(network), 93 ] 94 95 if relaxed: 96 cmdline += ["--bind", "/tmp", "/tmp"] 97 else: 98 cmdline += [ 99 "--tmpfs", "/tmp", 100 "--unshare-ipc", 101 ] 102 103 if (tools / "nix/store").exists(): 104 cmdline += ["--bind", tools / "nix/store", "/nix/store"] 105 106 if devices or relaxed: 107 cmdline += [ 108 "--bind", "/sys", "/sys", 109 "--bind", "/run", "/run", 110 "--dev-bind", "/dev", "/dev", 111 ] 112 else: 113 cmdline += ["--dev", "/dev"] 114 115 if relaxed: 116 dirs = ("/etc", "/opt", "/srv", "/media", "/mnt", "/var", os.fspath(INVOKING_USER.home())) 117 118 for d in dirs: 119 if Path(d).exists(): 120 cmdline += ["--bind", d, d] 121 122 if len(Path.cwd().parents) >= 2: 123 # `Path.parents` only supports slices and negative indexing from Python 3.10 onwards. 124 # TODO: Remove list() when we depend on Python 3.10 or newer. 125 d = os.fspath(list(Path.cwd().parents)[-2]) 126 elif len(Path.cwd().parents) == 1: 127 d = os.fspath(Path.cwd()) 128 else: 129 d = "" 130 131 if d and d not in (*dirs, "/home", "/usr", "/nix", "/tmp"): 132 cmdline += ["--bind", d, d] 133 134 if vartmp: 135 cmdline += ["--bind", vartmp, "/var/tmp"] 136 137 for d in ("bin", "sbin", "lib", "lib32", "lib64"): 138 if (p := tools / d).is_symlink(): 139 cmdline += ["--symlink", p.readlink(), Path("/") / p.relative_to(tools)] 140 141 path = "/usr/bin:/usr/sbin" if tools != Path("/") else os.environ["PATH"] 142 143 cmdline += [ 144 "--setenv", "PATH", f"{scripts or ''}:{path}", 145 *options, 146 ] 147 148 if not relaxed: 149 cmdline += ["--symlink", "../proc/self/mounts", "/etc/mtab"] 150 151 # If we're using /usr from a tools tree, we have to use /etc/alternatives from the tools tree as well if it 152 # exists since that points directly back to /usr. Apply this after the options so the caller can mount 153 # something else to /etc without overriding this mount. In relaxed mode, we only do this if /etc/alternatives 154 # already exists on the host as otherwise we'd modify the host's /etc by creating the mountpoint ourselves (or 155 # fail when trying to create it). 156 if (tools / "etc/alternatives").exists() and (not relaxed or Path("/etc/alternatives").exists()): 157 cmdline += ["--ro-bind", tools / "etc/alternatives", "/etc/alternatives"] 158 159 if scripts: 160 cmdline += ["--ro-bind", scripts, scripts] 161 162 if network and not relaxed: 163 cmdline += ["--bind", "/etc/resolv.conf", "/etc/resolv.conf"] 164 165 # bubblewrap creates everything with a restricted mode so relax stuff as needed. 
166 ops = [] 167 if not devices: 168 ops += ["chmod 1777 /dev/shm"] 169 if not relaxed: 170 ops += ["chmod 755 /etc"] 171 ops += ["exec $0 \"$@\""] 172 173 cmdline += ["sh", "-c", " && ".join(ops)] 174 175 return cmdline 176 177 178 def apivfs_cmd(root: Path) -> list[PathString]: 179 return [ 180 "bwrap", 181 "--dev-bind", "/", "/", 182 "--tmpfs", root / "run", 183 "--tmpfs", root / "tmp", 184 "--bind", "/var/tmp", root / "var/tmp", 185 "--proc", root / "proc", 186 "--dev", root / "dev", 187 # APIVFS generally means chrooting is going to happen so unset TMPDIR just to be safe. 188 "--unsetenv", "TMPDIR", 189 # Make sure /etc/machine-id is not overwritten by any package manager post install scripts. 190 "--ro-bind-try", root / "etc/machine-id", root / "etc/machine-id", 191 *finalize_passwd_mounts(root), 192 "sh", "-c", 193 f"chmod 1777 {root / 'tmp'} {root / 'var/tmp'} {root / 'dev/shm'} && " 194 f"chmod 755 {root / 'run'} && " 195 # Make sure anything running in the root directory thinks it's in a container. $container can't always be 196 # accessed so we write /run/host/container-manager as well which is always accessible. 197 f"mkdir -m 755 {root}/run/host && echo mkosi >{root}/run/host/container-manager && " 198 "exec $0 \"$@\"", 199 ] 200 201 202 def chroot_cmd(root: Path, *, resolve: bool = False, options: Sequence[PathString] = ()) -> list[PathString]: 203 cmdline: list[PathString] = [ 204 "sh", "-c", 205 f"trap 'rm -rf {root / 'work'}' EXIT && " 206 # /etc/resolv.conf can be a dangling symlink to /run/systemd/resolve/stub-resolv.conf. Bubblewrap tries to call 207 # mkdir() on each component of the path which means it will try to call 208 # mkdir(/run/systemd/resolve/stub-resolv.conf) which will fail unless /run/systemd/resolve exists already so 209 # we make sure that it already exists. 210 f"mkdir -p -m 755 {root / 'work'} {root / 'run/systemd'} {root / 'run/systemd/resolve'} && " 211 # No exec here because we need to clean up the /work directory afterwards. 212 f"$0 \"$@\"", 213 "bwrap", 214 "--dev-bind", root, "/", 215 "--setenv", "container", "mkosi", 216 "--setenv", "HOME", "/", 217 "--setenv", "PATH", "/work/scripts:/usr/bin:/usr/sbin", 218 ] 219 220 if resolve: 221 cmdline += ["--ro-bind-try", "/etc/resolv.conf", "/etc/resolv.conf"] 222 223 cmdline += options 224 225 return apivfs_cmd(root) + cmdline 226 [end of mkosi/sandbox.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mkosi/sandbox.py b/mkosi/sandbox.py --- a/mkosi/sandbox.py +++ b/mkosi/sandbox.py @@ -137,6 +137,8 @@ for d in ("bin", "sbin", "lib", "lib32", "lib64"): if (p := tools / d).is_symlink(): cmdline += ["--symlink", p.readlink(), Path("/") / p.relative_to(tools)] + elif p.is_dir(): + cmdline += ["--ro-bind", p, Path("/") / p.relative_to(tools)] path = "/usr/bin:/usr/sbin" if tools != Path("/") else os.environ["PATH"]
{"golden_diff": "diff --git a/mkosi/sandbox.py b/mkosi/sandbox.py\n--- a/mkosi/sandbox.py\n+++ b/mkosi/sandbox.py\n@@ -137,6 +137,8 @@\n for d in (\"bin\", \"sbin\", \"lib\", \"lib32\", \"lib64\"):\n if (p := tools / d).is_symlink():\n cmdline += [\"--symlink\", p.readlink(), Path(\"/\") / p.relative_to(tools)]\n+ elif p.is_dir():\n+ cmdline += [\"--ro-bind\", p, Path(\"/\") / p.relative_to(tools)]\n \n path = \"/usr/bin:/usr/sbin\" if tools != Path(\"/\") else os.environ[\"PATH\"]\n", "issue": "`sh` unavailable in sandbox on non `usr`-merged systems\nmkosi's sandbox [mounts `/usr:/usr`](https://github.com/systemd/mkosi/blob/14b07c74d5b3a8b0f44d9a5c733f7902c7859417/mkosi/sandbox.py#L88) if no ToolsTree is specified. On NixOS, which is not usr-merged, this causes problems when programs have runtime dependencies on absolute paths in `(/usr)/bin`. This is the case for `apt-get install`, that tries to find an `sh` at runtime in `/usr/bin` or `/bin`. That fails on NixOS, due to only `/usr` being mounted, while `sh` is in `/bin/sh`.\r\n\r\nTwo possible workarounds that I have in mind:\r\n1. (Preferred) Find a way to tell `apt-get` which shell to use. `dpkg` [respects the `SHELL` environment variable](https://man7.org/linux/man-pages/man1/dpkg.1.html), but I couldn't find a similar option for `apt-get`. If there would be such an option, we should simply tell `apt-get` to use the `sh` from the Nix store that's already mounted.\r\n2. Loosen the sandbox and mount `/bin(/sh)` into the environment. \r\n\r\nIf you have any input regarding the first proposed solution, let me know. Otherwise, I'd be happy to implement the second solution too.\r\n\r\nCheers, Moritz\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\nimport enum\nimport logging\nimport os\nimport uuid\nfrom collections.abc import Sequence\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom mkosi.types import PathString\nfrom mkosi.util import INVOKING_USER, flatten, one_zero\n\n\n# https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h\nclass Capability(enum.Enum):\n CAP_NET_ADMIN = 12\n\n\ndef have_effective_cap(capability: Capability) -> bool:\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"CapEff:\"):\n hexcap = line.removeprefix(\"CapEff:\").strip()\n break\n else:\n logging.warning(f\"\\\"CapEff:\\\" not found in /proc/self/status, assuming we don't have {capability}\")\n return False\n\n return (int(hexcap, 16) & (1 << capability.value)) != 0\n\n\ndef finalize_passwd_mounts(root: Path) -> list[PathString]:\n \"\"\"\n If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we\n run the command, to make sure that the command we run uses user/group information from the apivfs\n directory instead of from the host.\n \"\"\"\n options: list[PathString] = []\n\n for f in (\"passwd\", \"group\", \"shadow\", \"gshadow\"):\n options += [\"--ro-bind-try\", root / \"etc\" / f, f\"/etc/{f}\"]\n\n return options\n\n\ndef finalize_crypto_mounts(tools: Path = Path(\"/\")) -> list[PathString]:\n mounts = [\n (tools / subdir, Path(\"/\") / subdir)\n for subdir in (\n Path(\"etc/pki\"),\n Path(\"etc/ssl\"),\n Path(\"etc/crypto-policies\"),\n Path(\"etc/ca-certificates\"),\n Path(\"etc/pacman.d/gnupg\"),\n Path(\"var/lib/ca-certificates\"),\n )\n if (tools / subdir).exists()\n ]\n\n return flatten(\n [\"--ro-bind\", src, target]\n for src, target\n in sorted(set(mounts), key=lambda s: s[1])\n )\n\n\ndef 
sandbox_cmd(\n *,\n network: bool = False,\n devices: bool = False,\n scripts: Optional[Path] = None,\n tools: Path = Path(\"/\"),\n relaxed: bool = False,\n options: Sequence[PathString] = (),\n) -> list[PathString]:\n cmdline: list[PathString] = []\n\n if not relaxed:\n # We want to use an empty subdirectory in the host's /var/tmp as the sandbox's /var/tmp. To make sure it only\n # gets created when we run the sandboxed command and cleaned up when the sandboxed command exits, we create it\n # using shell.\n vartmp = f\"/var/tmp/mkosi-var-tmp-{uuid.uuid4().hex[:16]}\"\n cmdline += [\"sh\", \"-c\", f\"trap 'rm -rf {vartmp}' EXIT && mkdir --mode 1777 {vartmp} && $0 \\\"$@\\\"\"]\n else:\n vartmp = None\n\n cmdline += [\n \"bwrap\",\n \"--ro-bind\", tools / \"usr\", \"/usr\",\n *([\"--unshare-net\"] if not network and have_effective_cap(Capability.CAP_NET_ADMIN) else []),\n \"--die-with-parent\",\n \"--proc\", \"/proc\",\n \"--setenv\", \"SYSTEMD_OFFLINE\", one_zero(network),\n ]\n\n if relaxed:\n cmdline += [\"--bind\", \"/tmp\", \"/tmp\"]\n else:\n cmdline += [\n \"--tmpfs\", \"/tmp\",\n \"--unshare-ipc\",\n ]\n\n if (tools / \"nix/store\").exists():\n cmdline += [\"--bind\", tools / \"nix/store\", \"/nix/store\"]\n\n if devices or relaxed:\n cmdline += [\n \"--bind\", \"/sys\", \"/sys\",\n \"--bind\", \"/run\", \"/run\",\n \"--dev-bind\", \"/dev\", \"/dev\",\n ]\n else:\n cmdline += [\"--dev\", \"/dev\"]\n\n if relaxed:\n dirs = (\"/etc\", \"/opt\", \"/srv\", \"/media\", \"/mnt\", \"/var\", os.fspath(INVOKING_USER.home()))\n\n for d in dirs:\n if Path(d).exists():\n cmdline += [\"--bind\", d, d]\n\n if len(Path.cwd().parents) >= 2:\n # `Path.parents` only supports slices and negative indexing from Python 3.10 onwards.\n # TODO: Remove list() when we depend on Python 3.10 or newer.\n d = os.fspath(list(Path.cwd().parents)[-2])\n elif len(Path.cwd().parents) == 1:\n d = os.fspath(Path.cwd())\n else:\n d = \"\"\n\n if d and d not in (*dirs, \"/home\", \"/usr\", \"/nix\", \"/tmp\"):\n cmdline += [\"--bind\", d, d]\n\n if vartmp:\n cmdline += [\"--bind\", vartmp, \"/var/tmp\"]\n\n for d in (\"bin\", \"sbin\", \"lib\", \"lib32\", \"lib64\"):\n if (p := tools / d).is_symlink():\n cmdline += [\"--symlink\", p.readlink(), Path(\"/\") / p.relative_to(tools)]\n\n path = \"/usr/bin:/usr/sbin\" if tools != Path(\"/\") else os.environ[\"PATH\"]\n\n cmdline += [\n \"--setenv\", \"PATH\", f\"{scripts or ''}:{path}\",\n *options,\n ]\n\n if not relaxed:\n cmdline += [\"--symlink\", \"../proc/self/mounts\", \"/etc/mtab\"]\n\n # If we're using /usr from a tools tree, we have to use /etc/alternatives from the tools tree as well if it\n # exists since that points directly back to /usr. Apply this after the options so the caller can mount\n # something else to /etc without overriding this mount. 
In relaxed mode, we only do this if /etc/alternatives\n # already exists on the host as otherwise we'd modify the host's /etc by creating the mountpoint ourselves (or\n # fail when trying to create it).\n if (tools / \"etc/alternatives\").exists() and (not relaxed or Path(\"/etc/alternatives\").exists()):\n cmdline += [\"--ro-bind\", tools / \"etc/alternatives\", \"/etc/alternatives\"]\n\n if scripts:\n cmdline += [\"--ro-bind\", scripts, scripts]\n\n if network and not relaxed:\n cmdline += [\"--bind\", \"/etc/resolv.conf\", \"/etc/resolv.conf\"]\n\n # bubblewrap creates everything with a restricted mode so relax stuff as needed.\n ops = []\n if not devices:\n ops += [\"chmod 1777 /dev/shm\"]\n if not relaxed:\n ops += [\"chmod 755 /etc\"]\n ops += [\"exec $0 \\\"$@\\\"\"]\n\n cmdline += [\"sh\", \"-c\", \" && \".join(ops)]\n\n return cmdline\n\n\ndef apivfs_cmd(root: Path) -> list[PathString]:\n return [\n \"bwrap\",\n \"--dev-bind\", \"/\", \"/\",\n \"--tmpfs\", root / \"run\",\n \"--tmpfs\", root / \"tmp\",\n \"--bind\", \"/var/tmp\", root / \"var/tmp\",\n \"--proc\", root / \"proc\",\n \"--dev\", root / \"dev\",\n # APIVFS generally means chrooting is going to happen so unset TMPDIR just to be safe.\n \"--unsetenv\", \"TMPDIR\",\n # Make sure /etc/machine-id is not overwritten by any package manager post install scripts.\n \"--ro-bind-try\", root / \"etc/machine-id\", root / \"etc/machine-id\",\n *finalize_passwd_mounts(root),\n \"sh\", \"-c\",\n f\"chmod 1777 {root / 'tmp'} {root / 'var/tmp'} {root / 'dev/shm'} && \"\n f\"chmod 755 {root / 'run'} && \"\n # Make sure anything running in the root directory thinks it's in a container. $container can't always be\n # accessed so we write /run/host/container-manager as well which is always accessible.\n f\"mkdir -m 755 {root}/run/host && echo mkosi >{root}/run/host/container-manager && \"\n \"exec $0 \\\"$@\\\"\",\n ]\n\n\ndef chroot_cmd(root: Path, *, resolve: bool = False, options: Sequence[PathString] = ()) -> list[PathString]:\n cmdline: list[PathString] = [\n \"sh\", \"-c\",\n f\"trap 'rm -rf {root / 'work'}' EXIT && \"\n # /etc/resolv.conf can be a dangling symlink to /run/systemd/resolve/stub-resolv.conf. Bubblewrap tries to call\n # mkdir() on each component of the path which means it will try to call\n # mkdir(/run/systemd/resolve/stub-resolv.conf) which will fail unless /run/systemd/resolve exists already so\n # we make sure that it already exists.\n f\"mkdir -p -m 755 {root / 'work'} {root / 'run/systemd'} {root / 'run/systemd/resolve'} && \"\n # No exec here because we need to clean up the /work directory afterwards.\n f\"$0 \\\"$@\\\"\",\n \"bwrap\",\n \"--dev-bind\", root, \"/\",\n \"--setenv\", \"container\", \"mkosi\",\n \"--setenv\", \"HOME\", \"/\",\n \"--setenv\", \"PATH\", \"/work/scripts:/usr/bin:/usr/sbin\",\n ]\n\n if resolve:\n cmdline += [\"--ro-bind-try\", \"/etc/resolv.conf\", \"/etc/resolv.conf\"]\n\n cmdline += options\n\n return apivfs_cmd(root) + cmdline\n", "path": "mkosi/sandbox.py"}]}
3,639
156
gh_patches_debug_35400
rasdani/github-patches
git_diff
opsdroid__opsdroid-167
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Module specific site-packages It could be good for modules to install their dependancies in a specific `site-packages` directory which is only added to the path when it is time to import the modules. A good place could be `~/.opsdroid/site-packages`. </issue> <code> [start of opsdroid/loader.py] 1 """Class for loading in modules to OpsDroid.""" 2 3 import logging 4 import os 5 import sys 6 import shutil 7 import subprocess 8 import importlib 9 import yaml 10 from opsdroid.const import ( 11 DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH, 12 DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE) 13 14 15 _LOGGER = logging.getLogger(__name__) 16 17 18 class Loader: 19 """Class to load in config and modules.""" 20 21 def __init__(self, opsdroid): 22 """Create object with opsdroid instance.""" 23 self.opsdroid = opsdroid 24 self.modules_directory = None 25 self.current_import_config = None 26 _LOGGER.debug("Loaded loader") 27 28 @staticmethod 29 def import_module(config): 30 """Import module namespace as variable and return it.""" 31 try: 32 module = importlib.import_module( 33 config["module_path"] + "." + config["name"]) 34 _LOGGER.debug("Loaded " + config["type"] + ": " + 35 config["module_path"]) 36 return module 37 except ImportError as error: 38 _LOGGER.debug("Failed to load " + config["type"] + 39 " " + config["module_path"] + "." + config["name"]) 40 _LOGGER.debug(error) 41 42 try: 43 module = importlib.import_module( 44 config["module_path"]) 45 _LOGGER.debug("Loaded " + config["type"] + ": " + 46 config["module_path"]) 47 return module 48 except ImportError as error: 49 _LOGGER.debug("Failed to load " + config["type"] + 50 " " + config["module_path"]) 51 _LOGGER.debug(error) 52 53 _LOGGER.error("Failed to load " + config["type"] + 54 " " + config["module_path"]) 55 return None 56 57 @staticmethod 58 def check_cache(config): 59 """Remove module if 'no-cache' set in config.""" 60 if "no-cache" in config \ 61 and config["no-cache"]: 62 _LOGGER.debug("'no-cache' set, removing " + config["install_path"]) 63 if os.path.isdir(config["install_path"]): 64 shutil.rmtree(config["install_path"]) 65 if os.path.isfile(config["install_path"] + ".py"): 66 os.remove(config["install_path"] + ".py") 67 68 def build_module_path(self, path_type, config): 69 """Generate the module path from name and type.""" 70 if path_type == "import": 71 return MODULES_DIRECTORY + "." + config["type"] + \ 72 "." 
+ config["name"] 73 elif path_type == "install": 74 return self.modules_directory + "/" + config["type"] + \ 75 "/" + config["name"] 76 77 @staticmethod 78 def git_clone(git_url, install_path, branch): 79 """Clone a git repo to a location and wait for finish.""" 80 process = subprocess.Popen(["git", "clone", "-b", branch, 81 git_url, install_path], shell=False, 82 stdout=subprocess.PIPE, 83 stderr=subprocess.PIPE) 84 for output in process.communicate(): 85 if output != "": 86 for line in output.splitlines(): 87 _LOGGER.debug(str(line).strip()) 88 process.wait() 89 90 @staticmethod 91 def pip_install_deps(requirements_path): 92 """Pip install a requirements.txt file and wait for finish.""" 93 process = subprocess.Popen(["pip", "install", "-r", requirements_path], 94 shell=False, 95 stdout=subprocess.PIPE, 96 stderr=subprocess.PIPE) 97 for output in process.communicate(): 98 if output != "": 99 for line in output.splitlines(): 100 _LOGGER.debug(str(line).strip()) 101 process.wait() 102 103 @staticmethod 104 def create_default_config(config_path): 105 """Create a default config file based on the included example.""" 106 _LOGGER.info("Creating %s.", config_path) 107 config_dir, _ = os.path.split(config_path) 108 if not os.path.isdir(config_dir): 109 os.makedirs(config_dir) 110 shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path) 111 return config_path 112 113 def load_config_file(self, config_paths): 114 """Load a yaml config file from path.""" 115 config_path = "" 116 for possible_path in config_paths: 117 if not os.path.isfile(possible_path): 118 _LOGGER.debug("Config file " + possible_path + 119 " not found") 120 else: 121 config_path = possible_path 122 break 123 124 if not config_path: 125 _LOGGER.info("No configuration files found.") 126 config_path = self.create_default_config(DEFAULT_CONFIG_PATH) 127 128 try: 129 with open(config_path, 'r') as stream: 130 _LOGGER.info("Loaded config from %s", config_path) 131 return yaml.load(stream) 132 except yaml.YAMLError as error: 133 self.opsdroid.critical(error, 1) 134 except FileNotFoundError as error: 135 self.opsdroid.critical(str(error), 1) 136 137 def setup_modules_directory(self, config): 138 """Create and configure the modules directory.""" 139 module_path = os.path.expanduser( 140 config.get("module-path", DEFAULT_MODULES_PATH)) 141 sys.path.append(module_path) 142 143 if not os.path.isdir(module_path): 144 os.makedirs(module_path, exist_ok=True) 145 146 self.modules_directory = os.path.join(module_path, MODULES_DIRECTORY) 147 148 # Create modules directory if doesn't exist 149 if not os.path.isdir(self.modules_directory): 150 os.makedirs(self.modules_directory) 151 152 def load_modules_from_config(self, config): 153 """Load all module types based on config.""" 154 _LOGGER.debug("Loading modules from config") 155 156 self.setup_modules_directory(config) 157 158 connectors, databases, skills = None, None, None 159 160 if 'databases' in config.keys(): 161 databases = self._load_modules('database', config['databases']) 162 else: 163 _LOGGER.warning("No databases in configuration") 164 165 if 'skills' in config.keys(): 166 skills = self._load_modules('skill', config['skills']) 167 else: 168 self.opsdroid.critical( 169 "No skills in configuration, at least 1 required", 1) 170 171 if 'connectors' in config.keys(): 172 connectors = self._load_modules('connector', config['connectors']) 173 else: 174 self.opsdroid.critical( 175 "No connectors in configuration, at least 1 required", 1) 176 177 return connectors, databases, skills 178 179 def 
_load_modules(self, modules_type, modules): 180 """Install and load modules.""" 181 _LOGGER.debug("Loading " + modules_type + " modules") 182 loaded_modules = [] 183 184 for module in modules: 185 186 # Set up module config 187 config = module 188 config = {} if config is None else config 189 config["name"] = module["name"] 190 config["type"] = modules_type 191 config["module_path"] = self.build_module_path("import", config) 192 config["install_path"] = self.build_module_path("install", config) 193 if "branch" not in config: 194 config["branch"] = DEFAULT_MODULE_BRANCH 195 196 # Remove module for reinstall if no-cache set 197 self.check_cache(config) 198 199 # Install module 200 self._install_module(config) 201 202 # Import module 203 self.current_import_config = config 204 module = self.import_module(config) 205 if module is not None: 206 loaded_modules.append({ 207 "module": module, 208 "config": config}) 209 else: 210 _LOGGER.error( 211 "Module " + config["name"] + " failed to import") 212 213 return loaded_modules 214 215 def _install_module(self, config): 216 # pylint: disable=R0201 217 """Install a module.""" 218 _LOGGER.debug("Installing " + config["name"]) 219 220 if os.path.isdir(config["install_path"]) or \ 221 os.path.isfile(config["install_path"] + ".py"): 222 # TODO Allow for updating or reinstalling of modules 223 _LOGGER.debug("Module " + config["name"] + 224 " already installed, skipping") 225 return 226 227 if "path" in config: 228 self._install_local_module(config) 229 else: 230 self._install_git_module(config) 231 232 if os.path.isdir(config["install_path"]): 233 _LOGGER.debug("Installed " + config["name"] + 234 " to " + config["install_path"]) 235 else: 236 _LOGGER.debug("Install of " + config["name"] + " failed") 237 238 # Install module dependancies 239 if os.path.isfile(config["install_path"] + "/requirements.txt"): 240 self.pip_install_deps(config["install_path"] + 241 "/requirements.txt") 242 243 def _install_git_module(self, config): 244 """Install a module from a git repository.""" 245 if config is not None and "repo" in config: 246 git_url = config["repo"] 247 else: 248 git_url = DEFAULT_GIT_URL + config["type"] + \ 249 "-" + config["name"] + ".git" 250 251 if any(prefix in git_url for prefix in ["http", "https", "ssh"]): 252 # TODO Test if url or ssh path exists 253 # TODO Handle github authentication 254 _LOGGER.debug("Cloning from remote repository") 255 self.git_clone(git_url, config["install_path"], 256 config["branch"]) 257 else: 258 if os.path.isdir(git_url): 259 _LOGGER.debug("Cloning from local repository") 260 self.git_clone(git_url, config["install_path"], 261 config["branch"]) 262 else: 263 _LOGGER.debug("Could not find local git repo " + git_url) 264 265 @staticmethod 266 def _install_local_module(config): 267 """Install a module from a local path.""" 268 installed = False 269 270 installdir, _ = os.path.split(config["install_path"]) 271 if not os.path.isdir(installdir): 272 os.makedirs(installdir, exist_ok=True) 273 274 if os.path.isdir(config["path"]): 275 shutil.copytree(config["path"], config["install_path"]) 276 installed = True 277 278 if os.path.isfile(config["path"]): 279 os.makedirs(config["install_path"], exist_ok=True) 280 shutil.copyfile(config["path"], config["install_path"] + 281 "/__init__.py") 282 installed = True 283 284 if not installed: 285 _LOGGER.error("Failed to install from " + config["path"]) 286 [end of opsdroid/loader.py] [start of opsdroid/const.py] 1 """Constants used by OpsDroid.""" 2 import os 3 4 __version__ = "0.8.0" 
5 6 LOG_FILENAME = 'output.log' 7 DEFAULT_GIT_URL = "https://github.com/opsdroid/" 8 MODULES_DIRECTORY = "opsdroid-modules" 9 DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid") 10 DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules") 11 DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml") 12 DEFAULT_MODULE_BRANCH = "master" 13 EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 14 "configuration/example_configuration.yaml") 15 [end of opsdroid/const.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opsdroid/const.py b/opsdroid/const.py --- a/opsdroid/const.py +++ b/opsdroid/const.py @@ -8,6 +8,7 @@ MODULES_DIRECTORY = "opsdroid-modules" DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid") DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules") +DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, "site-packages") DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml") DEFAULT_MODULE_BRANCH = "master" EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), diff --git a/opsdroid/loader.py b/opsdroid/loader.py --- a/opsdroid/loader.py +++ b/opsdroid/loader.py @@ -9,7 +9,8 @@ import yaml from opsdroid.const import ( DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH, - DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE) + DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE, + DEFAULT_MODULE_DEPS_PATH) _LOGGER = logging.getLogger(__name__) @@ -90,7 +91,11 @@ @staticmethod def pip_install_deps(requirements_path): """Pip install a requirements.txt file and wait for finish.""" - process = subprocess.Popen(["pip", "install", "-r", requirements_path], + process = subprocess.Popen(["pip", "install", + "--target={}".format( + DEFAULT_MODULE_DEPS_PATH), + "--ignore-installed", + "-r", requirements_path], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -181,6 +186,10 @@ _LOGGER.debug("Loading " + modules_type + " modules") loaded_modules = [] + if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH): + os.makedirs(DEFAULT_MODULE_DEPS_PATH) + sys.path.append(DEFAULT_MODULE_DEPS_PATH) + for module in modules: # Set up module config
{"golden_diff": "diff --git a/opsdroid/const.py b/opsdroid/const.py\n--- a/opsdroid/const.py\n+++ b/opsdroid/const.py\n@@ -8,6 +8,7 @@\n MODULES_DIRECTORY = \"opsdroid-modules\"\n DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\n DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\n+DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\n DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\n DEFAULT_MODULE_BRANCH = \"master\"\n EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\ndiff --git a/opsdroid/loader.py b/opsdroid/loader.py\n--- a/opsdroid/loader.py\n+++ b/opsdroid/loader.py\n@@ -9,7 +9,8 @@\n import yaml\n from opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\n- DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE)\n+ DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE,\n+ DEFAULT_MODULE_DEPS_PATH)\n \n \n _LOGGER = logging.getLogger(__name__)\n@@ -90,7 +91,11 @@\n @staticmethod\n def pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n- process = subprocess.Popen([\"pip\", \"install\", \"-r\", requirements_path],\n+ process = subprocess.Popen([\"pip\", \"install\",\n+ \"--target={}\".format(\n+ DEFAULT_MODULE_DEPS_PATH),\n+ \"--ignore-installed\",\n+ \"-r\", requirements_path],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n@@ -181,6 +186,10 @@\n _LOGGER.debug(\"Loading \" + modules_type + \" modules\")\n loaded_modules = []\n \n+ if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):\n+ os.makedirs(DEFAULT_MODULE_DEPS_PATH)\n+ sys.path.append(DEFAULT_MODULE_DEPS_PATH)\n+\n for module in modules:\n \n # Set up module config\n", "issue": "Module specific site-packages\nIt could be good for modules to install their dependancies in a specific `site-packages` directory which is only added to the path when it is time to import the modules.\r\n\r\nA good place could be `~/.opsdroid/site-packages`.\n", "before_files": [{"content": "\"\"\"Class for loading in modules to OpsDroid.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport shutil\nimport subprocess\nimport importlib\nimport yaml\nfrom opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\n DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE)\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Loader:\n \"\"\"Class to load in config and modules.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Create object with opsdroid instance.\"\"\"\n self.opsdroid = opsdroid\n self.modules_directory = None\n self.current_import_config = None\n _LOGGER.debug(\"Loaded loader\")\n\n @staticmethod\n def import_module(config):\n \"\"\"Import module namespace as variable and return it.\"\"\"\n try:\n module = importlib.import_module(\n config[\"module_path\"] + \".\" + config[\"name\"])\n _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n config[\"module_path\"])\n return module\n except ImportError as error:\n _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"] + \".\" + config[\"name\"])\n _LOGGER.debug(error)\n\n try:\n module = importlib.import_module(\n config[\"module_path\"])\n _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n config[\"module_path\"])\n return module\n except ImportError as error:\n _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"])\n 
_LOGGER.debug(error)\n\n _LOGGER.error(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"])\n return None\n\n @staticmethod\n def check_cache(config):\n \"\"\"Remove module if 'no-cache' set in config.\"\"\"\n if \"no-cache\" in config \\\n and config[\"no-cache\"]:\n _LOGGER.debug(\"'no-cache' set, removing \" + config[\"install_path\"])\n if os.path.isdir(config[\"install_path\"]):\n shutil.rmtree(config[\"install_path\"])\n if os.path.isfile(config[\"install_path\"] + \".py\"):\n os.remove(config[\"install_path\"] + \".py\")\n\n def build_module_path(self, path_type, config):\n \"\"\"Generate the module path from name and type.\"\"\"\n if path_type == \"import\":\n return MODULES_DIRECTORY + \".\" + config[\"type\"] + \\\n \".\" + config[\"name\"]\n elif path_type == \"install\":\n return self.modules_directory + \"/\" + config[\"type\"] + \\\n \"/\" + config[\"name\"]\n\n @staticmethod\n def git_clone(git_url, install_path, branch):\n \"\"\"Clone a git repo to a location and wait for finish.\"\"\"\n process = subprocess.Popen([\"git\", \"clone\", \"-b\", branch,\n git_url, install_path], shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n _LOGGER.debug(str(line).strip())\n process.wait()\n\n @staticmethod\n def pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n process = subprocess.Popen([\"pip\", \"install\", \"-r\", requirements_path],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n _LOGGER.debug(str(line).strip())\n process.wait()\n\n @staticmethod\n def create_default_config(config_path):\n \"\"\"Create a default config file based on the included example.\"\"\"\n _LOGGER.info(\"Creating %s.\", config_path)\n config_dir, _ = os.path.split(config_path)\n if not os.path.isdir(config_dir):\n os.makedirs(config_dir)\n shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)\n return config_path\n\n def load_config_file(self, config_paths):\n \"\"\"Load a yaml config file from path.\"\"\"\n config_path = \"\"\n for possible_path in config_paths:\n if not os.path.isfile(possible_path):\n _LOGGER.debug(\"Config file \" + possible_path +\n \" not found\")\n else:\n config_path = possible_path\n break\n\n if not config_path:\n _LOGGER.info(\"No configuration files found.\")\n config_path = self.create_default_config(DEFAULT_CONFIG_PATH)\n\n try:\n with open(config_path, 'r') as stream:\n _LOGGER.info(\"Loaded config from %s\", config_path)\n return yaml.load(stream)\n except yaml.YAMLError as error:\n self.opsdroid.critical(error, 1)\n except FileNotFoundError as error:\n self.opsdroid.critical(str(error), 1)\n\n def setup_modules_directory(self, config):\n \"\"\"Create and configure the modules directory.\"\"\"\n module_path = os.path.expanduser(\n config.get(\"module-path\", DEFAULT_MODULES_PATH))\n sys.path.append(module_path)\n\n if not os.path.isdir(module_path):\n os.makedirs(module_path, exist_ok=True)\n\n self.modules_directory = os.path.join(module_path, MODULES_DIRECTORY)\n\n # Create modules directory if doesn't exist\n if not os.path.isdir(self.modules_directory):\n os.makedirs(self.modules_directory)\n\n def load_modules_from_config(self, config):\n \"\"\"Load all module types based on config.\"\"\"\n _LOGGER.debug(\"Loading modules from config\")\n\n 
self.setup_modules_directory(config)\n\n connectors, databases, skills = None, None, None\n\n if 'databases' in config.keys():\n databases = self._load_modules('database', config['databases'])\n else:\n _LOGGER.warning(\"No databases in configuration\")\n\n if 'skills' in config.keys():\n skills = self._load_modules('skill', config['skills'])\n else:\n self.opsdroid.critical(\n \"No skills in configuration, at least 1 required\", 1)\n\n if 'connectors' in config.keys():\n connectors = self._load_modules('connector', config['connectors'])\n else:\n self.opsdroid.critical(\n \"No connectors in configuration, at least 1 required\", 1)\n\n return connectors, databases, skills\n\n def _load_modules(self, modules_type, modules):\n \"\"\"Install and load modules.\"\"\"\n _LOGGER.debug(\"Loading \" + modules_type + \" modules\")\n loaded_modules = []\n\n for module in modules:\n\n # Set up module config\n config = module\n config = {} if config is None else config\n config[\"name\"] = module[\"name\"]\n config[\"type\"] = modules_type\n config[\"module_path\"] = self.build_module_path(\"import\", config)\n config[\"install_path\"] = self.build_module_path(\"install\", config)\n if \"branch\" not in config:\n config[\"branch\"] = DEFAULT_MODULE_BRANCH\n\n # Remove module for reinstall if no-cache set\n self.check_cache(config)\n\n # Install module\n self._install_module(config)\n\n # Import module\n self.current_import_config = config\n module = self.import_module(config)\n if module is not None:\n loaded_modules.append({\n \"module\": module,\n \"config\": config})\n else:\n _LOGGER.error(\n \"Module \" + config[\"name\"] + \" failed to import\")\n\n return loaded_modules\n\n def _install_module(self, config):\n # pylint: disable=R0201\n \"\"\"Install a module.\"\"\"\n _LOGGER.debug(\"Installing \" + config[\"name\"])\n\n if os.path.isdir(config[\"install_path\"]) or \\\n os.path.isfile(config[\"install_path\"] + \".py\"):\n # TODO Allow for updating or reinstalling of modules\n _LOGGER.debug(\"Module \" + config[\"name\"] +\n \" already installed, skipping\")\n return\n\n if \"path\" in config:\n self._install_local_module(config)\n else:\n self._install_git_module(config)\n\n if os.path.isdir(config[\"install_path\"]):\n _LOGGER.debug(\"Installed \" + config[\"name\"] +\n \" to \" + config[\"install_path\"])\n else:\n _LOGGER.debug(\"Install of \" + config[\"name\"] + \" failed\")\n\n # Install module dependancies\n if os.path.isfile(config[\"install_path\"] + \"/requirements.txt\"):\n self.pip_install_deps(config[\"install_path\"] +\n \"/requirements.txt\")\n\n def _install_git_module(self, config):\n \"\"\"Install a module from a git repository.\"\"\"\n if config is not None and \"repo\" in config:\n git_url = config[\"repo\"]\n else:\n git_url = DEFAULT_GIT_URL + config[\"type\"] + \\\n \"-\" + config[\"name\"] + \".git\"\n\n if any(prefix in git_url for prefix in [\"http\", \"https\", \"ssh\"]):\n # TODO Test if url or ssh path exists\n # TODO Handle github authentication\n _LOGGER.debug(\"Cloning from remote repository\")\n self.git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n if os.path.isdir(git_url):\n _LOGGER.debug(\"Cloning from local repository\")\n self.git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n _LOGGER.debug(\"Could not find local git repo \" + git_url)\n\n @staticmethod\n def _install_local_module(config):\n \"\"\"Install a module from a local path.\"\"\"\n installed = False\n\n installdir, _ = 
os.path.split(config[\"install_path\"])\n if not os.path.isdir(installdir):\n os.makedirs(installdir, exist_ok=True)\n\n if os.path.isdir(config[\"path\"]):\n shutil.copytree(config[\"path\"], config[\"install_path\"])\n installed = True\n\n if os.path.isfile(config[\"path\"]):\n os.makedirs(config[\"install_path\"], exist_ok=True)\n shutil.copyfile(config[\"path\"], config[\"install_path\"] +\n \"/__init__.py\")\n installed = True\n\n if not installed:\n _LOGGER.error(\"Failed to install from \" + config[\"path\"])\n", "path": "opsdroid/loader.py"}, {"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\n\n__version__ = \"0.8.0\"\n\nLOG_FILENAME = 'output.log'\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\nDEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\nDEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\nDEFAULT_MODULE_BRANCH = \"master\"\nEXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\")\n", "path": "opsdroid/const.py"}]}
3,675
463
gh_patches_debug_38680
rasdani/github-patches
git_diff
zigpy__zha-device-handlers-528
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG]Ikea FYRTUR blind and remote reporting wrong battery or no battery First Blind: Never updates the battery automatically. However if you do a zigbee get cluster attribute for battery percentage remaining it does return a correct value. Remote: does update battery on its own. Both: both values are displayed correctly in the get attribute box but the sensor for the batteries figures are always half what it shows. ![zha cluster 2](https://user-images.githubusercontent.com/44187409/77225998-78782080-6b6c-11ea-908b-7518207189ad.JPG) ![zha cluster 1](https://user-images.githubusercontent.com/44187409/77226005-8332b580-6b6c-11ea-8a51-03e128548433.JPG) </issue> <code> [start of zhaquirks/ikea/blinds.py] 1 """Device handler for IKEA of Sweden TRADFRI Fyrtur blinds.""" 2 from zigpy.profiles import zha 3 from zigpy.quirks import CustomDevice 4 from zigpy.zcl.clusters.closures import WindowCovering 5 from zigpy.zcl.clusters.general import ( 6 Basic, 7 Groups, 8 Identify, 9 Ota, 10 PollControl, 11 PowerConfiguration, 12 Scenes, 13 ) 14 from zigpy.zcl.clusters.lightlink import LightLink 15 16 from . import IKEA 17 from .. import DoublingPowerConfigurationCluster 18 from ..const import ( 19 DEVICE_TYPE, 20 ENDPOINTS, 21 INPUT_CLUSTERS, 22 MODELS_INFO, 23 OUTPUT_CLUSTERS, 24 PROFILE_ID, 25 ) 26 27 IKEA_CLUSTER_ID = 0xFC7C # decimal = 64636 28 29 30 class IkeaTradfriRollerBlinds(CustomDevice): 31 """Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.""" 32 33 signature = { 34 # <SimpleDescriptor endpoint=1 profile=260 device_type=2080 35 # device_version=1 36 # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096] 37 # output_clusters=[25, 4096]> 38 MODELS_INFO: [ 39 (IKEA, "FYRTUR block-out roller blind"), 40 (IKEA, "KADRILJ roller blind"), 41 ], 42 ENDPOINTS: { 43 1: { 44 PROFILE_ID: zha.PROFILE_ID, 45 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE, 46 INPUT_CLUSTERS: [ 47 Basic.cluster_id, 48 PowerConfiguration.cluster_id, 49 Identify.cluster_id, 50 Groups.cluster_id, 51 Scenes.cluster_id, 52 PollControl.cluster_id, 53 WindowCovering.cluster_id, 54 LightLink.cluster_id, 55 ], 56 OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id], 57 } 58 }, 59 } 60 61 replacement = { 62 "endpoints": { 63 1: { 64 "profile_id": zha.PROFILE_ID, 65 "device_type": zha.DeviceType.WINDOW_COVERING_DEVICE, 66 "input_clusters": [ 67 Basic.cluster_id, 68 DoublingPowerConfigurationCluster, 69 Identify.cluster_id, 70 Groups.cluster_id, 71 Scenes.cluster_id, 72 PollControl.cluster_id, 73 WindowCovering.cluster_id, 74 LightLink.cluster_id, 75 ], 76 "output_clusters": [Ota.cluster_id, LightLink.cluster_id], 77 } 78 } 79 } 80 [end of zhaquirks/ikea/blinds.py] [start of zhaquirks/ikea/opencloseremote.py] 1 """Device handler for IKEA of Sweden TRADFRI remote control.""" 2 from zigpy.profiles import zha 3 from zigpy.quirks import CustomDevice 4 from zigpy.zcl.clusters.closures import WindowCovering 5 from zigpy.zcl.clusters.general import ( 6 Alarms, 7 Basic, 8 Groups, 9 Identify, 10 LevelControl, 11 OnOff, 12 Ota, 13 PollControl, 14 PowerConfiguration, 15 ) 16 from zigpy.zcl.clusters.lightlink import LightLink 17 18 from . import IKEA 19 from .. 
import DoublingPowerConfigurationCluster 20 from ..const import ( 21 DEVICE_TYPE, 22 ENDPOINTS, 23 INPUT_CLUSTERS, 24 MODELS_INFO, 25 OUTPUT_CLUSTERS, 26 PROFILE_ID, 27 ) 28 29 IKEA_CLUSTER_ID = 0xFC7C # decimal = 64636 30 31 32 class IkeaTradfriOpenCloseRemote(CustomDevice): 33 """Custom device representing IKEA of Sweden TRADFRI remote control.""" 34 35 signature = { 36 MODELS_INFO: [("\x02KE", "TRADFRI open/close remote")], 37 ENDPOINTS: { 38 1: { 39 PROFILE_ID: zha.PROFILE_ID, 40 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER, 41 INPUT_CLUSTERS: [ 42 Basic.cluster_id, 43 PowerConfiguration.cluster_id, 44 Identify.cluster_id, 45 Alarms.cluster_id, 46 PollControl.cluster_id, 47 LightLink.cluster_id, 48 IKEA_CLUSTER_ID, 49 ], 50 OUTPUT_CLUSTERS: [ 51 Identify.cluster_id, 52 Groups.cluster_id, 53 OnOff.cluster_id, 54 LevelControl.cluster_id, 55 Ota.cluster_id, 56 WindowCovering.cluster_id, 57 LightLink.cluster_id, 58 ], 59 } 60 }, 61 } 62 63 replacement = { 64 MODELS_INFO: [(IKEA, "TRADFRI open/close remote")], 65 ENDPOINTS: { 66 1: { 67 PROFILE_ID: zha.PROFILE_ID, 68 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER, 69 INPUT_CLUSTERS: [ 70 Basic.cluster_id, 71 DoublingPowerConfigurationCluster, 72 Identify.cluster_id, 73 Alarms.cluster_id, 74 PollControl.cluster_id, 75 LightLink.cluster_id, 76 IKEA_CLUSTER_ID, 77 ], 78 OUTPUT_CLUSTERS: [ 79 Identify.cluster_id, 80 Groups.cluster_id, 81 OnOff.cluster_id, 82 LevelControl.cluster_id, 83 Ota.cluster_id, 84 WindowCovering.cluster_id, 85 LightLink.cluster_id, 86 ], 87 } 88 }, 89 } 90 [end of zhaquirks/ikea/opencloseremote.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zhaquirks/ikea/blinds.py b/zhaquirks/ikea/blinds.py --- a/zhaquirks/ikea/blinds.py +++ b/zhaquirks/ikea/blinds.py @@ -31,9 +31,9 @@ """Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.""" signature = { - # <SimpleDescriptor endpoint=1 profile=260 device_type=2080 + # <SimpleDescriptor endpoint=1 profile=260 device_type=514 # device_version=1 - # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096] + # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096, 64636] # output_clusters=[25, 4096]> MODELS_INFO: [ (IKEA, "FYRTUR block-out roller blind"), @@ -52,6 +52,7 @@ PollControl.cluster_id, WindowCovering.cluster_id, LightLink.cluster_id, + IKEA_CLUSTER_ID, ], OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id], } @@ -59,11 +60,11 @@ } replacement = { - "endpoints": { + ENDPOINTS: { 1: { - "profile_id": zha.PROFILE_ID, - "device_type": zha.DeviceType.WINDOW_COVERING_DEVICE, - "input_clusters": [ + PROFILE_ID: zha.PROFILE_ID, + DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE, + INPUT_CLUSTERS: [ Basic.cluster_id, DoublingPowerConfigurationCluster, Identify.cluster_id, @@ -72,8 +73,9 @@ PollControl.cluster_id, WindowCovering.cluster_id, LightLink.cluster_id, + IKEA_CLUSTER_ID, ], - "output_clusters": [Ota.cluster_id, LightLink.cluster_id], + OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id], } } } diff --git a/zhaquirks/ikea/opencloseremote.py b/zhaquirks/ikea/opencloseremote.py --- a/zhaquirks/ikea/opencloseremote.py +++ b/zhaquirks/ikea/opencloseremote.py @@ -33,7 +33,14 @@ """Custom device representing IKEA of Sweden TRADFRI remote control.""" signature = { - MODELS_INFO: [("\x02KE", "TRADFRI open/close remote")], + # <SimpleDescriptor endpoint=1 profile=260 device_type=515 + # device_version=1 + # input_clusters=[0, 1, 3, 9, 32, 4096, 64636] + # output_clusters=[3, 4, 6, 8, 25, 258, 4096]> + MODELS_INFO: [ + ("\x02KE", "TRADFRI open/close remote"), + (IKEA, "TRADFRI open/close remote"), + ], ENDPOINTS: { 1: { PROFILE_ID: zha.PROFILE_ID,
{"golden_diff": "diff --git a/zhaquirks/ikea/blinds.py b/zhaquirks/ikea/blinds.py\n--- a/zhaquirks/ikea/blinds.py\n+++ b/zhaquirks/ikea/blinds.py\n@@ -31,9 +31,9 @@\n \"\"\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n \n signature = {\n- # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=514\n # device_version=1\n- # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096]\n+ # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096, 64636]\n # output_clusters=[25, 4096]>\n MODELS_INFO: [\n (IKEA, \"FYRTUR block-out roller blind\"),\n@@ -52,6 +52,7 @@\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n+ IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n@@ -59,11 +60,11 @@\n }\n \n replacement = {\n- \"endpoints\": {\n+ ENDPOINTS: {\n 1: {\n- \"profile_id\": zha.PROFILE_ID,\n- \"device_type\": zha.DeviceType.WINDOW_COVERING_DEVICE,\n- \"input_clusters\": [\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n+ INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n@@ -72,8 +73,9 @@\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n+ IKEA_CLUSTER_ID,\n ],\n- \"output_clusters\": [Ota.cluster_id, LightLink.cluster_id],\n+ OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n }\n }\ndiff --git a/zhaquirks/ikea/opencloseremote.py b/zhaquirks/ikea/opencloseremote.py\n--- a/zhaquirks/ikea/opencloseremote.py\n+++ b/zhaquirks/ikea/opencloseremote.py\n@@ -33,7 +33,14 @@\n \"\"\"Custom device representing IKEA of Sweden TRADFRI remote control.\"\"\"\n \n signature = {\n- MODELS_INFO: [(\"\\x02KE\", \"TRADFRI open/close remote\")],\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=515\n+ # device_version=1\n+ # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]\n+ # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>\n+ MODELS_INFO: [\n+ (\"\\x02KE\", \"TRADFRI open/close remote\"),\n+ (IKEA, \"TRADFRI open/close remote\"),\n+ ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n", "issue": "[BUG]Ikea FYRTUR blind and remote reporting wrong battery or no battery\nFirst \r\n\r\nBlind:\r\n\r\nNever updates the battery automatically.\r\n\r\nHowever if you do a zigbee get cluster attribute for battery percentage remaining it does return a correct value.\r\n\r\nRemote:\r\n\r\ndoes update battery on its own.\r\n\r\nBoth:\r\n\r\nboth values are displayed correctly in the get attribute box but the sensor for the batteries figures are always half what it shows.\r\n![zha cluster 2](https://user-images.githubusercontent.com/44187409/77225998-78782080-6b6c-11ea-908b-7518207189ad.JPG)\r\n![zha cluster 1](https://user-images.githubusercontent.com/44187409/77226005-8332b580-6b6c-11ea-8a51-03e128548433.JPG)\r\n\r\n\n", "before_files": [{"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Basic,\n Groups,\n Identify,\n Ota,\n PollControl,\n PowerConfiguration,\n Scenes,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom . import IKEA\nfrom .. 
import DoublingPowerConfigurationCluster\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n\n\nclass IkeaTradfriRollerBlinds(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n # device_version=1\n # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096]\n # output_clusters=[25, 4096]>\n MODELS_INFO: [\n (IKEA, \"FYRTUR block-out roller blind\"),\n (IKEA, \"KADRILJ roller blind\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n },\n }\n\n replacement = {\n \"endpoints\": {\n 1: {\n \"profile_id\": zha.PROFILE_ID,\n \"device_type\": zha.DeviceType.WINDOW_COVERING_DEVICE,\n \"input_clusters\": [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n \"output_clusters\": [Ota.cluster_id, LightLink.cluster_id],\n }\n }\n }\n", "path": "zhaquirks/ikea/blinds.py"}, {"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI remote control.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n Basic,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PollControl,\n PowerConfiguration,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom . import IKEA\nfrom .. import DoublingPowerConfigurationCluster\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n\n\nclass IkeaTradfriOpenCloseRemote(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI remote control.\"\"\"\n\n signature = {\n MODELS_INFO: [(\"\\x02KE\", \"TRADFRI open/close remote\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n MODELS_INFO: [(IKEA, \"TRADFRI open/close remote\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n", "path": "zhaquirks/ikea/opencloseremote.py"}]}
2,211
789
gh_patches_debug_12128
rasdani/github-patches
git_diff
nf-core__tools-1357
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Linting should fail if pipeline module file is edited In https://github.com/ewels/nf-core-methylseq/pull/2 I have edited a module file that was pulled from nf-core/modules. I shouldn't be allowed to do this. The linting warns me that something is amiss: ``` │ bismark/align │ modules/nf-cor… │ Local copy of │ │ │ │ module outdated │ ``` But it should be checking the git sha in `modules.json` and recognising that it doesn't match what it expects. Then throwing a hard error. Possible that the code for this is lurking in a PR that's waiting to be merged.. </issue> <code> [start of nf_core/modules/lint/module_changes.py] 1 """ 2 Check whether the content of a module has changed compared to the original repository 3 """ 4 import os 5 import requests 6 import rich 7 from nf_core.modules.lint import LintResult 8 9 10 def module_changes(module_lint_object, module): 11 """ 12 Checks whether installed nf-core modules have changed compared to the 13 original repository 14 Downloads the 'main.nf' and 'meta.yml' files for every module 15 and compares them to the local copies 16 17 If the module has a 'git_sha', the file content is checked against this sha 18 """ 19 files_to_check = ["main.nf", "meta.yml"] 20 21 # Loop over nf-core modules 22 module_base_url = f"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module_lint_object.modules_repo.branch}/modules/{module.module_name}/" 23 24 # If module.git_sha specified, check specific commit version for changes 25 if module.git_sha: 26 module_base_url = f"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module.git_sha}/modules/{module.module_name}/" 27 28 for f in files_to_check: 29 # open local copy, continue if file not found (a failed message has already been issued in this case) 30 try: 31 local_copy = open(os.path.join(module.module_dir, f), "r").read() 32 except FileNotFoundError as e: 33 continue 34 35 # Download remote copy and compare 36 url = module_base_url + f 37 r = requests.get(url=url) 38 39 if r.status_code != 200: 40 module.warned.append( 41 ( 42 "check_local_copy", 43 f"Could not fetch remote copy, skipping comparison.", 44 f"{os.path.join(module.module_dir, f)}", 45 ) 46 ) 47 else: 48 try: 49 remote_copy = r.content.decode("utf-8") 50 51 if local_copy != remote_copy: 52 module.warned.append( 53 ( 54 "check_local_copy", 55 "Local copy of module outdated", 56 f"{os.path.join(module.module_dir, f)}", 57 ) 58 ) 59 else: 60 module.passed.append( 61 ( 62 "check_local_copy", 63 "Local copy of module up to date", 64 f"{os.path.join(module.module_dir, f)}", 65 ) 66 ) 67 except UnicodeDecodeError as e: 68 module.warned.append( 69 ( 70 "check_local_copy", 71 f"Could not decode file from {url}. Skipping comparison ({e})", 72 f"{os.path.join(module.module_dir, f)}", 73 ) 74 ) 75 [end of nf_core/modules/lint/module_changes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nf_core/modules/lint/module_changes.py b/nf_core/modules/lint/module_changes.py --- a/nf_core/modules/lint/module_changes.py +++ b/nf_core/modules/lint/module_changes.py @@ -49,10 +49,10 @@ remote_copy = r.content.decode("utf-8") if local_copy != remote_copy: - module.warned.append( + module.failed.append( ( "check_local_copy", - "Local copy of module outdated", + "Local copy of module does not match remote", f"{os.path.join(module.module_dir, f)}", ) )
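The issue above asks for the check to consult the git SHA pinned in `modules.json` rather than only the default branch, and to fail hard on a mismatch. A rough sketch of that idea is below; the `modules.json` layout and directory structure are assumptions for illustration, not nf-core's actual implementation.

```python
# Hypothetical sketch: compare local module files against the copies served at
# the git SHA pinned in modules.json, and treat any difference as a hard error.
import json
from pathlib import Path

import requests

module = "bismark/align"                     # assumed module name
pinned = json.loads(Path("modules.json").read_text())
git_sha = pinned[module]["git_sha"]          # assumed JSON layout

for fname in ("main.nf", "meta.yml"):
    local_path = Path("modules") / module / fname    # assumed directory layout
    url = f"https://raw.githubusercontent.com/nf-core/modules/{git_sha}/modules/{module}/{fname}"
    remote = requests.get(url, timeout=10)
    if remote.status_code == 200 and local_path.read_text() != remote.text:
        raise SystemExit(
            f"{local_path} does not match the revision pinned in modules.json ({git_sha})"
        )
```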
{"golden_diff": "diff --git a/nf_core/modules/lint/module_changes.py b/nf_core/modules/lint/module_changes.py\n--- a/nf_core/modules/lint/module_changes.py\n+++ b/nf_core/modules/lint/module_changes.py\n@@ -49,10 +49,10 @@\n remote_copy = r.content.decode(\"utf-8\")\n \n if local_copy != remote_copy:\n- module.warned.append(\n+ module.failed.append(\n (\n \"check_local_copy\",\n- \"Local copy of module outdated\",\n+ \"Local copy of module does not match remote\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n", "issue": "Linting should fail if pipeline module file is edited\nIn https://github.com/ewels/nf-core-methylseq/pull/2 I have edited a module file that was pulled from nf-core/modules. I shouldn't be allowed to do this.\r\n\r\nThe linting warns me that something is amiss:\r\n\r\n```\r\n\u2502 bismark/align \u2502 modules/nf-cor\u2026 \u2502 Local copy of \u2502\r\n\u2502 \u2502 \u2502 module outdated \u2502\r\n```\r\n\r\nBut it should be checking the git sha in `modules.json` and recognising that it doesn't match what it expects. Then throwing a hard error.\r\n\r\nPossible that the code for this is lurking in a PR that's waiting to be merged..\n", "before_files": [{"content": "\"\"\"\nCheck whether the content of a module has changed compared to the original repository\n\"\"\"\nimport os\nimport requests\nimport rich\nfrom nf_core.modules.lint import LintResult\n\n\ndef module_changes(module_lint_object, module):\n \"\"\"\n Checks whether installed nf-core modules have changed compared to the\n original repository\n Downloads the 'main.nf' and 'meta.yml' files for every module\n and compares them to the local copies\n\n If the module has a 'git_sha', the file content is checked against this sha\n \"\"\"\n files_to_check = [\"main.nf\", \"meta.yml\"]\n\n # Loop over nf-core modules\n module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module_lint_object.modules_repo.branch}/modules/{module.module_name}/\"\n\n # If module.git_sha specified, check specific commit version for changes\n if module.git_sha:\n module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module.git_sha}/modules/{module.module_name}/\"\n\n for f in files_to_check:\n # open local copy, continue if file not found (a failed message has already been issued in this case)\n try:\n local_copy = open(os.path.join(module.module_dir, f), \"r\").read()\n except FileNotFoundError as e:\n continue\n\n # Download remote copy and compare\n url = module_base_url + f\n r = requests.get(url=url)\n\n if r.status_code != 200:\n module.warned.append(\n (\n \"check_local_copy\",\n f\"Could not fetch remote copy, skipping comparison.\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n else:\n try:\n remote_copy = r.content.decode(\"utf-8\")\n\n if local_copy != remote_copy:\n module.warned.append(\n (\n \"check_local_copy\",\n \"Local copy of module outdated\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n else:\n module.passed.append(\n (\n \"check_local_copy\",\n \"Local copy of module up to date\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n except UnicodeDecodeError as e:\n module.warned.append(\n (\n \"check_local_copy\",\n f\"Could not decode file from {url}. Skipping comparison ({e})\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n", "path": "nf_core/modules/lint/module_changes.py"}]}
1,365
142
gh_patches_debug_6286
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-284
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Include tailwind.css via npm instead of static file At the moment, we include tailwind.css as a static file in our repo. Instead, we should use [the recommended installation via npm](https://tailwindcss.com/docs/installation/) where we can configure exactly which parts we need and compile a minified css file in our deployment chain. </issue> <code> [start of backend/cms/views/media/media_list_view.py] 1 from django.contrib.auth.decorators import login_required 2 from django.shortcuts import render 3 from django.utils.decorators import method_decorator 4 from django.views.generic import TemplateView 5 6 from ...decorators import region_permission_required 7 from ...models import Document 8 9 10 @method_decorator(login_required, name='dispatch') 11 @method_decorator(region_permission_required, name='dispatch') 12 class MediaListView(TemplateView): 13 template_name = 'media/list.html' 14 base_context = {'current_menu_item': 'media'} 15 16 def get(self, request, *args, **kwargs): 17 documents = Document.objects.all() 18 19 return render( 20 request, 21 self.template_name, 22 { 23 **self.base_context, 24 'documents': documents 25 } 26 ) 27 [end of backend/cms/views/media/media_list_view.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/cms/views/media/media_list_view.py b/backend/cms/views/media/media_list_view.py --- a/backend/cms/views/media/media_list_view.py +++ b/backend/cms/views/media/media_list_view.py @@ -10,7 +10,7 @@ @method_decorator(login_required, name='dispatch') @method_decorator(region_permission_required, name='dispatch') class MediaListView(TemplateView): - template_name = 'media/list.html' + template_name = 'media/media_list.html' base_context = {'current_menu_item': 'media'} def get(self, request, *args, **kwargs):
{"golden_diff": "diff --git a/backend/cms/views/media/media_list_view.py b/backend/cms/views/media/media_list_view.py\n--- a/backend/cms/views/media/media_list_view.py\n+++ b/backend/cms/views/media/media_list_view.py\n@@ -10,7 +10,7 @@\n @method_decorator(login_required, name='dispatch')\n @method_decorator(region_permission_required, name='dispatch')\n class MediaListView(TemplateView):\n- template_name = 'media/list.html'\n+ template_name = 'media/media_list.html'\n base_context = {'current_menu_item': 'media'}\n \n def get(self, request, *args, **kwargs):\n", "issue": "Include tailwind.css via npm instead of static file\nAt the moment, we include tailwind.css as a static file in our repo.\r\nInstead, we should use [the recommended installation via npm](https://tailwindcss.com/docs/installation/) where we can configure exactly which parts we need and compile a minified css file in our deployment chain.\n", "before_files": [{"content": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\n\nfrom ...decorators import region_permission_required\nfrom ...models import Document\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass MediaListView(TemplateView):\n template_name = 'media/list.html'\n base_context = {'current_menu_item': 'media'}\n\n def get(self, request, *args, **kwargs):\n documents = Document.objects.all()\n\n return render(\n request,\n self.template_name,\n {\n **self.base_context,\n 'documents': documents\n }\n )\n", "path": "backend/cms/views/media/media_list_view.py"}]}
806
131
gh_patches_debug_378
rasdani/github-patches
git_diff
encode__uvicorn-1099
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping ### Checklist <!-- Please make sure you check all these items before submitting your bug report. --> - [X] The bug is reproducible against the latest release and/or `master`. - [X] There are no similar issues or pull requests to fix it yet. ### Describe the bug When adding uvicorn package with poetry the following warning is raised: PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping because the constraint `>=7.*` violates PEP440 syntax. It should either be `>=7.0` or `=7.*`. Because of this, the `click` dependency is not installed and uvicorn may not work. ### To reproduce Just execute `poetry add uvicorn` in any environment. ### Expected behavior To install `uvicorn` correctly together with all its dependencies. ### Actual behavior The `click` dependency is not installed and uvicorn may not work. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import os 5 import re 6 7 from setuptools import setup 8 9 10 def get_version(package): 11 """ 12 Return package version as listed in `__version__` in `init.py`. 13 """ 14 path = os.path.join(package, "__init__.py") 15 init_py = open(path, "r", encoding="utf8").read() 16 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1) 17 18 19 def get_long_description(): 20 """ 21 Return the README. 22 """ 23 return open("README.md", "r", encoding="utf8").read() 24 25 26 def get_packages(package): 27 """ 28 Return root package and all sub-packages. 29 """ 30 return [ 31 dirpath 32 for dirpath, dirnames, filenames in os.walk(package) 33 if os.path.exists(os.path.join(dirpath, "__init__.py")) 34 ] 35 36 37 env_marker_cpython = ( 38 "sys_platform != 'win32'" 39 " and (sys_platform != 'cygwin'" 40 " and platform_python_implementation != 'PyPy')" 41 ) 42 43 env_marker_win = "sys_platform == 'win32'" 44 env_marker_below_38 = "python_version < '3.8'" 45 46 minimal_requirements = [ 47 "asgiref>=3.4.0", 48 "click>=7.*", 49 "h11>=0.8", 50 "typing-extensions;" + env_marker_below_38, 51 ] 52 53 54 extra_requirements = [ 55 "websockets>=9.1", 56 "httptools==0.2.*", 57 "uvloop>=0.14.0,!=0.15.0,!=0.15.1; " + env_marker_cpython, 58 "colorama>=0.4;" + env_marker_win, 59 "watchgod>=0.6", 60 "python-dotenv>=0.13", 61 "PyYAML>=5.1", 62 ] 63 64 65 setup( 66 name="uvicorn", 67 version=get_version("uvicorn"), 68 url="https://github.com/encode/uvicorn", 69 license="BSD", 70 description="The lightning-fast ASGI server.", 71 long_description=get_long_description(), 72 long_description_content_type="text/markdown", 73 author="Tom Christie", 74 author_email="[email protected]", 75 packages=get_packages("uvicorn"), 76 install_requires=minimal_requirements, 77 extras_require={"standard": extra_requirements}, 78 include_package_data=True, 79 classifiers=[ 80 "Development Status :: 4 - Beta", 81 "Environment :: Web Environment", 82 "Intended Audience :: Developers", 83 "License :: OSI Approved :: BSD License", 84 "Operating System :: OS Independent", 85 "Topic :: Internet :: WWW/HTTP", 86 "Programming Language :: Python :: 3", 87 "Programming Language :: Python :: 3.6", 88 "Programming Language :: Python :: 3.7", 89 "Programming Language :: Python :: 3.8", 90 "Programming Language :: Python :: 3.9", 91 "Programming Language :: Python :: Implementation :: CPython", 92 "Programming Language 
:: Python :: Implementation :: PyPy", 93 ], 94 entry_points=""" 95 [console_scripts] 96 uvicorn=uvicorn.main:main 97 """, 98 ) 99 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -45,7 +45,7 @@ minimal_requirements = [ "asgiref>=3.4.0", - "click>=7.*", + "click>=7.0", "h11>=0.8", "typing-extensions;" + env_marker_below_38, ]
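For background on why Poetry rejects the original constraint: PEP 440 only allows a trailing `.*` wildcard with the `==` and `!=` operators, so `>=7.*` is not a valid specifier while `>=7.0` and `==7.*` are. A small check with the `packaging` library (assumed to be installed; very old releases of `packaging` may fall back to a deprecated legacy parser instead of raising):

```python
# Quick illustration using the `packaging` library, which implements PEP 440.
from packaging.specifiers import InvalidSpecifier, SpecifierSet

for spec in (">=7.*", ">=7.0", "==7.*"):
    try:
        SpecifierSet(spec)
        print(f"{spec!r}: valid PEP 440 specifier")
    except InvalidSpecifier:
        print(f"{spec!r}: rejected (wildcards are only allowed with == and !=)")
```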
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,7 +45,7 @@\n \n minimal_requirements = [\n \"asgiref>=3.4.0\",\n- \"click>=7.*\",\n+ \"click>=7.0\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n ]\n", "issue": "PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. -->\r\n\r\n- [X] The bug is reproducible against the latest release and/or `master`.\r\n- [X] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nWhen adding uvicorn package with poetry the following warning is raised:\r\n\r\n PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping\r\n\r\nbecause the constraint `>=7.*` violates PEP440 syntax. It should either be `>=7.0` or `=7.*`.\r\n\r\nBecause of this, the `click` dependency is not installed and uvicorn may not work.\r\n\r\n### To reproduce\r\n\r\nJust execute `poetry add uvicorn` in any environment.\r\n\r\n### Expected behavior\r\n\r\nTo install `uvicorn` correctly together with all its dependencies.\r\n\r\n### Actual behavior\r\n\r\nThe `click` dependency is not installed and uvicorn may not work.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, \"__init__.py\")\n init_py = open(path, \"r\", encoding=\"utf8\").read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open(\"README.md\", \"r\", encoding=\"utf8\").read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nenv_marker_win = \"sys_platform == 'win32'\"\nenv_marker_below_38 = \"python_version < '3.8'\"\n\nminimal_requirements = [\n \"asgiref>=3.4.0\",\n \"click>=7.*\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n]\n\n\nextra_requirements = [\n \"websockets>=9.1\",\n \"httptools==0.2.*\",\n \"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \" + env_marker_cpython,\n \"colorama>=0.4;\" + env_marker_win,\n \"watchgod>=0.6\",\n \"python-dotenv>=0.13\",\n \"PyYAML>=5.1\",\n]\n\n\nsetup(\n name=\"uvicorn\",\n version=get_version(\"uvicorn\"),\n url=\"https://github.com/encode/uvicorn\",\n license=\"BSD\",\n description=\"The lightning-fast ASGI server.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"uvicorn\"),\n install_requires=minimal_requirements,\n extras_require={\"standard\": extra_requirements},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 
3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\",\n)\n", "path": "setup.py"}]}
1,656
92
gh_patches_debug_31527
rasdani/github-patches
git_diff
iterative__dvc-4739
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dvc dag --outs In the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. I thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG. Can you maybe (re)implement this feature? Thanks! dvc dag --outs In the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. I thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG. Can you maybe (re)implement this feature? Thanks! </issue> <code> [start of dvc/command/dag.py] 1 import argparse 2 import logging 3 4 from dvc.command.base import CmdBase, append_doc_link 5 from dvc.exceptions import DvcException 6 7 logger = logging.getLogger(__name__) 8 9 10 def _show_ascii(G): 11 from dvc.dagascii import draw 12 from dvc.repo.graph import get_pipelines 13 14 pipelines = get_pipelines(G) 15 16 ret = [] 17 for pipeline in pipelines: 18 ret.append(draw(pipeline.nodes, pipeline.edges)) 19 20 return "\n".join(ret) 21 22 23 def _show_dot(G): 24 import io 25 26 from networkx.drawing.nx_pydot import write_dot 27 28 dot_file = io.StringIO() 29 write_dot(G, dot_file) 30 return dot_file.getvalue() 31 32 33 def _build(G, target=None, full=False): 34 import networkx as nx 35 36 from dvc.repo.graph import get_pipeline, get_pipelines 37 38 if target: 39 H = get_pipeline(get_pipelines(G), target) 40 if not full: 41 descendants = nx.descendants(G, target) 42 descendants.add(target) 43 H.remove_nodes_from(set(G.nodes()) - descendants) 44 else: 45 H = G 46 47 def _relabel(stage): 48 return stage.addressing 49 50 return nx.relabel_nodes(H, _relabel, copy=False) 51 52 53 class CmdDAG(CmdBase): 54 def run(self): 55 try: 56 target = None 57 if self.args.target: 58 stages = self.repo.collect(self.args.target) 59 if len(stages) > 1: 60 logger.error( 61 f"'{self.args.target}' contains more than one stage " 62 "{stages}, please specify one stage" 63 ) 64 return 1 65 target = stages[0] 66 67 G = _build(self.repo.graph, target=target, full=self.args.full,) 68 69 if self.args.dot: 70 logger.info(_show_dot(G)) 71 else: 72 from dvc.utils.pager import pager 73 74 pager(_show_ascii(G)) 75 76 return 0 77 except DvcException: 78 msg = "failed to show " 79 if self.args.target: 80 msg += f"a pipeline for '{target}'" 81 else: 82 msg += "pipelines" 83 logger.exception(msg) 84 return 1 85 86 87 def add_parser(subparsers, parent_parser): 88 DAG_HELP = "Visualize DVC project DAG." 89 dag_parser = subparsers.add_parser( 90 "dag", 91 parents=[parent_parser], 92 description=append_doc_link(DAG_HELP, "dag"), 93 help=DAG_HELP, 94 formatter_class=argparse.RawDescriptionHelpFormatter, 95 ) 96 dag_parser.add_argument( 97 "--dot", 98 action="store_true", 99 default=False, 100 help="Print DAG with .dot format.", 101 ) 102 dag_parser.add_argument( 103 "--full", 104 action="store_true", 105 default=False, 106 help=( 107 "Show full DAG that the target belongs too, instead of " 108 "showing DAG consisting only of ancestors." 109 ), 110 ) 111 dag_parser.add_argument( 112 "target", 113 nargs="?", 114 help="Stage or output to show pipeline for (optional). 
" 115 "Finds all stages in the workspace by default.", 116 ) 117 dag_parser.set_defaults(func=CmdDAG) 118 [end of dvc/command/dag.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/command/dag.py b/dvc/command/dag.py --- a/dvc/command/dag.py +++ b/dvc/command/dag.py @@ -30,7 +30,7 @@ return dot_file.getvalue() -def _build(G, target=None, full=False): +def _build(G, target=None, full=False, outs=False): import networkx as nx from dvc.repo.graph import get_pipeline, get_pipelines @@ -44,8 +44,25 @@ else: H = G - def _relabel(stage): - return stage.addressing + if outs: + G = nx.DiGraph() + for stage in H.nodes: + G.add_nodes_from(stage.outs) + + for from_stage, to_stage in nx.edge_dfs(H): + G.add_edges_from( + [ + (from_out, to_out) + for from_out in from_stage.outs + for to_out in to_stage.outs + ] + ) + H = G + + def _relabel(node): + from dvc.stage import Stage + + return node.addressing if isinstance(node, Stage) else str(node) return nx.relabel_nodes(H, _relabel, copy=False) @@ -64,7 +81,12 @@ return 1 target = stages[0] - G = _build(self.repo.graph, target=target, full=self.args.full,) + G = _build( + self.repo.graph, + target=target, + full=self.args.full, + outs=self.args.outs, + ) if self.args.dot: logger.info(_show_dot(G)) @@ -108,6 +130,13 @@ "showing DAG consisting only of ancestors." ), ) + dag_parser.add_argument( + "-o", + "--outs", + action="store_true", + default=False, + help="Print output files instead of stages.", + ) dag_parser.add_argument( "target", nargs="?",
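The heart of the change above is projecting the stage-level DAG onto the outputs each stage produces, which is what makes the `train`/`test` outputs from the issue show up as separate paths. A self-contained sketch of that projection with `networkx`, using toy stage names and paths rather than DVC's internal objects:

```python
# Toy projection of a stage DAG onto an output DAG, mirroring the shape of the
# patch: add every stage's outputs as nodes, then connect the outputs of each
# pair of connected stages.
import networkx as nx

stage_graph = nx.DiGraph()
stage_graph.add_edge("train_test_split", "train_model")   # hypothetical stages
stage_outs = {
    "train_test_split": ["data/train.csv", "data/test.csv"],
    "train_model": ["model.pkl"],
}

out_graph = nx.DiGraph()
for outs in stage_outs.values():
    out_graph.add_nodes_from(outs)
for src, dst in nx.edge_dfs(stage_graph):
    out_graph.add_edges_from(
        (from_out, to_out)
        for from_out in stage_outs[src]
        for to_out in stage_outs[dst]
    )

print(sorted(out_graph.edges()))
# [('data/test.csv', 'model.pkl'), ('data/train.csv', 'model.pkl')]
```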
{"golden_diff": "diff --git a/dvc/command/dag.py b/dvc/command/dag.py\n--- a/dvc/command/dag.py\n+++ b/dvc/command/dag.py\n@@ -30,7 +30,7 @@\n return dot_file.getvalue()\n \n \n-def _build(G, target=None, full=False):\n+def _build(G, target=None, full=False, outs=False):\n import networkx as nx\n \n from dvc.repo.graph import get_pipeline, get_pipelines\n@@ -44,8 +44,25 @@\n else:\n H = G\n \n- def _relabel(stage):\n- return stage.addressing\n+ if outs:\n+ G = nx.DiGraph()\n+ for stage in H.nodes:\n+ G.add_nodes_from(stage.outs)\n+\n+ for from_stage, to_stage in nx.edge_dfs(H):\n+ G.add_edges_from(\n+ [\n+ (from_out, to_out)\n+ for from_out in from_stage.outs\n+ for to_out in to_stage.outs\n+ ]\n+ )\n+ H = G\n+\n+ def _relabel(node):\n+ from dvc.stage import Stage\n+\n+ return node.addressing if isinstance(node, Stage) else str(node)\n \n return nx.relabel_nodes(H, _relabel, copy=False)\n \n@@ -64,7 +81,12 @@\n return 1\n target = stages[0]\n \n- G = _build(self.repo.graph, target=target, full=self.args.full,)\n+ G = _build(\n+ self.repo.graph,\n+ target=target,\n+ full=self.args.full,\n+ outs=self.args.outs,\n+ )\n \n if self.args.dot:\n logger.info(_show_dot(G))\n@@ -108,6 +130,13 @@\n \"showing DAG consisting only of ancestors.\"\n ),\n )\n+ dag_parser.add_argument(\n+ \"-o\",\n+ \"--outs\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Print output files instead of stages.\",\n+ )\n dag_parser.add_argument(\n \"target\",\n nargs=\"?\",\n", "issue": "dvc dag --outs\nIn the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. \r\nI thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.\r\nCan you maybe (re)implement this feature?\r\nThanks!\ndvc dag --outs\nIn the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. \r\nI thought this was a really nice feature. 
For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.\r\nCan you maybe (re)implement this feature?\r\nThanks!\n", "before_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.command.base import CmdBase, append_doc_link\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\n\ndef _show_ascii(G):\n from dvc.dagascii import draw\n from dvc.repo.graph import get_pipelines\n\n pipelines = get_pipelines(G)\n\n ret = []\n for pipeline in pipelines:\n ret.append(draw(pipeline.nodes, pipeline.edges))\n\n return \"\\n\".join(ret)\n\n\ndef _show_dot(G):\n import io\n\n from networkx.drawing.nx_pydot import write_dot\n\n dot_file = io.StringIO()\n write_dot(G, dot_file)\n return dot_file.getvalue()\n\n\ndef _build(G, target=None, full=False):\n import networkx as nx\n\n from dvc.repo.graph import get_pipeline, get_pipelines\n\n if target:\n H = get_pipeline(get_pipelines(G), target)\n if not full:\n descendants = nx.descendants(G, target)\n descendants.add(target)\n H.remove_nodes_from(set(G.nodes()) - descendants)\n else:\n H = G\n\n def _relabel(stage):\n return stage.addressing\n\n return nx.relabel_nodes(H, _relabel, copy=False)\n\n\nclass CmdDAG(CmdBase):\n def run(self):\n try:\n target = None\n if self.args.target:\n stages = self.repo.collect(self.args.target)\n if len(stages) > 1:\n logger.error(\n f\"'{self.args.target}' contains more than one stage \"\n \"{stages}, please specify one stage\"\n )\n return 1\n target = stages[0]\n\n G = _build(self.repo.graph, target=target, full=self.args.full,)\n\n if self.args.dot:\n logger.info(_show_dot(G))\n else:\n from dvc.utils.pager import pager\n\n pager(_show_ascii(G))\n\n return 0\n except DvcException:\n msg = \"failed to show \"\n if self.args.target:\n msg += f\"a pipeline for '{target}'\"\n else:\n msg += \"pipelines\"\n logger.exception(msg)\n return 1\n\n\ndef add_parser(subparsers, parent_parser):\n DAG_HELP = \"Visualize DVC project DAG.\"\n dag_parser = subparsers.add_parser(\n \"dag\",\n parents=[parent_parser],\n description=append_doc_link(DAG_HELP, \"dag\"),\n help=DAG_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n dag_parser.add_argument(\n \"--dot\",\n action=\"store_true\",\n default=False,\n help=\"Print DAG with .dot format.\",\n )\n dag_parser.add_argument(\n \"--full\",\n action=\"store_true\",\n default=False,\n help=(\n \"Show full DAG that the target belongs too, instead of \"\n \"showing DAG consisting only of ancestors.\"\n ),\n )\n dag_parser.add_argument(\n \"target\",\n nargs=\"?\",\n help=\"Stage or output to show pipeline for (optional). \"\n \"Finds all stages in the workspace by default.\",\n )\n dag_parser.set_defaults(func=CmdDAG)\n", "path": "dvc/command/dag.py"}]}
1,671
471
gh_patches_debug_32659
rasdani/github-patches
git_diff
pypi__warehouse-12343
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Python 3.1 classifier filtering is broken When [filtering by the `Programming Language :: Python :: 3.1` classifier on pypi.org][1], the results include packages which don't have that classifier - any package with a classifier matching `Programming Language :: Python :: 3.1*` is included. That is, packages for 3.10, 3.11, 3.12, and so on are included in the results. [1]: https://pypi.org/search/?q=&o=&c=Programming+Language+%3A%3A+Python+%3A%3A+3.1 </issue> <code> [start of warehouse/search/queries.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import re 14 15 from elasticsearch_dsl import Q 16 17 SEARCH_FIELDS = [ 18 "author", 19 "author_email", 20 "description", 21 "download_url", 22 "home_page", 23 "keywords", 24 "license", 25 "maintainer", 26 "maintainer_email", 27 "normalized_name", 28 "platform", 29 "summary", 30 ] 31 SEARCH_BOOSTS = { 32 "name": 10, 33 "normalized_name": 10, 34 "description": 5, 35 "keywords": 5, 36 "summary": 5, 37 } 38 SEARCH_FILTER_ORDER = ( 39 "Framework", 40 "Topic", 41 "Development Status", 42 "License", 43 "Programming Language", 44 "Operating System", 45 "Environment", 46 "Intended Audience", 47 "Natural Language", 48 ) 49 50 51 def get_es_query(es, terms, order, classifiers): 52 """ 53 Returns an Elasticsearch query from data from the request. 54 """ 55 if not terms: 56 query = es.query() 57 else: 58 bool_query = gather_es_queries(terms) 59 query = es.query(bool_query) 60 query = query.suggest("name_suggestion", terms, term={"field": "name"}) 61 62 # Require match to all specified classifiers 63 for classifier in classifiers: 64 query = query.query("prefix", classifiers=classifier) 65 66 query = query_for_order(query, order) 67 return query 68 69 70 def gather_es_queries(q): 71 quoted_string, unquoted_string = filter_query(q) 72 must = [form_query("phrase", i) for i in quoted_string] + [ 73 form_query("best_fields", i) for i in unquoted_string 74 ] 75 76 bool_query = Q("bool", must=must) 77 78 # Allow to optionally match on prefix 79 # if ``q`` is longer than one character. 80 if len(q) > 1: 81 bool_query = bool_query | Q("prefix", normalized_name=q) 82 return bool_query 83 84 85 def filter_query(s): 86 """ 87 Filters given query with the below regex 88 and returns lists of quoted and unquoted strings 89 """ 90 matches = re.findall(r'(?:"([^"]*)")|([^"]*)', s) 91 result_quoted = [t[0].strip() for t in matches if t[0]] 92 result_unquoted = [t[1].strip() for t in matches if t[1]] 93 return result_quoted, result_unquoted 94 95 96 def form_query(query_type, query): 97 """ 98 Returns a multi match query 99 """ 100 fields = [ 101 field + "^" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field 102 for field in SEARCH_FIELDS 103 ] 104 return Q("multi_match", fields=fields, query=query, type=query_type) 105 106 107 def query_for_order(query, order): 108 """ 109 Applies transformations on the ES query based on the search order. 
110 111 Order is assumed to be a string with the name of a field with an optional 112 hyphen to indicate descending sort order. 113 """ 114 if order == "": # relevance should not sort 115 return query 116 117 field = order[order.find("-") + 1 :] 118 sort_info = { 119 field: { 120 "order": "desc" if order.startswith("-") else "asc", 121 "unmapped_type": "long", 122 } 123 } 124 query = query.sort(sort_info) 125 return query 126 [end of warehouse/search/queries.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/search/queries.py b/warehouse/search/queries.py --- a/warehouse/search/queries.py +++ b/warehouse/search/queries.py @@ -52,36 +52,45 @@ """ Returns an Elasticsearch query from data from the request. """ + classifier_q = Q( + "bool", + # Theh results must have all selected classifiers + must=[ + Q( + "bool", + should=[ + # Term search for the exact classifier + Q("term", classifiers=classifier), + # Prefix search for potential children classifiers + Q("prefix", classifiers=classifier + " :: "), + ], + ) + for classifier in classifiers + ], + ) if not terms: - query = es.query() + query = es.query(classifier_q) if classifiers else es.query() else: - bool_query = gather_es_queries(terms) + quoted_string, unquoted_string = filter_query(terms) + bool_query = Q( + "bool", + must=[form_query("phrase", i) for i in quoted_string] + + [form_query("best_fields", i) for i in unquoted_string] + + ([classifier_q] if classifiers else []), + ) + + # Allow to optionally match on prefix + # if ``q`` is longer than one character. + if len(terms) > 1: + bool_query = bool_query | Q("prefix", normalized_name=terms) + query = es.query(bool_query) query = query.suggest("name_suggestion", terms, term={"field": "name"}) - # Require match to all specified classifiers - for classifier in classifiers: - query = query.query("prefix", classifiers=classifier) - query = query_for_order(query, order) return query -def gather_es_queries(q): - quoted_string, unquoted_string = filter_query(q) - must = [form_query("phrase", i) for i in quoted_string] + [ - form_query("best_fields", i) for i in unquoted_string - ] - - bool_query = Q("bool", must=must) - - # Allow to optionally match on prefix - # if ``q`` is longer than one character. - if len(q) > 1: - bool_query = bool_query | Q("prefix", normalized_name=q) - return bool_query - - def filter_query(s): """ Filters given query with the below regex
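The fix hinges on matching a selected classifier either exactly or as a parent of a longer classifier, using the ` :: ` separator so that `3.1` is no longer treated as a prefix of `3.10`. A minimal sketch of that query shape with `elasticsearch_dsl` (assumed installed; this only prints the generated query and does not talk to a cluster):

```python
# Minimal sketch of the corrected classifier filter: an exact term match OR a
# prefix match that includes the " :: " separator.
from elasticsearch_dsl import Q

classifier = "Programming Language :: Python :: 3.1"
classifier_q = Q(
    "bool",
    should=[
        Q("term", classifiers=classifier),             # the classifier itself
        Q("prefix", classifiers=classifier + " :: "),  # only its child classifiers
    ],
)
print(classifier_q.to_dict())
```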
{"golden_diff": "diff --git a/warehouse/search/queries.py b/warehouse/search/queries.py\n--- a/warehouse/search/queries.py\n+++ b/warehouse/search/queries.py\n@@ -52,36 +52,45 @@\n \"\"\"\n Returns an Elasticsearch query from data from the request.\n \"\"\"\n+ classifier_q = Q(\n+ \"bool\",\n+ # Theh results must have all selected classifiers\n+ must=[\n+ Q(\n+ \"bool\",\n+ should=[\n+ # Term search for the exact classifier\n+ Q(\"term\", classifiers=classifier),\n+ # Prefix search for potential children classifiers\n+ Q(\"prefix\", classifiers=classifier + \" :: \"),\n+ ],\n+ )\n+ for classifier in classifiers\n+ ],\n+ )\n if not terms:\n- query = es.query()\n+ query = es.query(classifier_q) if classifiers else es.query()\n else:\n- bool_query = gather_es_queries(terms)\n+ quoted_string, unquoted_string = filter_query(terms)\n+ bool_query = Q(\n+ \"bool\",\n+ must=[form_query(\"phrase\", i) for i in quoted_string]\n+ + [form_query(\"best_fields\", i) for i in unquoted_string]\n+ + ([classifier_q] if classifiers else []),\n+ )\n+\n+ # Allow to optionally match on prefix\n+ # if ``q`` is longer than one character.\n+ if len(terms) > 1:\n+ bool_query = bool_query | Q(\"prefix\", normalized_name=terms)\n+\n query = es.query(bool_query)\n query = query.suggest(\"name_suggestion\", terms, term={\"field\": \"name\"})\n \n- # Require match to all specified classifiers\n- for classifier in classifiers:\n- query = query.query(\"prefix\", classifiers=classifier)\n-\n query = query_for_order(query, order)\n return query\n \n \n-def gather_es_queries(q):\n- quoted_string, unquoted_string = filter_query(q)\n- must = [form_query(\"phrase\", i) for i in quoted_string] + [\n- form_query(\"best_fields\", i) for i in unquoted_string\n- ]\n-\n- bool_query = Q(\"bool\", must=must)\n-\n- # Allow to optionally match on prefix\n- # if ``q`` is longer than one character.\n- if len(q) > 1:\n- bool_query = bool_query | Q(\"prefix\", normalized_name=q)\n- return bool_query\n-\n-\n def filter_query(s):\n \"\"\"\n Filters given query with the below regex\n", "issue": "Python 3.1 classifier filtering is broken\nWhen [filtering by the `Programming Language :: Python :: 3.1` classifier on pypi.org][1], the results include packages which don't have that classifier - any package with a classifier matching `Programming Language :: Python :: 3.1*` is included. 
That is, packages for 3.10, 3.11, 3.12, and so on are included in the results.\r\n\r\n[1]: https://pypi.org/search/?q=&o=&c=Programming+Language+%3A%3A+Python+%3A%3A+3.1\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nfrom elasticsearch_dsl import Q\n\nSEARCH_FIELDS = [\n \"author\",\n \"author_email\",\n \"description\",\n \"download_url\",\n \"home_page\",\n \"keywords\",\n \"license\",\n \"maintainer\",\n \"maintainer_email\",\n \"normalized_name\",\n \"platform\",\n \"summary\",\n]\nSEARCH_BOOSTS = {\n \"name\": 10,\n \"normalized_name\": 10,\n \"description\": 5,\n \"keywords\": 5,\n \"summary\": 5,\n}\nSEARCH_FILTER_ORDER = (\n \"Framework\",\n \"Topic\",\n \"Development Status\",\n \"License\",\n \"Programming Language\",\n \"Operating System\",\n \"Environment\",\n \"Intended Audience\",\n \"Natural Language\",\n)\n\n\ndef get_es_query(es, terms, order, classifiers):\n \"\"\"\n Returns an Elasticsearch query from data from the request.\n \"\"\"\n if not terms:\n query = es.query()\n else:\n bool_query = gather_es_queries(terms)\n query = es.query(bool_query)\n query = query.suggest(\"name_suggestion\", terms, term={\"field\": \"name\"})\n\n # Require match to all specified classifiers\n for classifier in classifiers:\n query = query.query(\"prefix\", classifiers=classifier)\n\n query = query_for_order(query, order)\n return query\n\n\ndef gather_es_queries(q):\n quoted_string, unquoted_string = filter_query(q)\n must = [form_query(\"phrase\", i) for i in quoted_string] + [\n form_query(\"best_fields\", i) for i in unquoted_string\n ]\n\n bool_query = Q(\"bool\", must=must)\n\n # Allow to optionally match on prefix\n # if ``q`` is longer than one character.\n if len(q) > 1:\n bool_query = bool_query | Q(\"prefix\", normalized_name=q)\n return bool_query\n\n\ndef filter_query(s):\n \"\"\"\n Filters given query with the below regex\n and returns lists of quoted and unquoted strings\n \"\"\"\n matches = re.findall(r'(?:\"([^\"]*)\")|([^\"]*)', s)\n result_quoted = [t[0].strip() for t in matches if t[0]]\n result_unquoted = [t[1].strip() for t in matches if t[1]]\n return result_quoted, result_unquoted\n\n\ndef form_query(query_type, query):\n \"\"\"\n Returns a multi match query\n \"\"\"\n fields = [\n field + \"^\" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field\n for field in SEARCH_FIELDS\n ]\n return Q(\"multi_match\", fields=fields, query=query, type=query_type)\n\n\ndef query_for_order(query, order):\n \"\"\"\n Applies transformations on the ES query based on the search order.\n\n Order is assumed to be a string with the name of a field with an optional\n hyphen to indicate descending sort order.\n \"\"\"\n if order == \"\": # relevance should not sort\n return query\n\n field = order[order.find(\"-\") + 1 :]\n sort_info = {\n field: {\n \"order\": \"desc\" if order.startswith(\"-\") else \"asc\",\n \"unmapped_type\": \"long\",\n }\n }\n query = query.sort(sort_info)\n return query\n", "path": 
"warehouse/search/queries.py"}]}
1,788
561
gh_patches_debug_29422
rasdani/github-patches
git_diff
freedomofpress__securedrop-7035
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> determine post-upgrade failure-mode for a SHA-1-signed submission key ## Description After #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature. After #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948. What will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0? ## Possible approaches | Option | Documentation changes | Code changes | Implication | | --- | --- | --- | --- | | Fail open, but log | optional | ✓ | Admin must monitor logs and/or OSSEC alerts. | | Fail open, but document | ✓ | ✗ | Admin must monitor release notes or check documentation. | | Fail closed | optional | ✓[1] | Admin can contact us for help. | **Notes:** 1. @legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`. </issue> <code> [start of securedrop/journalist.py] 1 from encryption import EncryptionManager, GpgKeyNotFoundError 2 from execution import asynchronous 3 from journalist_app import create_app 4 from models import Source 5 from sdconfig import SecureDropConfig 6 7 config = SecureDropConfig.get_current() 8 # app is imported by journalist.wsgi 9 app = create_app(config) 10 11 12 @asynchronous 13 def prime_keycache() -> None: 14 """Pre-load the source public keys into Redis.""" 15 with app.app_context(): 16 encryption_mgr = EncryptionManager.get_default() 17 for source in Source.query.filter_by(pending=False, deleted_at=None).all(): 18 try: 19 encryption_mgr.get_source_public_key(source.filesystem_id) 20 except GpgKeyNotFoundError: 21 pass 22 23 24 prime_keycache() 25 26 27 if __name__ == "__main__": # pragma: no cover 28 debug = getattr(config, "env", "prod") != "prod" 29 # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host 30 app.run(debug=debug, host="0.0.0.0", port=8081) 31 [end of securedrop/journalist.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/securedrop/journalist.py b/securedrop/journalist.py --- a/securedrop/journalist.py +++ b/securedrop/journalist.py @@ -1,9 +1,13 @@ +import sys + from encryption import EncryptionManager, GpgKeyNotFoundError from execution import asynchronous from journalist_app import create_app from models import Source from sdconfig import SecureDropConfig +import redwood + config = SecureDropConfig.get_current() # app is imported by journalist.wsgi app = create_app(config) @@ -21,10 +25,28 @@ pass -prime_keycache() +def validate_journalist_key() -> None: + """Verify the journalist PGP key is valid""" + encryption_mgr = EncryptionManager.get_default() + # First check that we can read it + try: + journalist_key = encryption_mgr.get_journalist_public_key() + except Exception as e: + print(f"ERROR: Unable to read journalist public key: {e}", file=sys.stderr) + app.logger.error(f"ERROR: Unable to read journalist public key: {e}") + sys.exit(1) + # And then what we read is valid + try: + redwood.is_valid_public_key(journalist_key) + except redwood.RedwoodError as e: + print(f"ERROR: Journalist public key is not valid: {e}", file=sys.stderr) + app.logger.error(f"ERROR: Journalist public key is not valid: {e}") + sys.exit(1) if __name__ == "__main__": # pragma: no cover + validate_journalist_key() + prime_keycache() debug = getattr(config, "env", "prod") != "prod" # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host app.run(debug=debug, host="0.0.0.0", port=8081)
{"golden_diff": "diff --git a/securedrop/journalist.py b/securedrop/journalist.py\n--- a/securedrop/journalist.py\n+++ b/securedrop/journalist.py\n@@ -1,9 +1,13 @@\n+import sys\n+\n from encryption import EncryptionManager, GpgKeyNotFoundError\n from execution import asynchronous\n from journalist_app import create_app\n from models import Source\n from sdconfig import SecureDropConfig\n \n+import redwood\n+\n config = SecureDropConfig.get_current()\n # app is imported by journalist.wsgi\n app = create_app(config)\n@@ -21,10 +25,28 @@\n pass\n \n \n-prime_keycache()\n+def validate_journalist_key() -> None:\n+ \"\"\"Verify the journalist PGP key is valid\"\"\"\n+ encryption_mgr = EncryptionManager.get_default()\n+ # First check that we can read it\n+ try:\n+ journalist_key = encryption_mgr.get_journalist_public_key()\n+ except Exception as e:\n+ print(f\"ERROR: Unable to read journalist public key: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Unable to read journalist public key: {e}\")\n+ sys.exit(1)\n+ # And then what we read is valid\n+ try:\n+ redwood.is_valid_public_key(journalist_key)\n+ except redwood.RedwoodError as e:\n+ print(f\"ERROR: Journalist public key is not valid: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Journalist public key is not valid: {e}\")\n+ sys.exit(1)\n \n \n if __name__ == \"__main__\": # pragma: no cover\n+ validate_journalist_key()\n+ prime_keycache()\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "issue": "determine post-upgrade failure-mode for a SHA-1-signed submission key\n## Description\r\n\r\nAfter #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.\r\n\r\nAfter #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.\r\n\r\nWhat will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?\r\n\r\n## Possible approaches\r\n\r\n| Option | Documentation changes | Code changes | Implication |\r\n| --- | --- | --- | --- |\r\n| Fail open, but log | optional | \u2713 | Admin must monitor logs and/or OSSEC alerts. |\r\n| Fail open, but document | \u2713 | \u2717 | Admin must monitor release notes or check documentation. |\r\n| Fail closed | optional | \u2713[1] | Admin can contact us for help. |\r\n\r\n**Notes:**\r\n1. 
@legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.\n", "before_files": [{"content": "from encryption import EncryptionManager, GpgKeyNotFoundError\nfrom execution import asynchronous\nfrom journalist_app import create_app\nfrom models import Source\nfrom sdconfig import SecureDropConfig\n\nconfig = SecureDropConfig.get_current()\n# app is imported by journalist.wsgi\napp = create_app(config)\n\n\n@asynchronous\ndef prime_keycache() -> None:\n \"\"\"Pre-load the source public keys into Redis.\"\"\"\n with app.app_context():\n encryption_mgr = EncryptionManager.get_default()\n for source in Source.query.filter_by(pending=False, deleted_at=None).all():\n try:\n encryption_mgr.get_source_public_key(source.filesystem_id)\n except GpgKeyNotFoundError:\n pass\n\n\nprime_keycache()\n\n\nif __name__ == \"__main__\": # pragma: no cover\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "path": "securedrop/journalist.py"}]}
1,074
440
gh_patches_debug_7869
rasdani/github-patches
git_diff
freedomofpress__securedrop-3756
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Builder image needs updates ## Description sd-builder image needs to be updated. ## Steps to Reproduce `make build-debs` and observe error. ## Expected Behavior `make build-debs` should exit without error. ## Actual Behavior `make-build-debs` returns an error, security updates are needed for the container. ## Comments Instructions are available here: https://docs.securedrop.org/en/latest/development/dockerbuildmaint.html </issue> <code> [start of securedrop/version.py] 1 __version__ = '0.9.0~rc1' 2 [end of securedrop/version.py] [start of docs/conf.py] 1 # -*- coding: utf-8 -*- 2 # 3 # SecureDrop documentation build configuration file, created by 4 # sphinx-quickstart on Tue Oct 13 12:08:52 2015. 5 # 6 # This file is execfile()d with the current directory set to its 7 # containing dir. 8 # 9 # Note that not all possible configuration values are present in this 10 # autogenerated file. 11 # 12 # All configuration values have a default; values that are commented out 13 # serve to show the default. 14 15 import os 16 17 # Detect if we're being built by Read the Docs 18 # https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs 19 on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 20 21 # If extensions (or modules to document with autodoc) are in another directory, 22 # add these directories to sys.path here. If the directory is relative to the 23 # documentation root, use os.path.abspath to make it absolute, like shown here. 24 # sys.path.insert(0, os.path.abspath('.')) 25 26 # -- General configuration ------------------------------------------------ 27 28 # If your documentation needs a minimal Sphinx version, state it here. 29 # needs_sphinx = '1.0' 30 31 # Add any Sphinx extension module names here, as strings. They can be 32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 # ones. 34 extensions = ['sphinx.ext.todo', ] 35 36 # Add any paths that contain templates here, relative to this directory. 37 templates_path = ['_templates'] 38 39 # The suffix(es) of source filenames. 40 # You can specify multiple suffix as a list of string: 41 # source_suffix = ['.rst', '.md'] 42 source_suffix = '.rst' 43 44 # The encoding of source files. 45 # source_encoding = 'utf-8-sig' 46 47 # The master toctree document. 48 master_doc = 'index' 49 50 # General information about the project. 51 project = u'SecureDrop' 52 copyright = u'2017, Freedom of the Press Foundation' 53 author = u'SecureDrop Team and Contributors' 54 55 # The version info for the project you're documenting, acts as replacement for 56 # |version| and |release|, also used in various other places throughout the 57 # built documents. 58 # 59 # The short X.Y version. 60 version = '0.9.0~rc1' 61 # The full version, including alpha/beta/rc tags. 62 release = '0.9.0~rc1' 63 64 # The language for content autogenerated by Sphinx. Refer to documentation 65 # for a list of supported languages. 66 # 67 # This is also used if you do content translation via gettext catalogs. 68 # Usually you set "language" from the command line for these cases. 69 language = None 70 71 # There are two options for replacing |today|: either, you set today to some 72 # non-false value, then it is used: 73 # today = '' 74 # Else, today_fmt is used as the format for a strftime call. 
75 # today_fmt = '%B %d, %Y' 76 77 # List of patterns, relative to source directory, that match files and 78 # directories to ignore when looking for source files. 79 exclude_patterns = ['_build'] 80 81 # The reST default role (used for this markup: `text`) to use for all 82 # documents. 83 # default_role = None 84 85 # If true, '()' will be appended to :func: etc. cross-reference text. 86 # add_function_parentheses = True 87 88 # If true, the current module name will be prepended to all description 89 # unit titles (such as .. function::). 90 # add_module_names = True 91 92 # If true, sectionauthor and moduleauthor directives will be shown in the 93 # output. They are ignored by default. 94 # show_authors = False 95 96 # The name of the Pygments (syntax highlighting) style to use. 97 pygments_style = 'sphinx' 98 99 # A list of ignored prefixes for module index sorting. 100 # modindex_common_prefix = [] 101 102 # If true, keep warnings as "system message" paragraphs in the built documents. 103 # keep_warnings = False 104 105 # If true, `todo` and `todoList` produce output, else they produce nothing. 106 todo_include_todos = False 107 108 109 # -- Options for HTML output ---------------------------------------------- 110 111 # The theme to use for HTML and HTML Help pages. See the documentation for 112 # a list of builtin themes. 113 if on_rtd: 114 html_theme = 'default' 115 else: 116 try: 117 # If you want to build the docs locally using the RTD theme, 118 # you may need to install it: ``pip install sphinx_rtd_theme``. 119 # https://github.com/snide/sphinx_rtd_theme#via-package 120 import sphinx_rtd_theme 121 html_theme = "sphinx_rtd_theme" 122 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 123 except ImportError: 124 # This theme is included with Sphinx and is quite nice (based 125 # on the Pocoo themes), but since we're using the RTD theme 126 # for the production docs, it's best to use that to avoid 127 # issues due to discrepancies between the themes. 128 html_theme = 'alabaster' 129 130 # Theme options are theme-specific and customize the look and feel of a theme 131 # further. For a list of options available for each theme, see the 132 # documentation. 133 # html_theme_options = {} 134 135 # Add any paths that contain custom themes here, relative to this directory. 136 # html_theme_path = [] 137 138 # The name for this set of Sphinx documents. If None, it defaults to 139 # "<project> v<release> documentation". 140 # html_title = None 141 142 # A shorter title for the navigation bar. Default is the same as html_title. 143 # html_short_title = None 144 145 # The name of an image file (relative to this directory) to place at the top 146 # of the sidebar. 147 html_logo = '../securedrop/static/i/favicon.png' 148 149 # The name of an image file (within the static path) to use as favicon of the 150 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 151 # pixels large. 152 # html_favicon = None 153 154 # Add any paths that contain custom static files (such as style sheets) here, 155 # relative to this directory. They are copied after the builtin static files, 156 # so a file named "default.css" will overwrite the builtin "default.css". 157 # html_static_path = ['_static'] 158 159 # Add any extra paths that contain custom files (such as robots.txt or 160 # .htaccess) here, relative to this directory. These files are copied 161 # directly to the root of the documentation. 
162 # html_extra_path = [] 163 164 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 165 # using the given strftime format. 166 # html_last_updated_fmt = '%b %d, %Y' 167 168 # If true, SmartyPants will be used to convert quotes and dashes to 169 # typographically correct entities. 170 # html_use_smartypants = True 171 172 # Custom sidebar templates, maps document names to template names. 173 # html_sidebars = {} 174 175 # Additional templates that should be rendered to pages, maps page names to 176 # template names. 177 # html_additional_pages = {} 178 179 # If false, no module index is generated. 180 # html_domain_indices = True 181 182 # If false, no index is generated. 183 # html_use_index = True 184 185 # If true, the index is split into individual pages for each letter. 186 # html_split_index = False 187 188 # If true, links to the reST sources are added to the pages. 189 # html_show_sourcelink = True 190 191 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 192 # html_show_sphinx = True 193 194 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 195 # html_show_copyright = True 196 197 # If true, an OpenSearch description file will be output, and all pages will 198 # contain a <link> tag referring to it. The value of this option must be the 199 # base URL from which the finished HTML is served. 200 # html_use_opensearch = '' 201 202 # This is the file name suffix for HTML files (e.g. ".xhtml"). 203 # html_file_suffix = None 204 205 # Language to be used for generating the HTML full-text search index. 206 # Sphinx supports the following languages: 207 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 208 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 209 # html_search_language = 'en' 210 211 # A dictionary with options for the search language support, empty by default. 212 # Now only 'ja' uses this config value 213 # html_search_options = {'type': 'default'} 214 215 # The name of a javascript file (relative to the configuration directory) that 216 # implements a search results scorer. If empty, the default will be used. 217 # html_search_scorer = 'scorer.js' 218 219 # Output file base name for HTML help builder. 220 htmlhelp_basename = 'SecureDropdoc' 221 222 # -- Options for LaTeX output --------------------------------------------- 223 224 latex_elements = { 225 # The paper size ('letterpaper' or 'a4paper'). 226 # 'papersize': 'letterpaper', 227 228 # The font size ('10pt', '11pt' or '12pt'). 229 # 'pointsize': '10pt', 230 231 # Additional stuff for the LaTeX preamble. 232 # 'preamble': '', 233 234 # Latex figure (float) alignment 235 # 'figure_align': 'htbp', 236 } 237 238 # Grouping the document tree into LaTeX files. List of tuples 239 # (source start file, target name, title, 240 # author, documentclass [howto, manual, or own class]). 241 latex_documents = [ 242 (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation', 243 author, 'manual'), 244 ] 245 246 # The name of an image file (relative to this directory) to place at the top of 247 # the title page. 248 # latex_logo = None 249 250 # For "manual" documents, if this is true, then toplevel headings are parts, 251 # not chapters. 252 # latex_use_parts = False 253 254 # If true, show page references after internal links. 255 # latex_show_pagerefs = False 256 257 # If true, show URL addresses after external links. 258 # latex_show_urls = False 259 260 # Documents to append as an appendix to all manuals. 
261 # latex_appendices = [] 262 263 # If false, no module index is generated. 264 # latex_domain_indices = True 265 266 267 # -- Options for manual page output --------------------------------------- 268 269 # One entry per manual page. List of tuples 270 # (source start file, name, description, authors, manual section). 271 man_pages = [ 272 (master_doc, 'securedrop', u'SecureDrop Documentation', 273 [author], 1) 274 ] 275 276 # If true, show URL addresses after external links. 277 # man_show_urls = False 278 279 280 # -- Options for Texinfo output ------------------------------------------- 281 282 # Grouping the document tree into Texinfo files. List of tuples 283 # (source start file, target name, title, author, 284 # dir menu entry, description, category) 285 texinfo_documents = [ 286 (master_doc, 'SecureDrop', u'SecureDrop Documentation', 287 author, 'SecureDrop', 'One line description of project.', 288 'Miscellaneous'), 289 ] 290 291 # Documents to append as an appendix to all manuals. 292 # texinfo_appendices = [] 293 294 # If false, no module index is generated. 295 # texinfo_domain_indices = True 296 297 # How to display URL addresses: 'footnote', 'no', or 'inline'. 298 # texinfo_show_urls = 'footnote' 299 300 # If true, do not generate a @detailmenu in the "Top" node's menu. 301 # texinfo_no_detailmenu = False 302 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -57,9 +57,9 @@ # built documents. # # The short X.Y version. -version = '0.9.0~rc1' +version = '0.9.0~rc2' # The full version, including alpha/beta/rc tags. -release = '0.9.0~rc1' +release = '0.9.0~rc2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/securedrop/version.py b/securedrop/version.py --- a/securedrop/version.py +++ b/securedrop/version.py @@ -1 +1 @@ -__version__ = '0.9.0~rc1' +__version__ = '0.9.0~rc2'
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -57,9 +57,9 @@\n # built documents.\n #\n # The short X.Y version.\n-version = '0.9.0~rc1'\n+version = '0.9.0~rc2'\n # The full version, including alpha/beta/rc tags.\n-release = '0.9.0~rc1'\n+release = '0.9.0~rc2'\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\ndiff --git a/securedrop/version.py b/securedrop/version.py\n--- a/securedrop/version.py\n+++ b/securedrop/version.py\n@@ -1 +1 @@\n-__version__ = '0.9.0~rc1'\n+__version__ = '0.9.0~rc2'\n", "issue": "Builder image needs updates\n## Description\r\n\r\nsd-builder image needs to be updated.\r\n\r\n## Steps to Reproduce\r\n\r\n`make build-debs` and observe error.\r\n\r\n## Expected Behavior\r\n\r\n`make build-debs` should exit without error.\r\n\r\n## Actual Behavior\r\n\r\n`make-build-debs` returns an error, security updates are needed for the container.\r\n## Comments\r\nInstructions are available here:\r\nhttps://docs.securedrop.org/en/latest/development/dockerbuildmaint.html\n", "before_files": [{"content": "__version__ = '0.9.0~rc1'\n", "path": "securedrop/version.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2017, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.9.0~rc1'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.9.0~rc1'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '../securedrop/static/i/favicon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}]}
4,023 | 200 | gh_patches_debug_12398 | rasdani/github-patches | git_diff
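The golden diff for this record is only a release-string bump applied in two files. A small consistency check is easy to write; the sketch below is not part of SecureDrop, it only reads the two files named in the patch (`securedrop/version.py` and `docs/conf.py`) and asserts that the version strings agree. Everything beyond those file paths is assumed for illustration.

```python
import re
from pathlib import Path

# File locations come from the patch above; run from the repository root.
VERSION_PY = Path("securedrop/version.py")
CONF_PY = Path("docs/conf.py")


def package_version() -> str:
    # Matches a line such as: __version__ = '0.9.0~rc2'
    return re.search(r"__version__\s*=\s*'([^']+)'", VERSION_PY.read_text()).group(1)


def docs_versions() -> tuple:
    text = CONF_PY.read_text()
    version = re.search(r"^version\s*=\s*'([^']+)'", text, re.MULTILINE).group(1)
    release = re.search(r"^release\s*=\s*'([^']+)'", text, re.MULTILINE).group(1)
    return version, release


if __name__ == "__main__":
    pkg = package_version()
    docs_version, docs_release = docs_versions()
    assert pkg == docs_version == docs_release, (pkg, docs_version, docs_release)
    print(f"version strings agree: {pkg}")
```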
pre-commit__pre-commit-1590
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> improve `healthy()` check for node See pre-commit/actions#45 for `language_version: system` hooks this looks like: ``` eslint...................................................................Failed - hook id: eslint - exit code: 127 /home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory /home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory ##[error]The process '/opt/hostedtoolcache/Python/3.8.5/x64/bin/pre-commit' failed with exit code 1 ``` </issue> <code> [start of pre_commit/languages/node.py] 1 import contextlib 2 import functools 3 import os 4 import sys 5 from typing import Generator 6 from typing import Sequence 7 from typing import Tuple 8 9 import pre_commit.constants as C 10 from pre_commit import parse_shebang 11 from pre_commit.envcontext import envcontext 12 from pre_commit.envcontext import PatchesT 13 from pre_commit.envcontext import UNSET 14 from pre_commit.envcontext import Var 15 from pre_commit.hook import Hook 16 from pre_commit.languages import helpers 17 from pre_commit.languages.python import bin_dir 18 from pre_commit.prefix import Prefix 19 from pre_commit.util import clean_path_on_failure 20 from pre_commit.util import cmd_output 21 from pre_commit.util import cmd_output_b 22 23 ENVIRONMENT_DIR = 'node_env' 24 healthy = helpers.basic_healthy 25 26 27 @functools.lru_cache(maxsize=1) 28 def get_default_version() -> str: 29 # nodeenv does not yet support `-n system` on windows 30 if sys.platform == 'win32': 31 return C.DEFAULT 32 # if node is already installed, we can save a bunch of setup time by 33 # using the installed version 34 elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')): 35 return 'system' 36 else: 37 return C.DEFAULT 38 39 40 def _envdir(prefix: Prefix, version: str) -> str: 41 directory = helpers.environment_dir(ENVIRONMENT_DIR, version) 42 return prefix.path(directory) 43 44 45 def get_env_patch(venv: str) -> PatchesT: 46 if sys.platform == 'cygwin': # pragma: no cover 47 _, win_venv, _ = cmd_output('cygpath', '-w', venv) 48 install_prefix = fr'{win_venv.strip()}\bin' 49 lib_dir = 'lib' 50 elif sys.platform == 'win32': # pragma: no cover 51 install_prefix = bin_dir(venv) 52 lib_dir = 'Scripts' 53 else: # pragma: win32 no cover 54 install_prefix = venv 55 lib_dir = 'lib' 56 return ( 57 ('NODE_VIRTUAL_ENV', venv), 58 ('NPM_CONFIG_PREFIX', install_prefix), 59 ('npm_config_prefix', install_prefix), 60 ('NPM_CONFIG_USERCONFIG', UNSET), 61 ('npm_config_userconfig', UNSET), 62 ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')), 63 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))), 64 ) 65 66 67 @contextlib.contextmanager 68 def in_env( 69 prefix: Prefix, 70 language_version: str, 71 ) -> Generator[None, None, None]: 72 with envcontext(get_env_patch(_envdir(prefix, language_version))): 73 yield 74 75 76 def install_environment( 77 prefix: Prefix, version: str, additional_dependencies: Sequence[str], 78 ) -> None: 79 additional_dependencies = tuple(additional_dependencies) 80 assert prefix.exists('package.json') 81 envdir = _envdir(prefix, version) 82 83 # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath 84 if sys.platform == 'win32': # pragma: no cover 85 envdir = 
fr'\\?\{os.path.normpath(envdir)}' 86 with clean_path_on_failure(envdir): 87 cmd = [ 88 sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir, 89 ] 90 if version != C.DEFAULT: 91 cmd.extend(['-n', version]) 92 cmd_output_b(*cmd) 93 94 with in_env(prefix, version): 95 # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449 96 # install as if we installed from git 97 helpers.run_setup_cmd(prefix, ('npm', 'install')) 98 helpers.run_setup_cmd( 99 prefix, 100 ('npm', 'install', '-g', '.', *additional_dependencies), 101 ) 102 103 104 def run_hook( 105 hook: Hook, 106 file_args: Sequence[str], 107 color: bool, 108 ) -> Tuple[int, bytes]: 109 with in_env(hook.prefix, hook.language_version): 110 return helpers.run_xargs(hook, hook.cmd, file_args, color=color) 111 [end of pre_commit/languages/node.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py --- a/pre_commit/languages/node.py +++ b/pre_commit/languages/node.py @@ -21,7 +21,6 @@ from pre_commit.util import cmd_output_b ENVIRONMENT_DIR = 'node_env' -healthy = helpers.basic_healthy @functools.lru_cache(maxsize=1) @@ -73,6 +72,12 @@ yield +def healthy(prefix: Prefix, language_version: str) -> bool: + with in_env(prefix, language_version): + retcode, _, _ = cmd_output_b('node', '--version', retcode=None) + return retcode == 0 + + def install_environment( prefix: Prefix, version: str, additional_dependencies: Sequence[str], ) -> None:
{"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -21,7 +21,6 @@\n from pre_commit.util import cmd_output_b\n \n ENVIRONMENT_DIR = 'node_env'\n-healthy = helpers.basic_healthy\n \n \n @functools.lru_cache(maxsize=1)\n@@ -73,6 +72,12 @@\n yield\n \n \n+def healthy(prefix: Prefix, language_version: str) -> bool:\n+ with in_env(prefix, language_version):\n+ retcode, _, _ = cmd_output_b('node', '--version', retcode=None)\n+ return retcode == 0\n+\n+\n def install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n ) -> None:\n", "issue": "improve `healthy()` check for node\nSee pre-commit/actions#45\r\n\r\nfor `language_version: system` hooks this looks like:\r\n\r\n```\r\neslint...................................................................Failed\r\n- hook id: eslint\r\n- exit code: 127\r\n\r\n/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory\r\n/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory\r\n\r\n##[error]The process '/opt/hostedtoolcache/Python/3.8.5/x64/bin/pre-commit' failed with exit code 1\r\n```\r\n\r\n\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import parse_shebang\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'node_env'\nhealthy = helpers.basic_healthy\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n # nodeenv does not yet support `-n system` on windows\n if sys.platform == 'win32':\n return C.DEFAULT\n # if node is already installed, we can save a bunch of setup time by\n # using the installed version\n elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef _envdir(prefix: Prefix, version: str) -> str:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n return prefix.path(directory)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = fr'{win_venv.strip()}\\bin'\n lib_dir = 'lib'\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n lib_dir = 'Scripts'\n else: # pragma: win32 no cover\n install_prefix = venv\n lib_dir = 'lib'\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NPM_CONFIG_USERCONFIG', UNSET),\n ('npm_config_userconfig', UNSET),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n with 
envcontext(get_env_patch(_envdir(prefix, language_version))):\n yield\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n envdir = _envdir(prefix, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n with clean_path_on_failure(envdir):\n cmd = [\n sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,\n ]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output_b(*cmd)\n\n with in_env(prefix, version):\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n # install as if we installed from git\n helpers.run_setup_cmd(prefix, ('npm', 'install'))\n helpers.run_setup_cmd(\n prefix,\n ('npm', 'install', '-g', '.', *additional_dependencies),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/node.py"}]}
1,885 | 183 | gh_patches_debug_1654 | rasdani/github-patches | git_diff
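The substantive change in this record replaces `helpers.basic_healthy` with a node-specific `healthy()` that runs `node --version` inside the cached environment, so a node toolchain that has vanished from the host (the GitHub Actions toolcache case in the issue) is detected and the environment can be rebuilt. A standalone sketch of the same idea, using only the standard library rather than pre-commit's helpers, might look like this; the shim path is copied from the issue's error output and is otherwise hypothetical.

```python
import subprocess


def node_env_is_healthy(node_shim: str) -> bool:
    """Return True only if the node shim inside the cached env still runs."""
    try:
        proc = subprocess.run([node_shim, "--version"], capture_output=True, check=False)
    except OSError:
        # The shim itself is missing or not executable.
        return False
    # Exit code 127 ("No such file or directory" for the wrapped interpreter)
    # or any other non-zero status means the environment needs rebuilding.
    return proc.returncode == 0


if __name__ == "__main__":
    shim = "/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node"
    print(node_env_is_healthy(shim))
```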
open-telemetry__opentelemetry-python-contrib-1515
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add readthedocs documentation for remoulade instrumentation Part of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491) </issue> <code> [start of instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 Usage 17 ----- 18 19 * Start broker backend 20 21 :: 22 23 docker run -p 5672:5672 rabbitmq 24 25 * Run instrumented actor 26 27 .. code-block:: python 28 29 from remoulade.brokers.rabbitmq import RabbitmqBroker 30 import remoulade 31 32 RemouladeInstrumentor().instrument() 33 34 broker = RabbitmqBroker() 35 remoulade.set_broker(broker) 36 37 @remoulade.actor 38 def multiply(x, y): 39 return x * y 40 41 broker.declare_actor(count_words) 42 43 multiply.send(43, 51) 44 45 """ 46 from typing import Collection 47 48 from remoulade import Middleware, broker 49 50 from opentelemetry import trace 51 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor 52 from opentelemetry.instrumentation.remoulade import utils 53 from opentelemetry.instrumentation.remoulade.package import _instruments 54 from opentelemetry.instrumentation.remoulade.version import __version__ 55 from opentelemetry.propagate import extract, inject 56 from opentelemetry.semconv.trace import SpanAttributes 57 58 _REMOULADE_MESSAGE_TAG_KEY = "remoulade.action" 59 _REMOULADE_MESSAGE_SEND = "send" 60 _REMOULADE_MESSAGE_RUN = "run" 61 62 _REMOULADE_MESSAGE_NAME_KEY = "remoulade.actor_name" 63 64 _REMOULADE_MESSAGE_RETRY_COUNT_KEY = "remoulade.retry_count" 65 66 67 class _InstrumentationMiddleware(Middleware): 68 def __init__(self, _tracer): 69 self._tracer = _tracer 70 self._span_registry = {} 71 72 def before_process_message(self, _broker, message): 73 if "trace_ctx" not in message.options: 74 return 75 76 trace_ctx = extract(message.options["trace_ctx"]) 77 retry_count = message.options.get("retries", 0) 78 operation_name = utils.get_operation_name( 79 "before_process_message", retry_count 80 ) 81 span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count} 82 83 span = self._tracer.start_span( 84 operation_name, 85 kind=trace.SpanKind.CONSUMER, 86 context=trace_ctx, 87 attributes=span_attributes, 88 ) 89 90 activation = trace.use_span(span, end_on_exit=True) 91 activation.__enter__() # pylint: disable=E1101 92 93 utils.attach_span( 94 self._span_registry, message.message_id, (span, activation) 95 ) 96 97 def after_process_message( 98 self, _broker, message, *, result=None, exception=None 99 ): 100 span, activation = utils.retrieve_span( 101 self._span_registry, message.message_id 102 ) 103 104 if span is None: 105 # no existing span found for message_id 106 return 107 108 if span.is_recording(): 109 span.set_attributes( 110 { 111 _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_RUN, 112 
_REMOULADE_MESSAGE_NAME_KEY: message.actor_name, 113 SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id, 114 } 115 ) 116 117 activation.__exit__(None, None, None) 118 utils.detach_span(self._span_registry, message.message_id) 119 120 def before_enqueue(self, _broker, message, delay): 121 retry_count = message.options.get("retries", 0) 122 operation_name = utils.get_operation_name( 123 "before_enqueue", retry_count 124 ) 125 span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count} 126 127 span = self._tracer.start_span( 128 operation_name, 129 kind=trace.SpanKind.PRODUCER, 130 attributes=span_attributes, 131 ) 132 133 if span.is_recording(): 134 span.set_attributes( 135 { 136 _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_SEND, 137 _REMOULADE_MESSAGE_NAME_KEY: message.actor_name, 138 SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id, 139 } 140 ) 141 142 activation = trace.use_span(span, end_on_exit=True) 143 activation.__enter__() # pylint: disable=E1101 144 145 utils.attach_span( 146 self._span_registry, 147 message.message_id, 148 (span, activation), 149 is_publish=True, 150 ) 151 152 if "trace_ctx" not in message.options: 153 message.options["trace_ctx"] = {} 154 inject(message.options["trace_ctx"]) 155 156 def after_enqueue(self, _broker, message, delay, exception=None): 157 _, activation = utils.retrieve_span( 158 self._span_registry, message.message_id, is_publish=True 159 ) 160 161 if activation is None: 162 # no existing span found for message_id 163 return 164 165 activation.__exit__(None, None, None) 166 utils.detach_span( 167 self._span_registry, message.message_id, is_publish=True 168 ) 169 170 171 class RemouladeInstrumentor(BaseInstrumentor): 172 def instrumentation_dependencies(self) -> Collection[str]: 173 return _instruments 174 175 def _instrument(self, **kwargs): 176 tracer_provider = kwargs.get("tracer_provider") 177 178 # pylint: disable=attribute-defined-outside-init 179 self._tracer = trace.get_tracer(__name__, __version__, tracer_provider) 180 instrumentation_middleware = _InstrumentationMiddleware(self._tracer) 181 182 broker.add_extra_default_middleware(instrumentation_middleware) 183 184 def _uninstrument(self, **kwargs): 185 broker.remove_extra_default_middleware(_InstrumentationMiddleware) 186 [end of instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py --- a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py @@ -16,13 +16,13 @@ Usage ----- -* Start broker backend +Start broker backend :: docker run -p 5672:5672 rabbitmq -* Run instrumented actor +Run instrumented actor .. code-block:: python
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\n@@ -16,13 +16,13 @@\n Usage\n -----\n \n-* Start broker backend\n+Start broker backend\n \n ::\n \n docker run -p 5672:5672 rabbitmq\n \n-* Run instrumented actor\n+Run instrumented actor\n \n .. code-block:: python\n", "issue": "Add readthedocs documentation for remoulade instrumentation\nPart of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUsage\n-----\n\n* Start broker backend\n\n::\n\n docker run -p 5672:5672 rabbitmq\n\n* Run instrumented actor\n\n.. code-block:: python\n\n from remoulade.brokers.rabbitmq import RabbitmqBroker\n import remoulade\n\n RemouladeInstrumentor().instrument()\n\n broker = RabbitmqBroker()\n remoulade.set_broker(broker)\n\n @remoulade.actor\n def multiply(x, y):\n return x * y\n\n broker.declare_actor(count_words)\n\n multiply.send(43, 51)\n\n\"\"\"\nfrom typing import Collection\n\nfrom remoulade import Middleware, broker\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.remoulade import utils\nfrom opentelemetry.instrumentation.remoulade.package import _instruments\nfrom opentelemetry.instrumentation.remoulade.version import __version__\nfrom opentelemetry.propagate import extract, inject\nfrom opentelemetry.semconv.trace import SpanAttributes\n\n_REMOULADE_MESSAGE_TAG_KEY = \"remoulade.action\"\n_REMOULADE_MESSAGE_SEND = \"send\"\n_REMOULADE_MESSAGE_RUN = \"run\"\n\n_REMOULADE_MESSAGE_NAME_KEY = \"remoulade.actor_name\"\n\n_REMOULADE_MESSAGE_RETRY_COUNT_KEY = \"remoulade.retry_count\"\n\n\nclass _InstrumentationMiddleware(Middleware):\n def __init__(self, _tracer):\n self._tracer = _tracer\n self._span_registry = {}\n\n def before_process_message(self, _broker, message):\n if \"trace_ctx\" not in message.options:\n return\n\n trace_ctx = extract(message.options[\"trace_ctx\"])\n retry_count = message.options.get(\"retries\", 0)\n operation_name = utils.get_operation_name(\n \"before_process_message\", retry_count\n )\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n\n span = self._tracer.start_span(\n operation_name,\n kind=trace.SpanKind.CONSUMER,\n context=trace_ctx,\n attributes=span_attributes,\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(\n self._span_registry, message.message_id, (span, activation)\n 
)\n\n def after_process_message(\n self, _broker, message, *, result=None, exception=None\n ):\n span, activation = utils.retrieve_span(\n self._span_registry, message.message_id\n )\n\n if span is None:\n # no existing span found for message_id\n return\n\n if span.is_recording():\n span.set_attributes(\n {\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_RUN,\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n }\n )\n\n activation.__exit__(None, None, None)\n utils.detach_span(self._span_registry, message.message_id)\n\n def before_enqueue(self, _broker, message, delay):\n retry_count = message.options.get(\"retries\", 0)\n operation_name = utils.get_operation_name(\n \"before_enqueue\", retry_count\n )\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n\n span = self._tracer.start_span(\n operation_name,\n kind=trace.SpanKind.PRODUCER,\n attributes=span_attributes,\n )\n\n if span.is_recording():\n span.set_attributes(\n {\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_SEND,\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n }\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(\n self._span_registry,\n message.message_id,\n (span, activation),\n is_publish=True,\n )\n\n if \"trace_ctx\" not in message.options:\n message.options[\"trace_ctx\"] = {}\n inject(message.options[\"trace_ctx\"])\n\n def after_enqueue(self, _broker, message, delay, exception=None):\n _, activation = utils.retrieve_span(\n self._span_registry, message.message_id, is_publish=True\n )\n\n if activation is None:\n # no existing span found for message_id\n return\n\n activation.__exit__(None, None, None)\n utils.detach_span(\n self._span_registry, message.message_id, is_publish=True\n )\n\n\nclass RemouladeInstrumentor(BaseInstrumentor):\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n\n # pylint: disable=attribute-defined-outside-init\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n instrumentation_middleware = _InstrumentationMiddleware(self._tracer)\n\n broker.add_extra_default_middleware(instrumentation_middleware)\n\n def _uninstrument(self, **kwargs):\n broker.remove_extra_default_middleware(_InstrumentationMiddleware)\n", "path": "instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py"}]}
2,345 | 186 | gh_patches_debug_22938 | rasdani/github-patches | git_diff
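The fix in this record is purely a docstring change: the `* Start broker backend` and `* Run instrumented actor` bullet items become plain paragraphs, presumably so that the `::` literal block and the `.. code-block:: python` directive following them render cleanly when the module docstring is pulled into the Read the Docs pages. Below is a minimal stand-in module (not the real instrumentation package) showing the resulting reStructuredText layout.

```python
"""
Usage
-----

Start broker backend

::

    docker run -p 5672:5672 rabbitmq

Run instrumented actor

.. code-block:: python

    from remoulade.brokers.rabbitmq import RabbitmqBroker
    import remoulade

    RemouladeInstrumentor().instrument()
"""
```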
bridgecrewio__checkov-215
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Don't crash on IAM Policies that do not have an "effect" **Describe the bug** [`Effect`](https://www.terraform.io/docs/providers/aws/d/iam_policy_document.html#effect) is an optional token on AWS IAM policy documents. It will default to Allow. While it is perhaps good to be explicit, it should not crash Checkov. ``` ERROR:checkov.terraform.checks.data.aws.AdminPolicyDocument:Failed to run check: Ensure IAM policies that allow full "*-*" administrative privileges are not created for configuration: {'statement': .... Traceback (most recent call last): File "/usr/local/bin/checkov", line 5, in <module> run() File "/usr/local/lib/python3.8/site-packages/checkov/main.py", line 47, in run scan_reports = runner_registry.run(root_folder, external_checks_dir=args.external_checks_dir, files=file) File "/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py", line 20, in run scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files) File "/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py", line 38, in run self.check_tf_definition(report, root_folder, tf_definitions) File "/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py", line 66, in check_tf_definition self.run_block(definition[1][block_type], definitions_context, full_file_path, report, scanned_file, File "/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py", line 88, in run_block results = registry.scan(scanned_file, entity, File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py", line 48, in scan result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration, File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py", line 44, in run raise e File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py", line 33, in run check_result['result'] = self.scan_entity_conf(entity_configuration) File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/base_check.py", line 19, in scan_entity_conf return self.scan_data_conf(conf) File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/aws/AdminPolicyDocument.py", line 23, in scan_data_conf if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]: KeyError: 'effect' ``` **To Reproduce** Steps to reproduce the behavior: 1. Write a terraform file: ``` provider "aws" { region = "us-east-1" } data "aws_iam_policy_document" "test-policy" { statement { actions = ["s3:ListBucket"] resources = ["arn:aws:s3:::*"] } } resource "aws_iam_policy" "test-policy" { name = "test-policy" policy = data.aws_iam_policy_document.test-policy.json } ``` **Expected behavior** This should not crash **Additional context** When trying to debug this, I started getting a *different* error. 
```Traceback (most recent call last): File "/home/matthew/.local/bin/checkov", line 2, in <module> from checkov.main import run File "/home/matthew/.local/lib/python3.7/site-packages/checkov/main.py", line 5, in <module> from checkov.common.util.docs_generator import print_checks File "/home/matthew/.local/lib/python3.7/site-packages/checkov/common/util/docs_generator.py", line 5, in <module> from checkov.terraform.checks.data.registry import data_registry File "/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/__init__.py", line 1, in <module> from checkov.terraform.checks.resource import * File "/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/__init__.py", line 1, in <module> from checkov.terraform.checks.resource.aws import * File "/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py", line 27, in <module> check = AdminPolicyDocument() File "/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py", line 11, in __init__ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource) File "/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/base_check.py", line 18, in __init__ resource_registry.register(self) File "/home/matthew/.local/lib/python3.7/site-packages/checkov/common/checks/base_check_registry.py", line 16, in register for entity in check.supported_entities: AttributeError: 'AdminPolicyDocument' object has no attribute 'supported_entities' ``` </issue> <code> [start of checkov/terraform/checks/data/aws/AdminPolicyDocument.py] 1 from checkov.terraform.checks.data.base_check import BaseDataCheck 2 from checkov.common.models.enums import CheckResult, CheckCategories 3 4 5 class AdminPolicyDocument(BaseDataCheck): 6 def __init__(self): 7 name = "Ensure IAM policies that allow full \"*-*\" administrative privileges are not created" 8 id = "CKV_AWS_1" 9 supported_data = ['aws_iam_policy_document'] 10 categories = [CheckCategories.IAM] 11 super().__init__(name=name, id=id, categories=categories, supported_data=supported_data) 12 13 def scan_data_conf(self, conf): 14 """ 15 validates iam policy document 16 https://learn.hashicorp.com/terraform/aws/iam-policy 17 :param conf: aws_kms_key configuration 18 :return: <CheckResult> 19 """ 20 key = 'statement' 21 if key in conf.keys(): 22 for statement in conf[key]: 23 if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]: 24 return CheckResult.FAILED 25 return CheckResult.PASSED 26 27 28 check = AdminPolicyDocument() 29 [end of checkov/terraform/checks/data/aws/AdminPolicyDocument.py] [start of checkov/terraform/checks/data/aws/StarActionPolicyDocument.py] 1 from checkov.terraform.checks.data.base_check import BaseDataCheck 2 from checkov.common.models.enums import CheckResult, CheckCategories 3 4 5 class StarActionPolicyDocument(BaseDataCheck): 6 def __init__(self): 7 name = "Ensure no IAM policies documents allow \"*\" as a statement's actions" 8 id = "CKV_AWS_49" 9 supported_data = ['aws_iam_policy_document'] 10 categories = [CheckCategories.IAM] 11 super().__init__(name=name, id=id, categories=categories, supported_data=supported_data) 12 13 def scan_data_conf(self, conf): 14 """ 15 validates iam policy document 16 https://learn.hashicorp.com/terraform/aws/iam-policy 17 :param conf: aws_kms_key configuration 18 :return: <CheckResult> 
19 """ 20 key = 'statement' 21 if key in conf.keys(): 22 for statement in conf['statement']: 23 if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == "Allow": 24 return CheckResult.FAILED 25 return CheckResult.PASSED 26 27 28 check = StarActionPolicyDocument() 29 [end of checkov/terraform/checks/data/aws/StarActionPolicyDocument.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py --- a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py +++ b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py @@ -20,7 +20,8 @@ key = 'statement' if key in conf.keys(): for statement in conf[key]: - if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]: + if 'actions' in statement and statement.get('effect', ['Allow'])[0] == 'Allow' and '*' in statement['actions'][0] \ + and '*' in statement['resources'][0]: return CheckResult.FAILED return CheckResult.PASSED diff --git a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py --- a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py +++ b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py @@ -20,7 +20,7 @@ key = 'statement' if key in conf.keys(): for statement in conf['statement']: - if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == "Allow": + if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow': return CheckResult.FAILED return CheckResult.PASSED
{"golden_diff": "diff --git a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\n--- a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\n+++ b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\n@@ -20,7 +20,8 @@\n key = 'statement'\n if key in conf.keys():\n for statement in conf[key]:\n- if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\n+ if 'actions' in statement and statement.get('effect', ['Allow'])[0] == 'Allow' and '*' in statement['actions'][0] \\\n+ and '*' in statement['resources'][0]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n \ndiff --git a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n--- a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n+++ b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n@@ -20,7 +20,7 @@\n key = 'statement'\n if key in conf.keys():\n for statement in conf['statement']:\n- if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == \"Allow\":\n+ if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':\n return CheckResult.FAILED\n return CheckResult.PASSED\n", "issue": "Don't crash on IAM Policies that do not have an \"effect\"\n**Describe the bug**\r\n[`Effect`](https://www.terraform.io/docs/providers/aws/d/iam_policy_document.html#effect) is an optional token on AWS IAM policy documents. It will default to Allow.\r\nWhile it is perhaps good to be explicit, it should not crash Checkov.\r\n\r\n```\r\nERROR:checkov.terraform.checks.data.aws.AdminPolicyDocument:Failed to run check: Ensure IAM policies that allow full \"*-*\" administrative privileges are not created for configuration: {'statement': ....\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/checkov\", line 5, in <module>\r\n run()\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/main.py\", line 47, in run\r\n scan_reports = runner_registry.run(root_folder, external_checks_dir=args.external_checks_dir, files=file)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py\", line 20, in run\r\n scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 38, in run\r\n self.check_tf_definition(report, root_folder, tf_definitions)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 66, in check_tf_definition\r\n self.run_block(definition[1][block_type], definitions_context, full_file_path, report, scanned_file,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 88, in run_block\r\n results = registry.scan(scanned_file, entity,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py\", line 48, in scan\r\n result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\", line 44, in run\r\n raise e\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\", line 33, in run\r\n check_result['result'] = self.scan_entity_conf(entity_configuration)\r\n File 
\"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/base_check.py\", line 19, in scan_entity_conf\r\n return self.scan_data_conf(conf)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\", line 23, in scan_data_conf\r\n if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\r\nKeyError: 'effect'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Write a terraform file:\r\n```\r\nprovider \"aws\" {\r\n region = \"us-east-1\"\r\n}\r\n\r\ndata \"aws_iam_policy_document\" \"test-policy\" {\r\n statement {\r\n actions = [\"s3:ListBucket\"]\r\n resources = [\"arn:aws:s3:::*\"]\r\n }\r\n}\r\n\r\nresource \"aws_iam_policy\" \"test-policy\" {\r\n name = \"test-policy\"\r\n policy = data.aws_iam_policy_document.test-policy.json\r\n}\r\n\r\n```\r\n\r\n**Expected behavior**\r\nThis should not crash\r\n\r\n**Additional context**\r\nWhen trying to debug this, I started getting a *different* error.\r\n```Traceback (most recent call last):\r\n File \"/home/matthew/.local/bin/checkov\", line 2, in <module>\r\n from checkov.main import run\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/main.py\", line 5, in <module>\r\n from checkov.common.util.docs_generator import print_checks\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/common/util/docs_generator.py\", line 5, in <module>\r\n from checkov.terraform.checks.data.registry import data_registry\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/__init__.py\", line 1, in <module>\r\n from checkov.terraform.checks.resource import *\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/__init__.py\", line 1, in <module>\r\n from checkov.terraform.checks.resource.aws import *\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py\", line 27, in <module>\r\n check = AdminPolicyDocument()\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py\", line 11, in __init__\r\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/base_check.py\", line 18, in __init__\r\n resource_registry.register(self)\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/common/checks/base_check_registry.py\", line 16, in register\r\n for entity in check.supported_entities:\r\nAttributeError: 'AdminPolicyDocument' object has no attribute 'supported_entities'\r\n```\r\n\n", "before_files": [{"content": "from checkov.terraform.checks.data.base_check import BaseDataCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\n\nclass AdminPolicyDocument(BaseDataCheck):\n def __init__(self):\n name = \"Ensure IAM policies that allow full \\\"*-*\\\" administrative privileges are not created\"\n id = \"CKV_AWS_1\"\n supported_data = ['aws_iam_policy_document']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n\n def scan_data_conf(self, conf):\n \"\"\"\n validates iam policy document\n https://learn.hashicorp.com/terraform/aws/iam-policy\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'statement'\n 
if key in conf.keys():\n for statement in conf[key]:\n if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = AdminPolicyDocument()\n", "path": "checkov/terraform/checks/data/aws/AdminPolicyDocument.py"}, {"content": "from checkov.terraform.checks.data.base_check import BaseDataCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\n\nclass StarActionPolicyDocument(BaseDataCheck):\n def __init__(self):\n name = \"Ensure no IAM policies documents allow \\\"*\\\" as a statement's actions\"\n id = \"CKV_AWS_49\"\n supported_data = ['aws_iam_policy_document']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n\n def scan_data_conf(self, conf):\n \"\"\"\n validates iam policy document\n https://learn.hashicorp.com/terraform/aws/iam-policy\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'statement'\n if key in conf.keys():\n for statement in conf['statement']:\n if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == \"Allow\":\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = StarActionPolicyDocument()\n", "path": "checkov/terraform/checks/data/aws/StarActionPolicyDocument.py"}]}
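The crash in this record comes down to indexing `statement['effect']` when the key is absent; the golden diff swaps it for a `.get()` lookup that falls back to the IAM default of `Allow`. A minimal sketch of that pattern follows; the sample statements are invented for illustration, not taken from the row, but the list-wrapped values mimic how checkov hands parsed HCL to the check.

```python
def is_admin_statement(statement: dict) -> bool:
    # Missing 'effect' is treated as 'Allow', matching AWS IAM semantics,
    # instead of raising KeyError as the unpatched check did.
    effect = statement.get("effect", ["Allow"])[0]
    return (
        "actions" in statement
        and effect == "Allow"
        and "*" in statement["actions"][0]
        and "*" in statement["resources"][0]
    )


statements = [
    {"actions": [["*"]], "resources": [["*"]]},                      # no effect -> Allow -> flagged
    {"effect": ["Deny"], "actions": [["*"]], "resources": [["*"]]},  # explicit Deny -> not flagged
]
print([is_admin_statement(s) for s in statements])  # [True, False]
```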
2,356 | 366
gh_patches_debug_41694 | rasdani/github-patches | git_diff | deepset-ai__haystack-7994
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add `max_retries` and `timeout` params to all `AzureOpenAI` classes **Is your feature request related to a problem? Please describe.** Currently all `OpenAI` related classes (e.g. `OpenAIDocumentEmbedder`, `OpenAIChatGenerator`) can be initialised by setting `max_retries` and `timeout` params. The corresponding `AzureOpenAI` don't always have the same params. **Describe the solution you'd like** It would be nice to have these params in the `AzureOpenAI` classes **Describe alternatives you've considered** Subclass `AzureOpenAI` and create custom components. **Additional context** cc @anakin87 :) </issue> <code> [start of haystack/components/embedders/azure_document_embedder.py] 1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]> 2 # 3 # SPDX-License-Identifier: Apache-2.0 4 5 import os 6 from typing import Any, Dict, List, Optional, Tuple 7 8 from openai.lib.azure import AzureOpenAI 9 from tqdm import tqdm 10 11 from haystack import Document, component, default_from_dict, default_to_dict 12 from haystack.utils import Secret, deserialize_secrets_inplace 13 14 15 @component 16 class AzureOpenAIDocumentEmbedder: 17 """ 18 A component for computing Document embeddings using OpenAI models on Azure. 19 20 Usage example: 21 ```python 22 from haystack import Document 23 from haystack.components.embedders import AzureOpenAIDocumentEmbedder 24 25 doc = Document(content="I love pizza!") 26 27 document_embedder = AzureOpenAIDocumentEmbedder() 28 29 result = document_embedder.run([doc]) 30 print(result['documents'][0].embedding) 31 32 # [0.017020374536514282, -0.023255806416273117, ...] 33 ``` 34 """ 35 36 def __init__( 37 self, 38 azure_endpoint: Optional[str] = None, 39 api_version: Optional[str] = "2023-05-15", 40 azure_deployment: str = "text-embedding-ada-002", 41 dimensions: Optional[int] = None, 42 api_key: Optional[Secret] = Secret.from_env_var("AZURE_OPENAI_API_KEY", strict=False), 43 azure_ad_token: Optional[Secret] = Secret.from_env_var("AZURE_OPENAI_AD_TOKEN", strict=False), 44 organization: Optional[str] = None, 45 prefix: str = "", 46 suffix: str = "", 47 batch_size: int = 32, 48 progress_bar: bool = True, 49 meta_fields_to_embed: Optional[List[str]] = None, 50 embedding_separator: str = "\n", 51 ): 52 """ 53 Create an AzureOpenAIDocumentEmbedder component. 54 55 :param azure_endpoint: 56 The endpoint of the deployed model. 57 :param api_version: 58 The version of the API to use. 59 :param azure_deployment: 60 The deployment of the model, usually matches the model name. 61 :param dimensions: 62 The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 63 and later models. 64 :param api_key: 65 The API key used for authentication. 66 :param azure_ad_token: 67 Microsoft Entra ID token, see Microsoft's official 68 [Entra ID](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id) 69 documentation for more information. 70 Used to be called Azure Active Directory. 71 :param organization: 72 The Organization ID. See OpenAI's 73 [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization) 74 for more information. 75 :param prefix: 76 A string to add at the beginning of each text. 77 :param suffix: 78 A string to add at the end of each text. 79 :param batch_size: 80 Number of Documents to encode at once. 
81 :param progress_bar: 82 If True shows a progress bar when running. 83 :param meta_fields_to_embed: 84 List of meta fields that will be embedded along with the Document text. 85 :param embedding_separator: 86 Separator used to concatenate the meta fields to the Document text. 87 """ 88 # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT 89 azure_endpoint = azure_endpoint or os.environ.get("AZURE_OPENAI_ENDPOINT") 90 if not azure_endpoint: 91 raise ValueError("Please provide an Azure endpoint or set the environment variable AZURE_OPENAI_ENDPOINT.") 92 93 if api_key is None and azure_ad_token is None: 94 raise ValueError("Please provide an API key or an Azure Active Directory token.") 95 96 self.api_key = api_key 97 self.azure_ad_token = azure_ad_token 98 self.api_version = api_version 99 self.azure_endpoint = azure_endpoint 100 self.azure_deployment = azure_deployment 101 self.dimensions = dimensions 102 self.organization = organization 103 self.prefix = prefix 104 self.suffix = suffix 105 self.batch_size = batch_size 106 self.progress_bar = progress_bar 107 self.meta_fields_to_embed = meta_fields_to_embed or [] 108 self.embedding_separator = embedding_separator 109 110 self._client = AzureOpenAI( 111 api_version=api_version, 112 azure_endpoint=azure_endpoint, 113 azure_deployment=azure_deployment, 114 api_key=api_key.resolve_value() if api_key is not None else None, 115 azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None, 116 organization=organization, 117 ) 118 119 def _get_telemetry_data(self) -> Dict[str, Any]: 120 """ 121 Data that is sent to Posthog for usage analytics. 122 """ 123 return {"model": self.azure_deployment} 124 125 def to_dict(self) -> Dict[str, Any]: 126 """ 127 Serializes the component to a dictionary. 128 129 :returns: 130 Dictionary with serialized data. 131 """ 132 return default_to_dict( 133 self, 134 azure_endpoint=self.azure_endpoint, 135 azure_deployment=self.azure_deployment, 136 dimensions=self.dimensions, 137 organization=self.organization, 138 api_version=self.api_version, 139 prefix=self.prefix, 140 suffix=self.suffix, 141 batch_size=self.batch_size, 142 progress_bar=self.progress_bar, 143 meta_fields_to_embed=self.meta_fields_to_embed, 144 embedding_separator=self.embedding_separator, 145 api_key=self.api_key.to_dict() if self.api_key is not None else None, 146 azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None, 147 ) 148 149 @classmethod 150 def from_dict(cls, data: Dict[str, Any]) -> "AzureOpenAIDocumentEmbedder": 151 """ 152 Deserializes the component from a dictionary. 153 154 :param data: 155 Dictionary to deserialize from. 156 :returns: 157 Deserialized component. 158 """ 159 deserialize_secrets_inplace(data["init_parameters"], keys=["api_key", "azure_ad_token"]) 160 return default_from_dict(cls, data) 161 162 def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]: 163 """ 164 Prepare the texts to embed by concatenating the Document text with the metadata fields to embed. 
165 """ 166 texts_to_embed = [] 167 for doc in documents: 168 meta_values_to_embed = [ 169 str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None 170 ] 171 172 text_to_embed = ( 173 self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or ""]) + self.suffix 174 ).replace("\n", " ") 175 176 texts_to_embed.append(text_to_embed) 177 return texts_to_embed 178 179 def _embed_batch(self, texts_to_embed: List[str], batch_size: int) -> Tuple[List[List[float]], Dict[str, Any]]: 180 """ 181 Embed a list of texts in batches. 182 """ 183 184 all_embeddings: List[List[float]] = [] 185 meta: Dict[str, Any] = {"model": "", "usage": {"prompt_tokens": 0, "total_tokens": 0}} 186 for i in tqdm(range(0, len(texts_to_embed), batch_size), desc="Embedding Texts"): 187 batch = texts_to_embed[i : i + batch_size] 188 if self.dimensions is not None: 189 response = self._client.embeddings.create( 190 model=self.azure_deployment, dimensions=self.dimensions, input=batch 191 ) 192 else: 193 response = self._client.embeddings.create(model=self.azure_deployment, input=batch) 194 195 # Append embeddings to the list 196 all_embeddings.extend(el.embedding for el in response.data) 197 198 # Update the meta information only once if it's empty 199 if not meta["model"]: 200 meta["model"] = response.model 201 meta["usage"] = dict(response.usage) 202 else: 203 # Update the usage tokens 204 meta["usage"]["prompt_tokens"] += response.usage.prompt_tokens 205 meta["usage"]["total_tokens"] += response.usage.total_tokens 206 207 return all_embeddings, meta 208 209 @component.output_types(documents=List[Document], meta=Dict[str, Any]) 210 def run(self, documents: List[Document]) -> Dict[str, Any]: 211 """ 212 Embed a list of Documents. 213 214 :param documents: 215 Documents to embed. 216 217 :returns: 218 A dictionary with the following keys: 219 - `documents`: Documents with embeddings 220 - `meta`: Information about the usage of the model. 221 """ 222 if not (isinstance(documents, list) and all(isinstance(doc, Document) for doc in documents)): 223 raise TypeError("Input must be a list of Document instances. For strings, use AzureOpenAITextEmbedder.") 224 225 texts_to_embed = self._prepare_texts_to_embed(documents=documents) 226 embeddings, meta = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size) 227 228 # Assign the corresponding embeddings to each document 229 for doc, emb in zip(documents, embeddings): 230 doc.embedding = emb 231 232 return {"documents": documents, "meta": meta} 233 [end of haystack/components/embedders/azure_document_embedder.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/haystack/components/embedders/azure_document_embedder.py b/haystack/components/embedders/azure_document_embedder.py --- a/haystack/components/embedders/azure_document_embedder.py +++ b/haystack/components/embedders/azure_document_embedder.py @@ -33,7 +33,7 @@ ``` """ - def __init__( + def __init__( # noqa: PLR0913 (too-many-arguments) self, azure_endpoint: Optional[str] = None, api_version: Optional[str] = "2023-05-15", @@ -48,6 +48,8 @@ progress_bar: bool = True, meta_fields_to_embed: Optional[List[str]] = None, embedding_separator: str = "\n", + timeout: Optional[float] = None, + max_retries: Optional[int] = None, ): """ Create an AzureOpenAIDocumentEmbedder component. @@ -84,6 +86,10 @@ List of meta fields that will be embedded along with the Document text. :param embedding_separator: Separator used to concatenate the meta fields to the Document text. + :param timeout: The timeout in seconds to be passed to the underlying `AzureOpenAI` client, if not set it is + inferred from the `OPENAI_TIMEOUT` environment variable or set to 30. + :param max_retries: Maximum retries to establish a connection with AzureOpenAI if it returns an internal error, + if not set it is inferred from the `OPENAI_MAX_RETRIES` environment variable or set to 5. """ # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT azure_endpoint = azure_endpoint or os.environ.get("AZURE_OPENAI_ENDPOINT") @@ -106,6 +112,8 @@ self.progress_bar = progress_bar self.meta_fields_to_embed = meta_fields_to_embed or [] self.embedding_separator = embedding_separator + self.timeout = timeout or float(os.environ.get("OPENAI_TIMEOUT", 30.0)) + self.max_retries = max_retries or int(os.environ.get("OPENAI_MAX_RETRIES", 5)) self._client = AzureOpenAI( api_version=api_version, @@ -114,6 +122,8 @@ api_key=api_key.resolve_value() if api_key is not None else None, azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None, organization=organization, + timeout=self.timeout, + max_retries=self.max_retries, ) def _get_telemetry_data(self) -> Dict[str, Any]: @@ -144,6 +154,8 @@ embedding_separator=self.embedding_separator, api_key=self.api_key.to_dict() if self.api_key is not None else None, azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None, + timeout=self.timeout, + max_retries=self.max_retries, ) @classmethod
{"golden_diff": "diff --git a/haystack/components/embedders/azure_document_embedder.py b/haystack/components/embedders/azure_document_embedder.py\n--- a/haystack/components/embedders/azure_document_embedder.py\n+++ b/haystack/components/embedders/azure_document_embedder.py\n@@ -33,7 +33,7 @@\n ```\n \"\"\"\n \n- def __init__(\n+ def __init__( # noqa: PLR0913 (too-many-arguments)\n self,\n azure_endpoint: Optional[str] = None,\n api_version: Optional[str] = \"2023-05-15\",\n@@ -48,6 +48,8 @@\n progress_bar: bool = True,\n meta_fields_to_embed: Optional[List[str]] = None,\n embedding_separator: str = \"\\n\",\n+ timeout: Optional[float] = None,\n+ max_retries: Optional[int] = None,\n ):\n \"\"\"\n Create an AzureOpenAIDocumentEmbedder component.\n@@ -84,6 +86,10 @@\n List of meta fields that will be embedded along with the Document text.\n :param embedding_separator:\n Separator used to concatenate the meta fields to the Document text.\n+ :param timeout: The timeout in seconds to be passed to the underlying `AzureOpenAI` client, if not set it is\n+ inferred from the `OPENAI_TIMEOUT` environment variable or set to 30.\n+ :param max_retries: Maximum retries to establish a connection with AzureOpenAI if it returns an internal error,\n+ if not set it is inferred from the `OPENAI_MAX_RETRIES` environment variable or set to 5.\n \"\"\"\n # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT\n azure_endpoint = azure_endpoint or os.environ.get(\"AZURE_OPENAI_ENDPOINT\")\n@@ -106,6 +112,8 @@\n self.progress_bar = progress_bar\n self.meta_fields_to_embed = meta_fields_to_embed or []\n self.embedding_separator = embedding_separator\n+ self.timeout = timeout or float(os.environ.get(\"OPENAI_TIMEOUT\", 30.0))\n+ self.max_retries = max_retries or int(os.environ.get(\"OPENAI_MAX_RETRIES\", 5))\n \n self._client = AzureOpenAI(\n api_version=api_version,\n@@ -114,6 +122,8 @@\n api_key=api_key.resolve_value() if api_key is not None else None,\n azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\n organization=organization,\n+ timeout=self.timeout,\n+ max_retries=self.max_retries,\n )\n \n def _get_telemetry_data(self) -> Dict[str, Any]:\n@@ -144,6 +154,8 @@\n embedding_separator=self.embedding_separator,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n+ timeout=self.timeout,\n+ max_retries=self.max_retries,\n )\n \n @classmethod\n", "issue": "Add `max_retries` and `timeout` params to all `AzureOpenAI` classes\n**Is your feature request related to a problem? Please describe.**\r\n\r\nCurrently all `OpenAI` related classes (e.g. 
`OpenAIDocumentEmbedder`, `OpenAIChatGenerator`) can be initialised by setting `max_retries` and `timeout` params.\r\n\r\nThe corresponding `AzureOpenAI` don't always have the same params.\r\n\r\n**Describe the solution you'd like**\r\n\r\nIt would be nice to have these params in the `AzureOpenAI` classes\r\n\r\n**Describe alternatives you've considered**\r\n\r\nSubclass `AzureOpenAI` and create custom components.\r\n\r\n**Additional context**\r\n\r\ncc @anakin87 :)\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom openai.lib.azure import AzureOpenAI\nfrom tqdm import tqdm\n\nfrom haystack import Document, component, default_from_dict, default_to_dict\nfrom haystack.utils import Secret, deserialize_secrets_inplace\n\n\n@component\nclass AzureOpenAIDocumentEmbedder:\n \"\"\"\n A component for computing Document embeddings using OpenAI models on Azure.\n\n Usage example:\n ```python\n from haystack import Document\n from haystack.components.embedders import AzureOpenAIDocumentEmbedder\n\n doc = Document(content=\"I love pizza!\")\n\n document_embedder = AzureOpenAIDocumentEmbedder()\n\n result = document_embedder.run([doc])\n print(result['documents'][0].embedding)\n\n # [0.017020374536514282, -0.023255806416273117, ...]\n ```\n \"\"\"\n\n def __init__(\n self,\n azure_endpoint: Optional[str] = None,\n api_version: Optional[str] = \"2023-05-15\",\n azure_deployment: str = \"text-embedding-ada-002\",\n dimensions: Optional[int] = None,\n api_key: Optional[Secret] = Secret.from_env_var(\"AZURE_OPENAI_API_KEY\", strict=False),\n azure_ad_token: Optional[Secret] = Secret.from_env_var(\"AZURE_OPENAI_AD_TOKEN\", strict=False),\n organization: Optional[str] = None,\n prefix: str = \"\",\n suffix: str = \"\",\n batch_size: int = 32,\n progress_bar: bool = True,\n meta_fields_to_embed: Optional[List[str]] = None,\n embedding_separator: str = \"\\n\",\n ):\n \"\"\"\n Create an AzureOpenAIDocumentEmbedder component.\n\n :param azure_endpoint:\n The endpoint of the deployed model.\n :param api_version:\n The version of the API to use.\n :param azure_deployment:\n The deployment of the model, usually matches the model name.\n :param dimensions:\n The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3\n and later models.\n :param api_key:\n The API key used for authentication.\n :param azure_ad_token:\n Microsoft Entra ID token, see Microsoft's official\n [Entra ID](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id)\n documentation for more information.\n Used to be called Azure Active Directory.\n :param organization:\n The Organization ID. 
See OpenAI's\n [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization)\n for more information.\n :param prefix:\n A string to add at the beginning of each text.\n :param suffix:\n A string to add at the end of each text.\n :param batch_size:\n Number of Documents to encode at once.\n :param progress_bar:\n If True shows a progress bar when running.\n :param meta_fields_to_embed:\n List of meta fields that will be embedded along with the Document text.\n :param embedding_separator:\n Separator used to concatenate the meta fields to the Document text.\n \"\"\"\n # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT\n azure_endpoint = azure_endpoint or os.environ.get(\"AZURE_OPENAI_ENDPOINT\")\n if not azure_endpoint:\n raise ValueError(\"Please provide an Azure endpoint or set the environment variable AZURE_OPENAI_ENDPOINT.\")\n\n if api_key is None and azure_ad_token is None:\n raise ValueError(\"Please provide an API key or an Azure Active Directory token.\")\n\n self.api_key = api_key\n self.azure_ad_token = azure_ad_token\n self.api_version = api_version\n self.azure_endpoint = azure_endpoint\n self.azure_deployment = azure_deployment\n self.dimensions = dimensions\n self.organization = organization\n self.prefix = prefix\n self.suffix = suffix\n self.batch_size = batch_size\n self.progress_bar = progress_bar\n self.meta_fields_to_embed = meta_fields_to_embed or []\n self.embedding_separator = embedding_separator\n\n self._client = AzureOpenAI(\n api_version=api_version,\n azure_endpoint=azure_endpoint,\n azure_deployment=azure_deployment,\n api_key=api_key.resolve_value() if api_key is not None else None,\n azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\n organization=organization,\n )\n\n def _get_telemetry_data(self) -> Dict[str, Any]:\n \"\"\"\n Data that is sent to Posthog for usage analytics.\n \"\"\"\n return {\"model\": self.azure_deployment}\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n dimensions=self.dimensions,\n organization=self.organization,\n api_version=self.api_version,\n prefix=self.prefix,\n suffix=self.suffix,\n batch_size=self.batch_size,\n progress_bar=self.progress_bar,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"AzureOpenAIDocumentEmbedder\":\n \"\"\"\n Deserializes the component from a dictionary.\n\n :param data:\n Dictionary to deserialize from.\n :returns:\n Deserialized component.\n \"\"\"\n deserialize_secrets_inplace(data[\"init_parameters\"], keys=[\"api_key\", \"azure_ad_token\"])\n return default_from_dict(cls, data)\n\n def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:\n \"\"\"\n Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.\n \"\"\"\n texts_to_embed = []\n for doc in documents:\n meta_values_to_embed = [\n str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None\n ]\n\n 
text_to_embed = (\n self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or \"\"]) + self.suffix\n ).replace(\"\\n\", \" \")\n\n texts_to_embed.append(text_to_embed)\n return texts_to_embed\n\n def _embed_batch(self, texts_to_embed: List[str], batch_size: int) -> Tuple[List[List[float]], Dict[str, Any]]:\n \"\"\"\n Embed a list of texts in batches.\n \"\"\"\n\n all_embeddings: List[List[float]] = []\n meta: Dict[str, Any] = {\"model\": \"\", \"usage\": {\"prompt_tokens\": 0, \"total_tokens\": 0}}\n for i in tqdm(range(0, len(texts_to_embed), batch_size), desc=\"Embedding Texts\"):\n batch = texts_to_embed[i : i + batch_size]\n if self.dimensions is not None:\n response = self._client.embeddings.create(\n model=self.azure_deployment, dimensions=self.dimensions, input=batch\n )\n else:\n response = self._client.embeddings.create(model=self.azure_deployment, input=batch)\n\n # Append embeddings to the list\n all_embeddings.extend(el.embedding for el in response.data)\n\n # Update the meta information only once if it's empty\n if not meta[\"model\"]:\n meta[\"model\"] = response.model\n meta[\"usage\"] = dict(response.usage)\n else:\n # Update the usage tokens\n meta[\"usage\"][\"prompt_tokens\"] += response.usage.prompt_tokens\n meta[\"usage\"][\"total_tokens\"] += response.usage.total_tokens\n\n return all_embeddings, meta\n\n @component.output_types(documents=List[Document], meta=Dict[str, Any])\n def run(self, documents: List[Document]) -> Dict[str, Any]:\n \"\"\"\n Embed a list of Documents.\n\n :param documents:\n Documents to embed.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: Documents with embeddings\n - `meta`: Information about the usage of the model.\n \"\"\"\n if not (isinstance(documents, list) and all(isinstance(doc, Document) for doc in documents)):\n raise TypeError(\"Input must be a list of Document instances. For strings, use AzureOpenAITextEmbedder.\")\n\n texts_to_embed = self._prepare_texts_to_embed(documents=documents)\n embeddings, meta = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)\n\n # Assign the corresponding embeddings to each document\n for doc, emb in zip(documents, embeddings):\n doc.embedding = emb\n\n return {\"documents\": documents, \"meta\": meta}\n", "path": "haystack/components/embedders/azure_document_embedder.py"}]}
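Stripped of the haystack plumbing, the patch above implements one precedence rule for each new parameter: an explicit constructor argument wins, then an environment variable, then a hard-coded default. A standalone sketch of that resolution logic (the same expressions the diff uses, lifted out of the component) looks like this:

```python
import os
from typing import Optional


def resolve_timeout(timeout: Optional[float] = None) -> float:
    # Mirrors `timeout or float(os.environ.get("OPENAI_TIMEOUT", 30.0))` from the diff.
    return timeout or float(os.environ.get("OPENAI_TIMEOUT", 30.0))


def resolve_max_retries(max_retries: Optional[int] = None) -> int:
    return max_retries or int(os.environ.get("OPENAI_MAX_RETRIES", 5))


print(resolve_timeout())       # 30.0 unless OPENAI_TIMEOUT is set
print(resolve_timeout(10.0))   # 10.0: an explicit argument wins
print(resolve_max_retries())   # 5 unless OPENAI_MAX_RETRIES is set
```

One side effect of using `or` here is that falsy values fall through to the environment variable or default, so `max_retries=0` cannot be expressed; the sketch shares that quirk with the diff.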
3,283 | 690
gh_patches_debug_16984 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1872
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider sallybeauty is broken During the global build at 2021-05-26-14-42-23, spider **sallybeauty** failed with **2712 features** and **5 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/sallybeauty.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson)) </issue> <code> [start of locations/spiders/sallybeauty.py] 1 # -*- coding: utf-8 -*- 2 import scrapy 3 from locations.items import GeojsonPointItem 4 from urllib.parse import urlencode 5 import json 6 import csv 7 from locations.hours import OpeningHours 8 from scrapy.selector import Selector 9 10 11 class SallySpider(scrapy.Spider): 12 name = "sallybeauty" 13 item_attributes = { 'brand': "Sally Beauty" } 14 allowed_domains = ["sallybeauty.com"] 15 16 def start_requests(self): 17 base_url = "https://www.sallybeauty.com/on/demandware.store/Sites-SA-Site/default/Stores-FindStores?" 18 19 point_files = [ 20 './locations/searchable_points/us_centroids_100mile_radius.csv', 21 './locations/searchable_points/ca_centroids_100mile_radius.csv' 22 ] 23 24 params = { 25 "showmap": "true", 26 "radius": "100", 27 } 28 29 for point_file in point_files: 30 with open(point_file) as points: 31 next(points) 32 for point in points: 33 _, lat, lon = point.strip().split(',') 34 params.update({"lat": lat, "long": lon}) 35 yield scrapy.Request(url=base_url + urlencode(params)) 36 37 def parse_hours(self, hours): 38 hrs = Selector(text=hours) 39 days = hrs.xpath('//div[@class="store-hours-day"]/text()').extract() 40 hours = hrs.xpath('//div[@class="store-hours-day"]/span/text()').extract() 41 42 opening_hours = OpeningHours() 43 44 for d, h in zip(days, hours): 45 try: 46 day = d.strip(': ') 47 open_time, close_time = h.split(' - ') 48 open_time = open_time.lstrip('0') 49 opening_hours.add_range(day=day[:2], 50 open_time=open_time, 51 close_time=close_time, 52 time_format="%I:%M %p") 53 except: 54 continue 55 56 return opening_hours.as_opening_hours() 57 58 def parse(self, response): 59 jdata = json.loads(response.body_as_unicode()) 60 61 for row in jdata.get('stores',[]): 62 63 properties = { 64 'ref': row["ID"], 65 'name': row["name"], 66 'addr_full': " ".join([row["address1"], row.get("address2", "") or ""]).strip(), 67 'city': row["city"], 68 'postcode': row["postalCode"], 69 'lat': row["latitude"], 70 'lon': row["longitude"], 71 'phone': row["phone"], 72 'state': row["stateCode"], 73 } 74 75 hours = self.parse_hours(row["storeHours"]) 76 if hours: 77 properties['opening_hours'] = hours 78 79 yield GeojsonPointItem(**properties) 80 [end of locations/spiders/sallybeauty.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/sallybeauty.py b/locations/spiders/sallybeauty.py --- a/locations/spiders/sallybeauty.py +++ b/locations/spiders/sallybeauty.py @@ -58,7 +58,7 @@ def parse(self, response): jdata = json.loads(response.body_as_unicode()) - for row in jdata.get('stores',[]): + for row in jdata.get('stores', []): properties = { 'ref': row["ID"], @@ -72,8 +72,11 @@ 'state': row["stateCode"], } - hours = self.parse_hours(row["storeHours"]) - if hours: - properties['opening_hours'] = hours + store_hours = row.get("storeHours") + if store_hours: + hours = self.parse_hours(store_hours) + + if hours: + properties['opening_hours'] = hours yield GeojsonPointItem(**properties)
{"golden_diff": "diff --git a/locations/spiders/sallybeauty.py b/locations/spiders/sallybeauty.py\n--- a/locations/spiders/sallybeauty.py\n+++ b/locations/spiders/sallybeauty.py\n@@ -58,7 +58,7 @@\n def parse(self, response):\n jdata = json.loads(response.body_as_unicode())\n \n- for row in jdata.get('stores',[]):\n+ for row in jdata.get('stores', []):\n \n properties = {\n 'ref': row[\"ID\"],\n@@ -72,8 +72,11 @@\n 'state': row[\"stateCode\"],\n }\n \n- hours = self.parse_hours(row[\"storeHours\"])\n- if hours:\n- properties['opening_hours'] = hours\n+ store_hours = row.get(\"storeHours\")\n+ if store_hours:\n+ hours = self.parse_hours(store_hours)\n+\n+ if hours:\n+ properties['opening_hours'] = hours\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider sallybeauty is broken\nDuring the global build at 2021-05-26-14-42-23, spider **sallybeauty** failed with **2712 features** and **5 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/sallybeauty.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom urllib.parse import urlencode\nimport json\nimport csv\nfrom locations.hours import OpeningHours\nfrom scrapy.selector import Selector\n\n\nclass SallySpider(scrapy.Spider):\n name = \"sallybeauty\"\n item_attributes = { 'brand': \"Sally Beauty\" }\n allowed_domains = [\"sallybeauty.com\"]\n\n def start_requests(self):\n base_url = \"https://www.sallybeauty.com/on/demandware.store/Sites-SA-Site/default/Stores-FindStores?\"\n\n point_files = [\n './locations/searchable_points/us_centroids_100mile_radius.csv',\n './locations/searchable_points/ca_centroids_100mile_radius.csv'\n ]\n\n params = {\n \"showmap\": \"true\",\n \"radius\": \"100\",\n }\n\n for point_file in point_files:\n with open(point_file) as points:\n next(points)\n for point in points:\n _, lat, lon = point.strip().split(',')\n params.update({\"lat\": lat, \"long\": lon})\n yield scrapy.Request(url=base_url + urlencode(params))\n\n def parse_hours(self, hours):\n hrs = Selector(text=hours)\n days = hrs.xpath('//div[@class=\"store-hours-day\"]/text()').extract()\n hours = hrs.xpath('//div[@class=\"store-hours-day\"]/span/text()').extract()\n\n opening_hours = OpeningHours()\n\n for d, h in zip(days, hours):\n try:\n day = d.strip(': ')\n open_time, close_time = h.split(' - ')\n open_time = open_time.lstrip('0')\n opening_hours.add_range(day=day[:2],\n open_time=open_time,\n close_time=close_time,\n time_format=\"%I:%M %p\")\n except:\n continue\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n jdata = json.loads(response.body_as_unicode())\n\n for row in jdata.get('stores',[]):\n\n properties = {\n 'ref': row[\"ID\"],\n 'name': row[\"name\"],\n 'addr_full': \" \".join([row[\"address1\"], row.get(\"address2\", \"\") or \"\"]).strip(),\n 'city': row[\"city\"],\n 'postcode': row[\"postalCode\"],\n 'lat': row[\"latitude\"],\n 'lon': row[\"longitude\"],\n 'phone': row[\"phone\"],\n 'state': row[\"stateCode\"],\n }\n\n hours = self.parse_hours(row[\"storeHours\"])\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/sallybeauty.py"}]}
1,466 | 222
gh_patches_debug_55584 | rasdani/github-patches | git_diff | wagtail__wagtail-1873
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Migrating to 1.1 Migration File Errors I am attempting to migrate to 1.1 and I am getting an error involving the migration files. ``` Migration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field') ``` The last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error. I tried manually applying 0016, but the error is preventing that from happening. I know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones. Migrating to 1.1 Migration File Errors I am attempting to migrate to 1.1 and I am getting an error involving the migration files. ``` Migration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field') ``` The last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error. I tried manually applying 0016, but the error is preventing that from happening. I know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones. </issue> <code> [start of wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import unicode_literals 3 4 from django.db import models, migrations 5 6 7 class Migration(migrations.Migration): 8 9 dependencies = [ 10 ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'), 11 ] 12 13 operations = [ 14 migrations.AlterField( 15 model_name='grouppagepermission', 16 name='permission_type', 17 field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'), 18 preserve_default=True, 19 ), 20 ] 21 [end of wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py --- a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py +++ b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py @@ -7,7 +7,7 @@ class Migration(migrations.Migration): dependencies = [ - ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'), + ('wagtailcore', '0016_change_page_url_path_to_text_field'), ] operations = [
{"golden_diff": "diff --git a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n--- a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n+++ b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n@@ -7,7 +7,7 @@\n class Migration(migrations.Migration):\n \n dependencies = [\n- ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\n+ ('wagtailcore', '0016_change_page_url_path_to_text_field'),\n ]\n \n operations = [\n", "issue": "Migrating to 1.1 Migration File Errors\nI am attempting to migrate to 1.1 and I am getting an error involving the migration files.\n\n```\nMigration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')\n```\n\nThe last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error.\n\nI tried manually applying 0016, but the error is preventing that from happening.\n\nI know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.\n\nMigrating to 1.1 Migration File Errors\nI am attempting to migrate to 1.1 and I am getting an error involving the migration files.\n\n```\nMigration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')\n```\n\nThe last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error.\n\nI tried manually applying 0016, but the error is preventing that from happening.\n\nI know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='grouppagepermission',\n name='permission_type',\n field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'),\n preserve_default=True,\n ),\n ]\n", "path": "wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py"}]}
1,191 | 171
gh_patches_debug_36962 | rasdani/github-patches | git_diff | medtagger__MedTagger-188
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Disable (temporarily) conversion in X & Y axes ## Expected Behavior Frontend won't be enabled soon, so backend should disable such conversion (for now). ## Actual Behavior Backend always create views from X & Y axes but are not used anywhere. ## Additional comment Let's make it optional and controllable somehow by users (ideally from the UI & API side?). </issue> <code> [start of backend/medtagger/workers/conversion.py] 1 """Module responsible for asynchronous data conversion.""" 2 import io 3 import os 4 import tempfile 5 from subprocess import call 6 from typing import List, Optional 7 8 import numpy as np 9 import pydicom 10 from pydicom.dataset import FileDataset 11 from PIL import Image 12 from celery.utils.log import get_task_logger 13 14 from medtagger.types import ScanID 15 from medtagger.workers import celery_app 16 from medtagger.conversion import convert_slice_to_normalized_8bit_array, convert_scan_to_normalized_8bit_array 17 from medtagger.database.models import SliceOrientation, Slice, Scan 18 from medtagger.repositories.scans import ScansRepository 19 from medtagger.repositories.slices import SlicesRepository 20 21 logger = get_task_logger(__name__) 22 23 MAX_PREVIEW_X_SIZE = 256 24 25 26 @celery_app.task 27 def convert_scan_to_png(scan_id: ScanID) -> None: 28 """Store Scan in HBase database. 29 30 :param scan_id: ID of a Scan 31 """ 32 logger.info('Starting Scan (%s) conversion.', scan_id) 33 temp_files_to_remove: List[str] = [] 34 scan = ScansRepository.get_scan_by_id(scan_id) 35 slices = SlicesRepository.get_slices_by_scan_id(scan_id) 36 if scan.declared_number_of_slices == 0: 37 logger.error('This Scan is empty! Removing from database...') 38 ScansRepository.delete_scan_by_id(scan_id) 39 return 40 41 # At first, collect all Dicom images for given Scan 42 logger.info('Reading all Slices for this Scan... This may take a while...') 43 dicom_images = [] 44 for _slice in slices: 45 image = SlicesRepository.get_slice_original_image(_slice.id) 46 dicom_image, files_to_remove = _get_dicom_image(image) 47 dicom_images.append(dicom_image) 48 temp_files_to_remove.extend(files_to_remove) 49 50 # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation 51 logger.info('Converting each Slice in Z axis.') 52 for dicom_image, _slice in zip(dicom_images, slices): 53 slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image) 54 _convert_to_png_and_store(_slice, slice_pixels) 55 56 # Prepare a preview size and convert 3D scan to fit its max X's axis shape 57 logger.info('Normalizing Scan in 3D. This may take a while...') 58 normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE) 59 60 # Prepare Slices in other orientations 61 logger.info('Preparing Slices in other axis.') 62 _prepare_slices_in_y_orientation(normalized_scan, scan) 63 _prepare_slices_in_x_orientation(normalized_scan, scan) 64 65 logger.info('Marking whole Scan as converted.') 66 scan.mark_as_converted() 67 68 # Remove all temporarily created files for applying workaround 69 for file_name in temp_files_to_remove: 70 os.remove(file_name) 71 72 73 def _get_dicom_image(image: bytes) -> FileDataset: 74 """Return PyDICOM image based on image from HBase. 75 76 This workaround enables support for compressed DICOMs as GDCM wrapper does not support Python3 well. 77 78 :param image: bytes with DICOM image (eg. 
from HBase) 79 :return: PyDICOM Image 80 """ 81 # UGLY WORKAROUND FOR COMPRESSED DICOMs - Start 82 temp_file_name = _create_temporary_file(image) 83 try: 84 dicom_image = pydicom.read_file(temp_file_name, force=True) 85 dicom_image.pixel_array # pylint: disable=pointless-statement; Try to read pixel array from DICOM... 86 return dicom_image, [temp_file_name] 87 except Exception: # pylint: disable=broad-except; Intended - too much cases to cover... 88 # In case of any Exception - try to uncompress data from DICOM first 89 temp_file_uncompressed = _create_temporary_file() 90 call(["gdcmconv", "--raw", "-i", temp_file_name, "-o", temp_file_uncompressed]) # Convert to RAW DICOMs 91 dicom_image = pydicom.read_file(temp_file_uncompressed, force=True) 92 return dicom_image, [temp_file_name, temp_file_uncompressed] 93 # UGLY WORKAROUND - Stop 94 95 96 def _create_temporary_file(image: Optional[bytes] = None) -> str: 97 """Create new temporary file based on given DICOM image. 98 99 This workaround enable support for compressed DICOMs that will be read by the GDCM 100 low-level library. Please remove this workaround as soon as this FIX ME notice 101 will be removed: 102 https://github.com/pydicom/pydicom/blob/master/pydicom/pixel_data_handlers/gdcm_handler.py#L77 103 and this Issue will be closed: 104 https://github.com/pydicom/pydicom/issues/233 105 106 :param image: (optional) bytes with DICOM image 107 :return: path to temporary file 108 """ 109 with tempfile.NamedTemporaryFile(delete=False) as temp_file: 110 temp_file_name = temp_file.name 111 if image: 112 temp_file.write(image) 113 return temp_file_name 114 115 116 def _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None: 117 """Prepare and save Slices in Y orientation. 118 119 :param normalized_scan: Numpy array with 3D normalized Scan 120 :param scan: Scan object to which new Slices should be added 121 """ 122 for y in range(normalized_scan.shape[1]): 123 location = 100.0 * y / normalized_scan.shape[1] 124 slice_pixels = normalized_scan[:, y, :] 125 _slice = scan.add_slice(SliceOrientation.Y) 126 _slice.update_location(location) 127 _convert_to_png_and_store(_slice, slice_pixels) 128 129 130 def _prepare_slices_in_x_orientation(normalized_scan: np.ndarray, scan: Scan) -> None: 131 """Prepare and save Slices in Y orientation. 132 133 :param normalized_scan: Numpy array with 3D normalized Scan 134 :param scan: Scan object to which new Slices should be added 135 """ 136 for x in range(normalized_scan.shape[2]): 137 location = 100.0 * x / normalized_scan.shape[2] 138 slice_pixels = normalized_scan[:, :, x] 139 _slice = scan.add_slice(SliceOrientation.X) 140 _slice.update_location(location) 141 _convert_to_png_and_store(_slice, slice_pixels) 142 143 144 def _convert_to_png_and_store(_slice: Slice, slice_pixels: np.ndarray) -> None: 145 """Convert given Slice's pixel array and store in databases. 146 147 :param _slice: Slice database object 148 :param slice_pixels: numpy array with Slice data 149 """ 150 converted_image = _convert_slice_pixels_to_png(slice_pixels) 151 SlicesRepository.store_converted_image(_slice.id, converted_image) 152 _slice.mark_as_converted() 153 logger.info('%s converted and stored.', _slice) 154 155 156 def _convert_slice_pixels_to_png(slice_pixels: np.ndarray) -> bytes: 157 """Convert given Slice's pixel array to the PNG format in bytes. 
158 159 :param slice_pixels: Slice's pixel array 160 :return: bytes with Slice formatted in PNG 161 """ 162 png_image = io.BytesIO() 163 Image.fromarray(slice_pixels, 'L').save(png_image, 'PNG') 164 png_image.seek(0) 165 return png_image.getvalue() 166 [end of backend/medtagger/workers/conversion.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/medtagger/workers/conversion.py b/backend/medtagger/workers/conversion.py --- a/backend/medtagger/workers/conversion.py +++ b/backend/medtagger/workers/conversion.py @@ -20,6 +20,7 @@ logger = get_task_logger(__name__) +CONVERT_IN_OTHER_AXES = False # Disabled until Frontend will enable support for such Slices MAX_PREVIEW_X_SIZE = 256 @@ -47,20 +48,8 @@ dicom_images.append(dicom_image) temp_files_to_remove.extend(files_to_remove) - # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation - logger.info('Converting each Slice in Z axis.') - for dicom_image, _slice in zip(dicom_images, slices): - slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image) - _convert_to_png_and_store(_slice, slice_pixels) - - # Prepare a preview size and convert 3D scan to fit its max X's axis shape - logger.info('Normalizing Scan in 3D. This may take a while...') - normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE) - - # Prepare Slices in other orientations - logger.info('Preparing Slices in other axis.') - _prepare_slices_in_y_orientation(normalized_scan, scan) - _prepare_slices_in_x_orientation(normalized_scan, scan) + # Correlate Dicom files with Slices and convert all Slices + _convert_scan_in_all_axes(dicom_images, slices, scan) logger.info('Marking whole Scan as converted.') scan.mark_as_converted() @@ -113,6 +102,32 @@ return temp_file_name +def _convert_scan_in_all_axes(dicom_images: List[FileDataset], slices: List[Slice], scan: Scan) -> None: + """Convert Scan in X, Y and Z axes. + + NOTE: X & Y axes are now disabled (until Frontend will support it). + + :param dicom_images: list of all Dicom images + :param slices: list of all Slices in given Scan + :param scan: Scan object to which new Slices should be added + """ + logger.info('Converting each Slice in Z axis.') + for dicom_image, _slice in zip(dicom_images, slices): + slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image) + _convert_to_png_and_store(_slice, slice_pixels) + + # Convert only if it's enabled + if CONVERT_IN_OTHER_AXES: + # Prepare a preview size and convert 3D scan to fit its max X's axis shape + logger.info('Normalizing Scan in 3D. This may take a while...') + normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE) + + # Prepare Slices in other orientations + logger.info('Preparing Slices in other axis.') + _prepare_slices_in_y_orientation(normalized_scan, scan) + _prepare_slices_in_x_orientation(normalized_scan, scan) + + def _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None: """Prepare and save Slices in Y orientation.
{"golden_diff": "diff --git a/backend/medtagger/workers/conversion.py b/backend/medtagger/workers/conversion.py\n--- a/backend/medtagger/workers/conversion.py\n+++ b/backend/medtagger/workers/conversion.py\n@@ -20,6 +20,7 @@\n \n logger = get_task_logger(__name__)\n \n+CONVERT_IN_OTHER_AXES = False # Disabled until Frontend will enable support for such Slices\n MAX_PREVIEW_X_SIZE = 256\n \n \n@@ -47,20 +48,8 @@\n dicom_images.append(dicom_image)\n temp_files_to_remove.extend(files_to_remove)\n \n- # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation\n- logger.info('Converting each Slice in Z axis.')\n- for dicom_image, _slice in zip(dicom_images, slices):\n- slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\n- _convert_to_png_and_store(_slice, slice_pixels)\n-\n- # Prepare a preview size and convert 3D scan to fit its max X's axis shape\n- logger.info('Normalizing Scan in 3D. This may take a while...')\n- normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\n-\n- # Prepare Slices in other orientations\n- logger.info('Preparing Slices in other axis.')\n- _prepare_slices_in_y_orientation(normalized_scan, scan)\n- _prepare_slices_in_x_orientation(normalized_scan, scan)\n+ # Correlate Dicom files with Slices and convert all Slices\n+ _convert_scan_in_all_axes(dicom_images, slices, scan)\n \n logger.info('Marking whole Scan as converted.')\n scan.mark_as_converted()\n@@ -113,6 +102,32 @@\n return temp_file_name\n \n \n+def _convert_scan_in_all_axes(dicom_images: List[FileDataset], slices: List[Slice], scan: Scan) -> None:\n+ \"\"\"Convert Scan in X, Y and Z axes.\n+\n+ NOTE: X & Y axes are now disabled (until Frontend will support it).\n+\n+ :param dicom_images: list of all Dicom images\n+ :param slices: list of all Slices in given Scan\n+ :param scan: Scan object to which new Slices should be added\n+ \"\"\"\n+ logger.info('Converting each Slice in Z axis.')\n+ for dicom_image, _slice in zip(dicom_images, slices):\n+ slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\n+ _convert_to_png_and_store(_slice, slice_pixels)\n+\n+ # Convert only if it's enabled\n+ if CONVERT_IN_OTHER_AXES:\n+ # Prepare a preview size and convert 3D scan to fit its max X's axis shape\n+ logger.info('Normalizing Scan in 3D. 
This may take a while...')\n+ normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\n+\n+ # Prepare Slices in other orientations\n+ logger.info('Preparing Slices in other axis.')\n+ _prepare_slices_in_y_orientation(normalized_scan, scan)\n+ _prepare_slices_in_x_orientation(normalized_scan, scan)\n+\n+\n def _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n \"\"\"Prepare and save Slices in Y orientation.\n", "issue": "Disable (temporarily) conversion in X & Y axes\n## Expected Behavior\r\n\r\nFrontend won't be enabled soon, so backend should disable such conversion (for now).\r\n\r\n## Actual Behavior\r\n\r\nBackend always create views from X & Y axes but are not used anywhere.\r\n\r\n## Additional comment\r\n\r\nLet's make it optional and controllable somehow by users (ideally from the UI & API side?).\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for asynchronous data conversion.\"\"\"\nimport io\nimport os\nimport tempfile\nfrom subprocess import call\nfrom typing import List, Optional\n\nimport numpy as np\nimport pydicom\nfrom pydicom.dataset import FileDataset\nfrom PIL import Image\nfrom celery.utils.log import get_task_logger\n\nfrom medtagger.types import ScanID\nfrom medtagger.workers import celery_app\nfrom medtagger.conversion import convert_slice_to_normalized_8bit_array, convert_scan_to_normalized_8bit_array\nfrom medtagger.database.models import SliceOrientation, Slice, Scan\nfrom medtagger.repositories.scans import ScansRepository\nfrom medtagger.repositories.slices import SlicesRepository\n\nlogger = get_task_logger(__name__)\n\nMAX_PREVIEW_X_SIZE = 256\n\n\n@celery_app.task\ndef convert_scan_to_png(scan_id: ScanID) -> None:\n \"\"\"Store Scan in HBase database.\n\n :param scan_id: ID of a Scan\n \"\"\"\n logger.info('Starting Scan (%s) conversion.', scan_id)\n temp_files_to_remove: List[str] = []\n scan = ScansRepository.get_scan_by_id(scan_id)\n slices = SlicesRepository.get_slices_by_scan_id(scan_id)\n if scan.declared_number_of_slices == 0:\n logger.error('This Scan is empty! Removing from database...')\n ScansRepository.delete_scan_by_id(scan_id)\n return\n\n # At first, collect all Dicom images for given Scan\n logger.info('Reading all Slices for this Scan... This may take a while...')\n dicom_images = []\n for _slice in slices:\n image = SlicesRepository.get_slice_original_image(_slice.id)\n dicom_image, files_to_remove = _get_dicom_image(image)\n dicom_images.append(dicom_image)\n temp_files_to_remove.extend(files_to_remove)\n\n # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation\n logger.info('Converting each Slice in Z axis.')\n for dicom_image, _slice in zip(dicom_images, slices):\n slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\n _convert_to_png_and_store(_slice, slice_pixels)\n\n # Prepare a preview size and convert 3D scan to fit its max X's axis shape\n logger.info('Normalizing Scan in 3D. 
This may take a while...')\n normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\n\n # Prepare Slices in other orientations\n logger.info('Preparing Slices in other axis.')\n _prepare_slices_in_y_orientation(normalized_scan, scan)\n _prepare_slices_in_x_orientation(normalized_scan, scan)\n\n logger.info('Marking whole Scan as converted.')\n scan.mark_as_converted()\n\n # Remove all temporarily created files for applying workaround\n for file_name in temp_files_to_remove:\n os.remove(file_name)\n\n\ndef _get_dicom_image(image: bytes) -> FileDataset:\n \"\"\"Return PyDICOM image based on image from HBase.\n\n This workaround enables support for compressed DICOMs as GDCM wrapper does not support Python3 well.\n\n :param image: bytes with DICOM image (eg. from HBase)\n :return: PyDICOM Image\n \"\"\"\n # UGLY WORKAROUND FOR COMPRESSED DICOMs - Start\n temp_file_name = _create_temporary_file(image)\n try:\n dicom_image = pydicom.read_file(temp_file_name, force=True)\n dicom_image.pixel_array # pylint: disable=pointless-statement; Try to read pixel array from DICOM...\n return dicom_image, [temp_file_name]\n except Exception: # pylint: disable=broad-except; Intended - too much cases to cover...\n # In case of any Exception - try to uncompress data from DICOM first\n temp_file_uncompressed = _create_temporary_file()\n call([\"gdcmconv\", \"--raw\", \"-i\", temp_file_name, \"-o\", temp_file_uncompressed]) # Convert to RAW DICOMs\n dicom_image = pydicom.read_file(temp_file_uncompressed, force=True)\n return dicom_image, [temp_file_name, temp_file_uncompressed]\n # UGLY WORKAROUND - Stop\n\n\ndef _create_temporary_file(image: Optional[bytes] = None) -> str:\n \"\"\"Create new temporary file based on given DICOM image.\n\n This workaround enable support for compressed DICOMs that will be read by the GDCM\n low-level library. 
Please remove this workaround as soon as this FIX ME notice\n will be removed:\n https://github.com/pydicom/pydicom/blob/master/pydicom/pixel_data_handlers/gdcm_handler.py#L77\n and this Issue will be closed:\n https://github.com/pydicom/pydicom/issues/233\n\n :param image: (optional) bytes with DICOM image\n :return: path to temporary file\n \"\"\"\n with tempfile.NamedTemporaryFile(delete=False) as temp_file:\n temp_file_name = temp_file.name\n if image:\n temp_file.write(image)\n return temp_file_name\n\n\ndef _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n \"\"\"Prepare and save Slices in Y orientation.\n\n :param normalized_scan: Numpy array with 3D normalized Scan\n :param scan: Scan object to which new Slices should be added\n \"\"\"\n for y in range(normalized_scan.shape[1]):\n location = 100.0 * y / normalized_scan.shape[1]\n slice_pixels = normalized_scan[:, y, :]\n _slice = scan.add_slice(SliceOrientation.Y)\n _slice.update_location(location)\n _convert_to_png_and_store(_slice, slice_pixels)\n\n\ndef _prepare_slices_in_x_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n \"\"\"Prepare and save Slices in Y orientation.\n\n :param normalized_scan: Numpy array with 3D normalized Scan\n :param scan: Scan object to which new Slices should be added\n \"\"\"\n for x in range(normalized_scan.shape[2]):\n location = 100.0 * x / normalized_scan.shape[2]\n slice_pixels = normalized_scan[:, :, x]\n _slice = scan.add_slice(SliceOrientation.X)\n _slice.update_location(location)\n _convert_to_png_and_store(_slice, slice_pixels)\n\n\ndef _convert_to_png_and_store(_slice: Slice, slice_pixels: np.ndarray) -> None:\n \"\"\"Convert given Slice's pixel array and store in databases.\n\n :param _slice: Slice database object\n :param slice_pixels: numpy array with Slice data\n \"\"\"\n converted_image = _convert_slice_pixels_to_png(slice_pixels)\n SlicesRepository.store_converted_image(_slice.id, converted_image)\n _slice.mark_as_converted()\n logger.info('%s converted and stored.', _slice)\n\n\ndef _convert_slice_pixels_to_png(slice_pixels: np.ndarray) -> bytes:\n \"\"\"Convert given Slice's pixel array to the PNG format in bytes.\n\n :param slice_pixels: Slice's pixel array\n :return: bytes with Slice formatted in PNG\n \"\"\"\n png_image = io.BytesIO()\n Image.fromarray(slice_pixels, 'L').save(png_image, 'PNG')\n png_image.seek(0)\n return png_image.getvalue()\n", "path": "backend/medtagger/workers/conversion.py"}]}
2,635 | 766 | gh_patches_debug_39711 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-824
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> VideoClassificationData.from_files() does not work ## 🐛 Bug `VideoClassificationData.from_files()` does not work. `VideoClassificationData` objects can only be constructed using the `from_folders()` classmethod - which unfortunately doesn't work for multilabel tasks :) ### To Reproduce I wrote a Colab notebook to reproduce this in a self-contained environment: https://colab.research.google.com/drive/1X7UvZDndCc0dzcUZ_fGdmQz0ZMTTsj_U?usp=sharing #### Code sample See link to colab above ### Expected behavior A `VideoClassificationData` object should be constructed from a list of paths and their labels </issue> <code> [start of flash/video/classification/data.py] 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import pathlib 15 from typing import Any, Callable, Dict, List, Optional, Type, TYPE_CHECKING, Union 16 17 import numpy as np 18 import torch 19 from pytorch_lightning.utilities.exceptions import MisconfigurationException 20 from torch.utils.data import Sampler 21 22 from flash.core.data.data_module import DataModule 23 from flash.core.data.data_source import ( 24 DefaultDataKeys, 25 DefaultDataSources, 26 FiftyOneDataSource, 27 LabelsState, 28 PathsDataSource, 29 ) 30 from flash.core.data.process import Preprocess 31 from flash.core.integrations.labelstudio.data_source import LabelStudioVideoClassificationDataSource 32 from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, lazy_import 33 34 SampleCollection = None 35 if _FIFTYONE_AVAILABLE: 36 fol = lazy_import("fiftyone.core.labels") 37 if TYPE_CHECKING: 38 from fiftyone.core.collections import SampleCollection 39 else: 40 fol = None 41 42 if _KORNIA_AVAILABLE: 43 import kornia.augmentation as K 44 45 if _PYTORCHVIDEO_AVAILABLE: 46 from pytorchvideo.data.clip_sampling import ClipSampler, make_clip_sampler 47 from pytorchvideo.data.encoded_video import EncodedVideo 48 from pytorchvideo.data.labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset 49 from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths 50 from pytorchvideo.transforms import ApplyTransformToKey, UniformTemporalSubsample 51 from torchvision.transforms import CenterCrop, Compose, RandomCrop, RandomHorizontalFlip 52 else: 53 ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None 54 55 _PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]] 56 57 58 class BaseVideoClassification: 59 def __init__( 60 self, 61 clip_sampler: "ClipSampler", 62 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler, 63 decode_audio: bool = True, 64 decoder: str = "pyav", 65 ): 66 self.clip_sampler = clip_sampler 67 self.video_sampler = video_sampler 68 self.decode_audio = decode_audio 69 self.decoder = decoder 70 71 def load_data(self, data: str, dataset: Optional[Any] = None) -> 
"LabeledVideoDataset": 72 ds = self._make_encoded_video_dataset(data) 73 if self.training: 74 label_to_class_mapping = {p[1]: p[0].split("/")[-2] for p in ds._labeled_videos._paths_and_labels} 75 self.set_state(LabelsState(label_to_class_mapping)) 76 dataset.num_classes = len(np.unique([s[1]["label"] for s in ds._labeled_videos])) 77 return ds 78 79 def load_sample(self, sample): 80 return sample 81 82 def predict_load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]: 83 video_path = sample[DefaultDataKeys.INPUT] 84 sample.update(self._encoded_video_to_dict(EncodedVideo.from_path(video_path))) 85 sample[DefaultDataKeys.METADATA] = {"filepath": video_path} 86 return sample 87 88 def _encoded_video_to_dict(self, video, annotation: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: 89 ( 90 clip_start, 91 clip_end, 92 clip_index, 93 aug_index, 94 is_last_clip, 95 ) = self.clip_sampler(0.0, video.duration, annotation) 96 97 loaded_clip = video.get_clip(clip_start, clip_end) 98 99 clip_is_null = ( 100 loaded_clip is None or loaded_clip["video"] is None or (loaded_clip["audio"] is None and self.decode_audio) 101 ) 102 103 if clip_is_null: 104 raise MisconfigurationException( 105 f"The provided video is too short {video.duration} to be clipped at {self.clip_sampler._clip_duration}" 106 ) 107 108 frames = loaded_clip["video"] 109 audio_samples = loaded_clip["audio"] 110 return { 111 "video": frames, 112 "video_name": video.name, 113 "video_index": 0, 114 "clip_index": clip_index, 115 "aug_index": aug_index, 116 **({"audio": audio_samples} if audio_samples is not None else {}), 117 } 118 119 def _make_encoded_video_dataset(self, data) -> "LabeledVideoDataset": 120 raise NotImplementedError("Subclass must implement _make_encoded_video_dataset()") 121 122 123 class VideoClassificationPathsDataSource(BaseVideoClassification, PathsDataSource): 124 def __init__( 125 self, 126 clip_sampler: "ClipSampler", 127 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler, 128 decode_audio: bool = True, 129 decoder: str = "pyav", 130 ): 131 super().__init__( 132 clip_sampler, 133 video_sampler=video_sampler, 134 decode_audio=decode_audio, 135 decoder=decoder, 136 ) 137 PathsDataSource.__init__( 138 self, 139 extensions=("mp4", "avi"), 140 ) 141 142 def _make_encoded_video_dataset(self, data) -> "LabeledVideoDataset": 143 ds: LabeledVideoDataset = labeled_video_dataset( 144 pathlib.Path(data), 145 self.clip_sampler, 146 video_sampler=self.video_sampler, 147 decode_audio=self.decode_audio, 148 decoder=self.decoder, 149 ) 150 return ds 151 152 153 class VideoClassificationFiftyOneDataSource( 154 BaseVideoClassification, 155 FiftyOneDataSource, 156 ): 157 def __init__( 158 self, 159 clip_sampler: "ClipSampler", 160 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler, 161 decode_audio: bool = True, 162 decoder: str = "pyav", 163 label_field: str = "ground_truth", 164 ): 165 super().__init__( 166 clip_sampler=clip_sampler, 167 video_sampler=video_sampler, 168 decode_audio=decode_audio, 169 decoder=decoder, 170 ) 171 FiftyOneDataSource.__init__( 172 self, 173 label_field=label_field, 174 ) 175 176 @property 177 def label_cls(self): 178 return fol.Classification 179 180 def _make_encoded_video_dataset(self, data: SampleCollection) -> "LabeledVideoDataset": 181 classes = self._get_classes(data) 182 label_to_class_mapping = dict(enumerate(classes)) 183 class_to_label_mapping = {c: lab for lab, c in label_to_class_mapping.items()} 184 185 filepaths = data.values("filepath") 186 labels = 
data.values(self.label_field + ".label") 187 targets = [class_to_label_mapping[lab] for lab in labels] 188 labeled_video_paths = LabeledVideoPaths(list(zip(filepaths, targets))) 189 190 ds: LabeledVideoDataset = LabeledVideoDataset( 191 labeled_video_paths, 192 self.clip_sampler, 193 video_sampler=self.video_sampler, 194 decode_audio=self.decode_audio, 195 decoder=self.decoder, 196 ) 197 return ds 198 199 200 class VideoClassificationPreprocess(Preprocess): 201 def __init__( 202 self, 203 train_transform: Optional[Dict[str, Callable]] = None, 204 val_transform: Optional[Dict[str, Callable]] = None, 205 test_transform: Optional[Dict[str, Callable]] = None, 206 predict_transform: Optional[Dict[str, Callable]] = None, 207 clip_sampler: Union[str, "ClipSampler"] = "random", 208 clip_duration: float = 2, 209 clip_sampler_kwargs: Dict[str, Any] = None, 210 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler, 211 decode_audio: bool = True, 212 decoder: str = "pyav", 213 **data_source_kwargs: Any, 214 ): 215 self.clip_sampler = clip_sampler 216 self.clip_duration = clip_duration 217 self.clip_sampler_kwargs = clip_sampler_kwargs 218 self.video_sampler = video_sampler 219 self.decode_audio = decode_audio 220 self.decoder = decoder 221 222 if not _PYTORCHVIDEO_AVAILABLE: 223 raise ModuleNotFoundError("Please, run `pip install pytorchvideo`.") 224 225 if not clip_sampler_kwargs: 226 clip_sampler_kwargs = {} 227 228 if not clip_sampler: 229 raise MisconfigurationException( 230 "clip_sampler should be provided as a string or ``pytorchvideo.data.clip_sampling.ClipSampler``" 231 ) 232 233 clip_sampler = make_clip_sampler(clip_sampler, clip_duration, **clip_sampler_kwargs) 234 235 super().__init__( 236 train_transform=train_transform, 237 val_transform=val_transform, 238 test_transform=test_transform, 239 predict_transform=predict_transform, 240 data_sources={ 241 DefaultDataSources.FILES: VideoClassificationPathsDataSource( 242 clip_sampler, 243 video_sampler=video_sampler, 244 decode_audio=decode_audio, 245 decoder=decoder, 246 ), 247 DefaultDataSources.FOLDERS: VideoClassificationPathsDataSource( 248 clip_sampler, 249 video_sampler=video_sampler, 250 decode_audio=decode_audio, 251 decoder=decoder, 252 ), 253 DefaultDataSources.FIFTYONE: VideoClassificationFiftyOneDataSource( 254 clip_sampler, 255 video_sampler=video_sampler, 256 decode_audio=decode_audio, 257 decoder=decoder, 258 **data_source_kwargs, 259 ), 260 DefaultDataSources.LABELSTUDIO: LabelStudioVideoClassificationDataSource( 261 clip_sampler=clip_sampler, 262 video_sampler=video_sampler, 263 decode_audio=decode_audio, 264 decoder=decoder, 265 **data_source_kwargs, 266 ), 267 }, 268 default_data_source=DefaultDataSources.FILES, 269 ) 270 271 def get_state_dict(self) -> Dict[str, Any]: 272 return { 273 **self.transforms, 274 "clip_sampler": self.clip_sampler, 275 "clip_duration": self.clip_duration, 276 "clip_sampler_kwargs": self.clip_sampler_kwargs, 277 "video_sampler": self.video_sampler, 278 "decode_audio": self.decode_audio, 279 "decoder": self.decoder, 280 } 281 282 @classmethod 283 def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool) -> "VideoClassificationPreprocess": 284 return cls(**state_dict) 285 286 def default_transforms(self) -> Dict[str, Callable]: 287 if self.training: 288 post_tensor_transform = [ 289 RandomCrop(244, pad_if_needed=True), 290 RandomHorizontalFlip(p=0.5), 291 ] 292 else: 293 post_tensor_transform = [ 294 CenterCrop(244), 295 ] 296 297 return { 298 "post_tensor_transform": Compose( 299 [ 
300 ApplyTransformToKey( 301 key="video", 302 transform=Compose([UniformTemporalSubsample(8)] + post_tensor_transform), 303 ), 304 ] 305 ), 306 "per_batch_transform_on_device": Compose( 307 [ 308 ApplyTransformToKey( 309 key="video", 310 transform=K.VideoSequential( 311 K.Normalize(torch.tensor([0.45, 0.45, 0.45]), torch.tensor([0.225, 0.225, 0.225])), 312 data_format="BCTHW", 313 same_on_frame=False, 314 ), 315 ), 316 ] 317 ), 318 } 319 320 321 class VideoClassificationData(DataModule): 322 """Data module for Video classification tasks.""" 323 324 preprocess_cls = VideoClassificationPreprocess 325 [end of flash/video/classification/data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/flash/video/classification/data.py b/flash/video/classification/data.py --- a/flash/video/classification/data.py +++ b/flash/video/classification/data.py @@ -54,6 +54,8 @@ _PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]] +Label = Union[int, List[int]] + class BaseVideoClassification: def __init__( @@ -150,6 +152,76 @@ return ds +class VideoClassificationListDataSource(BaseVideoClassification, PathsDataSource): + def __init__( + self, + clip_sampler: "ClipSampler", + video_sampler: Type[Sampler] = torch.utils.data.RandomSampler, + decode_audio: bool = True, + decoder: str = "pyav", + ): + super().__init__( + clip_sampler, + video_sampler=video_sampler, + decode_audio=decode_audio, + decoder=decoder, + ) + PathsDataSource.__init__( + self, + extensions=("mp4", "avi"), + ) + + def _to_multi_hot(self, label_list: List[int]) -> torch.Tensor: + v = torch.zeros(len(self.labels_set)) + for label in label_list: + v[label] = 1 + return v + + def _make_encoded_video_dataset(self, data) -> "LabeledVideoDataset": + [paths, str_labels] = data + self.is_multilabel = any(isinstance(label, list) for label in str_labels) + if self.is_multilabel: + self.labels_set = {label for label_list in str_labels for label in label_list} + self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))} + self.id_to_label = {i: label for label, i in self.label_to_id.items()} + + encoded_labels = [ + self._to_multi_hot([self.label_to_id[classname] for classname in label_list]) + for label_list in str_labels + ] + + data = list( + zip( + paths, + encoded_labels, + ) + ) + else: + self.labels_set = set(str_labels) + self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))} + self.id_to_label = {i: label for label, i in self.label_to_id.items()} + data = list( + zip(paths, [self.label_to_id[classname] for classname in str_labels]) + ) # List[Lists] -> List[Tuples] + labeled_video_paths = LabeledVideoPaths(data) + ds = LabeledVideoDataset( + labeled_video_paths, + self.clip_sampler, + video_sampler=self.video_sampler, + decode_audio=self.decode_audio, + decoder=self.decoder, + ) + return ds + + def load_data(self, data: str, dataset: Optional[Any] = None) -> "LabeledVideoDataset": + ds = self._make_encoded_video_dataset(data) + + if self.training: + self.set_state(LabelsState(self.id_to_label)) + dataset.num_classes = len(self.labels_set) + return ds + + class VideoClassificationFiftyOneDataSource( BaseVideoClassification, FiftyOneDataSource, @@ -238,7 +310,7 @@ test_transform=test_transform, predict_transform=predict_transform, data_sources={ - DefaultDataSources.FILES: VideoClassificationPathsDataSource( + DefaultDataSources.FILES: VideoClassificationListDataSource( clip_sampler, video_sampler=video_sampler, decode_audio=decode_audio,
{"golden_diff": "diff --git a/flash/video/classification/data.py b/flash/video/classification/data.py\n--- a/flash/video/classification/data.py\n+++ b/flash/video/classification/data.py\n@@ -54,6 +54,8 @@\n \n _PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]\n \n+Label = Union[int, List[int]]\n+\n \n class BaseVideoClassification:\n def __init__(\n@@ -150,6 +152,76 @@\n return ds\n \n \n+class VideoClassificationListDataSource(BaseVideoClassification, PathsDataSource):\n+ def __init__(\n+ self,\n+ clip_sampler: \"ClipSampler\",\n+ video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n+ decode_audio: bool = True,\n+ decoder: str = \"pyav\",\n+ ):\n+ super().__init__(\n+ clip_sampler,\n+ video_sampler=video_sampler,\n+ decode_audio=decode_audio,\n+ decoder=decoder,\n+ )\n+ PathsDataSource.__init__(\n+ self,\n+ extensions=(\"mp4\", \"avi\"),\n+ )\n+\n+ def _to_multi_hot(self, label_list: List[int]) -> torch.Tensor:\n+ v = torch.zeros(len(self.labels_set))\n+ for label in label_list:\n+ v[label] = 1\n+ return v\n+\n+ def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n+ [paths, str_labels] = data\n+ self.is_multilabel = any(isinstance(label, list) for label in str_labels)\n+ if self.is_multilabel:\n+ self.labels_set = {label for label_list in str_labels for label in label_list}\n+ self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}\n+ self.id_to_label = {i: label for label, i in self.label_to_id.items()}\n+\n+ encoded_labels = [\n+ self._to_multi_hot([self.label_to_id[classname] for classname in label_list])\n+ for label_list in str_labels\n+ ]\n+\n+ data = list(\n+ zip(\n+ paths,\n+ encoded_labels,\n+ )\n+ )\n+ else:\n+ self.labels_set = set(str_labels)\n+ self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}\n+ self.id_to_label = {i: label for label, i in self.label_to_id.items()}\n+ data = list(\n+ zip(paths, [self.label_to_id[classname] for classname in str_labels])\n+ ) # List[Lists] -> List[Tuples]\n+ labeled_video_paths = LabeledVideoPaths(data)\n+ ds = LabeledVideoDataset(\n+ labeled_video_paths,\n+ self.clip_sampler,\n+ video_sampler=self.video_sampler,\n+ decode_audio=self.decode_audio,\n+ decoder=self.decoder,\n+ )\n+ return ds\n+\n+ def load_data(self, data: str, dataset: Optional[Any] = None) -> \"LabeledVideoDataset\":\n+ ds = self._make_encoded_video_dataset(data)\n+\n+ if self.training:\n+ self.set_state(LabelsState(self.id_to_label))\n+ dataset.num_classes = len(self.labels_set)\n+ return ds\n+\n+\n class VideoClassificationFiftyOneDataSource(\n BaseVideoClassification,\n FiftyOneDataSource,\n@@ -238,7 +310,7 @@\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_sources={\n- DefaultDataSources.FILES: VideoClassificationPathsDataSource(\n+ DefaultDataSources.FILES: VideoClassificationListDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n", "issue": "VideoClassificationData.from_files() does not work\n## \ud83d\udc1b Bug\r\n`VideoClassificationData.from_files()` does not work. 
`VideoClassificationData` objects can only be constructed using the `from_folders()` classmethod - which unfortunately doesn't work for multilabel tasks :)\r\n\r\n### To Reproduce\r\nI wrote a Colab notebook to reproduce this in a self-contained environment: https://colab.research.google.com/drive/1X7UvZDndCc0dzcUZ_fGdmQz0ZMTTsj_U?usp=sharing\r\n\r\n#### Code sample\r\nSee link to colab above\r\n\r\n### Expected behavior\r\nA `VideoClassificationData` object should be constructed from a list of paths and their labels\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pathlib\nfrom typing import Any, Callable, Dict, List, Optional, Type, TYPE_CHECKING, Union\n\nimport numpy as np\nimport torch\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import Sampler\n\nfrom flash.core.data.data_module import DataModule\nfrom flash.core.data.data_source import (\n DefaultDataKeys,\n DefaultDataSources,\n FiftyOneDataSource,\n LabelsState,\n PathsDataSource,\n)\nfrom flash.core.data.process import Preprocess\nfrom flash.core.integrations.labelstudio.data_source import LabelStudioVideoClassificationDataSource\nfrom flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, lazy_import\n\nSampleCollection = None\nif _FIFTYONE_AVAILABLE:\n fol = lazy_import(\"fiftyone.core.labels\")\n if TYPE_CHECKING:\n from fiftyone.core.collections import SampleCollection\nelse:\n fol = None\n\nif _KORNIA_AVAILABLE:\n import kornia.augmentation as K\n\nif _PYTORCHVIDEO_AVAILABLE:\n from pytorchvideo.data.clip_sampling import ClipSampler, make_clip_sampler\n from pytorchvideo.data.encoded_video import EncodedVideo\n from pytorchvideo.data.labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset\n from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths\n from pytorchvideo.transforms import ApplyTransformToKey, UniformTemporalSubsample\n from torchvision.transforms import CenterCrop, Compose, RandomCrop, RandomHorizontalFlip\nelse:\n ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None\n\n_PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]\n\n\nclass BaseVideoClassification:\n def __init__(\n self,\n clip_sampler: \"ClipSampler\",\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n ):\n self.clip_sampler = clip_sampler\n self.video_sampler = video_sampler\n self.decode_audio = decode_audio\n self.decoder = decoder\n\n def load_data(self, data: str, dataset: Optional[Any] = None) -> \"LabeledVideoDataset\":\n ds = self._make_encoded_video_dataset(data)\n if self.training:\n label_to_class_mapping = {p[1]: p[0].split(\"/\")[-2] for p in ds._labeled_videos._paths_and_labels}\n self.set_state(LabelsState(label_to_class_mapping))\n dataset.num_classes = len(np.unique([s[1][\"label\"] for s in 
ds._labeled_videos]))\n return ds\n\n def load_sample(self, sample):\n return sample\n\n def predict_load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n video_path = sample[DefaultDataKeys.INPUT]\n sample.update(self._encoded_video_to_dict(EncodedVideo.from_path(video_path)))\n sample[DefaultDataKeys.METADATA] = {\"filepath\": video_path}\n return sample\n\n def _encoded_video_to_dict(self, video, annotation: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:\n (\n clip_start,\n clip_end,\n clip_index,\n aug_index,\n is_last_clip,\n ) = self.clip_sampler(0.0, video.duration, annotation)\n\n loaded_clip = video.get_clip(clip_start, clip_end)\n\n clip_is_null = (\n loaded_clip is None or loaded_clip[\"video\"] is None or (loaded_clip[\"audio\"] is None and self.decode_audio)\n )\n\n if clip_is_null:\n raise MisconfigurationException(\n f\"The provided video is too short {video.duration} to be clipped at {self.clip_sampler._clip_duration}\"\n )\n\n frames = loaded_clip[\"video\"]\n audio_samples = loaded_clip[\"audio\"]\n return {\n \"video\": frames,\n \"video_name\": video.name,\n \"video_index\": 0,\n \"clip_index\": clip_index,\n \"aug_index\": aug_index,\n **({\"audio\": audio_samples} if audio_samples is not None else {}),\n }\n\n def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n raise NotImplementedError(\"Subclass must implement _make_encoded_video_dataset()\")\n\n\nclass VideoClassificationPathsDataSource(BaseVideoClassification, PathsDataSource):\n def __init__(\n self,\n clip_sampler: \"ClipSampler\",\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n ):\n super().__init__(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n )\n PathsDataSource.__init__(\n self,\n extensions=(\"mp4\", \"avi\"),\n )\n\n def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n ds: LabeledVideoDataset = labeled_video_dataset(\n pathlib.Path(data),\n self.clip_sampler,\n video_sampler=self.video_sampler,\n decode_audio=self.decode_audio,\n decoder=self.decoder,\n )\n return ds\n\n\nclass VideoClassificationFiftyOneDataSource(\n BaseVideoClassification,\n FiftyOneDataSource,\n):\n def __init__(\n self,\n clip_sampler: \"ClipSampler\",\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n label_field: str = \"ground_truth\",\n ):\n super().__init__(\n clip_sampler=clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n )\n FiftyOneDataSource.__init__(\n self,\n label_field=label_field,\n )\n\n @property\n def label_cls(self):\n return fol.Classification\n\n def _make_encoded_video_dataset(self, data: SampleCollection) -> \"LabeledVideoDataset\":\n classes = self._get_classes(data)\n label_to_class_mapping = dict(enumerate(classes))\n class_to_label_mapping = {c: lab for lab, c in label_to_class_mapping.items()}\n\n filepaths = data.values(\"filepath\")\n labels = data.values(self.label_field + \".label\")\n targets = [class_to_label_mapping[lab] for lab in labels]\n labeled_video_paths = LabeledVideoPaths(list(zip(filepaths, targets)))\n\n ds: LabeledVideoDataset = LabeledVideoDataset(\n labeled_video_paths,\n self.clip_sampler,\n video_sampler=self.video_sampler,\n decode_audio=self.decode_audio,\n decoder=self.decoder,\n )\n return ds\n\n\nclass VideoClassificationPreprocess(Preprocess):\n def __init__(\n self,\n train_transform: 
Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n clip_sampler: Union[str, \"ClipSampler\"] = \"random\",\n clip_duration: float = 2,\n clip_sampler_kwargs: Dict[str, Any] = None,\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n **data_source_kwargs: Any,\n ):\n self.clip_sampler = clip_sampler\n self.clip_duration = clip_duration\n self.clip_sampler_kwargs = clip_sampler_kwargs\n self.video_sampler = video_sampler\n self.decode_audio = decode_audio\n self.decoder = decoder\n\n if not _PYTORCHVIDEO_AVAILABLE:\n raise ModuleNotFoundError(\"Please, run `pip install pytorchvideo`.\")\n\n if not clip_sampler_kwargs:\n clip_sampler_kwargs = {}\n\n if not clip_sampler:\n raise MisconfigurationException(\n \"clip_sampler should be provided as a string or ``pytorchvideo.data.clip_sampling.ClipSampler``\"\n )\n\n clip_sampler = make_clip_sampler(clip_sampler, clip_duration, **clip_sampler_kwargs)\n\n super().__init__(\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_sources={\n DefaultDataSources.FILES: VideoClassificationPathsDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n ),\n DefaultDataSources.FOLDERS: VideoClassificationPathsDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n ),\n DefaultDataSources.FIFTYONE: VideoClassificationFiftyOneDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n **data_source_kwargs,\n ),\n DefaultDataSources.LABELSTUDIO: LabelStudioVideoClassificationDataSource(\n clip_sampler=clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n **data_source_kwargs,\n ),\n },\n default_data_source=DefaultDataSources.FILES,\n )\n\n def get_state_dict(self) -> Dict[str, Any]:\n return {\n **self.transforms,\n \"clip_sampler\": self.clip_sampler,\n \"clip_duration\": self.clip_duration,\n \"clip_sampler_kwargs\": self.clip_sampler_kwargs,\n \"video_sampler\": self.video_sampler,\n \"decode_audio\": self.decode_audio,\n \"decoder\": self.decoder,\n }\n\n @classmethod\n def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool) -> \"VideoClassificationPreprocess\":\n return cls(**state_dict)\n\n def default_transforms(self) -> Dict[str, Callable]:\n if self.training:\n post_tensor_transform = [\n RandomCrop(244, pad_if_needed=True),\n RandomHorizontalFlip(p=0.5),\n ]\n else:\n post_tensor_transform = [\n CenterCrop(244),\n ]\n\n return {\n \"post_tensor_transform\": Compose(\n [\n ApplyTransformToKey(\n key=\"video\",\n transform=Compose([UniformTemporalSubsample(8)] + post_tensor_transform),\n ),\n ]\n ),\n \"per_batch_transform_on_device\": Compose(\n [\n ApplyTransformToKey(\n key=\"video\",\n transform=K.VideoSequential(\n K.Normalize(torch.tensor([0.45, 0.45, 0.45]), torch.tensor([0.225, 0.225, 0.225])),\n data_format=\"BCTHW\",\n same_on_frame=False,\n ),\n ),\n ]\n ),\n }\n\n\nclass VideoClassificationData(DataModule):\n \"\"\"Data module for Video classification tasks.\"\"\"\n\n preprocess_cls = VideoClassificationPreprocess\n", "path": "flash/video/classification/data.py"}]}
3,998 | 818 | gh_patches_debug_33817 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-530
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> opentelemetry-instrument command fails if incompatible instrumentation is found If an instrumentation is installed for a library that is not found in the environment, the instrument command raises the following exception: ``` ❯ opentelemetry-instrument python main.py Instrumenting of flask failed Traceback (most recent call last): File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 71, in _load_instrumentors conflict = get_dist_dependency_conflicts(entry_point.dist) File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 33, in get_dist_dependency_conflicts return get_dependency_conflicts(deps) File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 41, in get_dependency_conflicts get_distribution(str(dep)) File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 482, in get_distribution dist = get_provider(dist) File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 358, in get_provider return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] IndexError: list index out of range Failed to auto initialize opentelemetry Traceback (most recent call last): File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 111, in initialize _load_instrumentors(distro) File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 85, in _load_instrumentors raise exc File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 71, in _load_instrumentors conflict = get_dist_dependency_conflicts(entry_point.dist) File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 33, in get_dist_dependency_conflicts return get_dependency_conflicts(deps) File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 41, in get_dependency_conflicts get_distribution(str(dep)) File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 482, in get_distribution dist = get_provider(dist) File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 358, in get_provider return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] IndexError: list index out of range ``` bootstrap command does not install any instrumentations for libraries that are not present in the environment so this would only happen if someone manually installed an instrumentation package for a library they're not using. So this is not a deal breaker and doesn't require an immediate hotfix. That said, this IS a bug as the intended behavior of instrument command is to silently ignore such instrumentations. 
</issue> <code> [start of opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py] 1 from typing import Collection, Optional 2 3 from pkg_resources import ( 4 Distribution, 5 DistributionNotFound, 6 VersionConflict, 7 get_distribution, 8 ) 9 10 11 class DependencyConflict: 12 required: str = None 13 found: Optional[str] = None 14 15 def __init__(self, required, found=None): 16 self.required = required 17 self.found = found 18 19 def __str__(self): 20 return 'DependencyConflict: requested: "{0}" but found: "{1}"'.format( 21 self.required, self.found 22 ) 23 24 25 def get_dist_dependency_conflicts( 26 dist: Distribution, 27 ) -> Optional[DependencyConflict]: 28 deps = [ 29 dep 30 for dep in dist.requires(("instruments",)) 31 if dep not in dist.requires() 32 ] 33 return get_dependency_conflicts(deps) 34 35 36 def get_dependency_conflicts( 37 deps: Collection[str], 38 ) -> Optional[DependencyConflict]: 39 for dep in deps: 40 try: 41 get_distribution(str(dep)) 42 except VersionConflict as exc: 43 return DependencyConflict(dep, exc.dist) 44 except DistributionNotFound: 45 return DependencyConflict(dep) 46 return None 47 [end of opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py @@ -1,12 +1,16 @@ +from logging import getLogger from typing import Collection, Optional from pkg_resources import ( Distribution, DistributionNotFound, + RequirementParseError, VersionConflict, get_distribution, ) +logger = getLogger(__file__) + class DependencyConflict: required: str = None @@ -25,12 +29,19 @@ def get_dist_dependency_conflicts( dist: Distribution, ) -> Optional[DependencyConflict]: - deps = [ - dep - for dep in dist.requires(("instruments",)) - if dep not in dist.requires() - ] - return get_dependency_conflicts(deps) + main_deps = dist.requires() + instrumentation_deps = [] + for dep in dist.requires(("instruments",)): + if dep not in main_deps: + # we set marker to none so string representation of the dependency looks like + # requests ~= 1.0 + # instead of + # requests ~= 1.0; extra = "instruments" + # which does not work with `get_distribution()` + dep.marker = None + instrumentation_deps.append(str(dep)) + + return get_dependency_conflicts(instrumentation_deps) def get_dependency_conflicts( @@ -38,9 +49,16 @@ ) -> Optional[DependencyConflict]: for dep in deps: try: - get_distribution(str(dep)) + get_distribution(dep) except VersionConflict as exc: return DependencyConflict(dep, exc.dist) except DistributionNotFound: return DependencyConflict(dep) + except RequirementParseError as exc: + logger.warning( + 'error parsing dependency, reporting as a conflict: "%s" - %s', + dep, + exc, + ) + return DependencyConflict(dep) return None
{"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n@@ -1,12 +1,16 @@\n+from logging import getLogger\n from typing import Collection, Optional\n \n from pkg_resources import (\n Distribution,\n DistributionNotFound,\n+ RequirementParseError,\n VersionConflict,\n get_distribution,\n )\n \n+logger = getLogger(__file__)\n+\n \n class DependencyConflict:\n required: str = None\n@@ -25,12 +29,19 @@\n def get_dist_dependency_conflicts(\n dist: Distribution,\n ) -> Optional[DependencyConflict]:\n- deps = [\n- dep\n- for dep in dist.requires((\"instruments\",))\n- if dep not in dist.requires()\n- ]\n- return get_dependency_conflicts(deps)\n+ main_deps = dist.requires()\n+ instrumentation_deps = []\n+ for dep in dist.requires((\"instruments\",)):\n+ if dep not in main_deps:\n+ # we set marker to none so string representation of the dependency looks like\n+ # requests ~= 1.0\n+ # instead of\n+ # requests ~= 1.0; extra = \"instruments\"\n+ # which does not work with `get_distribution()`\n+ dep.marker = None\n+ instrumentation_deps.append(str(dep))\n+\n+ return get_dependency_conflicts(instrumentation_deps)\n \n \n def get_dependency_conflicts(\n@@ -38,9 +49,16 @@\n ) -> Optional[DependencyConflict]:\n for dep in deps:\n try:\n- get_distribution(str(dep))\n+ get_distribution(dep)\n except VersionConflict as exc:\n return DependencyConflict(dep, exc.dist)\n except DistributionNotFound:\n return DependencyConflict(dep)\n+ except RequirementParseError as exc:\n+ logger.warning(\n+ 'error parsing dependency, reporting as a conflict: \"%s\" - %s',\n+ dep,\n+ exc,\n+ )\n+ return DependencyConflict(dep)\n return None\n", "issue": "opentelemetry-instrument command fails if incompatible instrumentation is found\nIf an instrumentation is installed for a library that is not found in the environment, the instrument command raises the following exception:\r\n\r\n\r\n```\r\n\u276f opentelemetry-instrument python main.py\r\nInstrumenting of flask failed\r\nTraceback (most recent call last):\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 71, in _load_instrumentors\r\n conflict = get_dist_dependency_conflicts(entry_point.dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 33, in get_dist_dependency_conflicts\r\n return get_dependency_conflicts(deps)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 41, in get_dependency_conflicts\r\n get_distribution(str(dep))\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 482, in get_distribution\r\n dist = get_provider(dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 358, in get_provider\r\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\r\nIndexError: list index out of range\r\nFailed to auto initialize opentelemetry\r\nTraceback (most recent call last):\r\n File 
\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 111, in initialize\r\n _load_instrumentors(distro)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 85, in _load_instrumentors\r\n raise exc\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 71, in _load_instrumentors\r\n conflict = get_dist_dependency_conflicts(entry_point.dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 33, in get_dist_dependency_conflicts\r\n return get_dependency_conflicts(deps)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 41, in get_dependency_conflicts\r\n get_distribution(str(dep))\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 482, in get_distribution\r\n dist = get_provider(dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 358, in get_provider\r\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\r\nIndexError: list index out of range\r\n```\r\n\r\nbootstrap command does not install any instrumentations for libraries that are not present in the environment so this would only happen if someone manually installed an instrumentation package for a library they're not using. So this is not a deal breaker and doesn't require an immediate hotfix. That said, this IS a bug as the intended behavior of instrument command is to silently ignore such instrumentations.\n", "before_files": [{"content": "from typing import Collection, Optional\n\nfrom pkg_resources import (\n Distribution,\n DistributionNotFound,\n VersionConflict,\n get_distribution,\n)\n\n\nclass DependencyConflict:\n required: str = None\n found: Optional[str] = None\n\n def __init__(self, required, found=None):\n self.required = required\n self.found = found\n\n def __str__(self):\n return 'DependencyConflict: requested: \"{0}\" but found: \"{1}\"'.format(\n self.required, self.found\n )\n\n\ndef get_dist_dependency_conflicts(\n dist: Distribution,\n) -> Optional[DependencyConflict]:\n deps = [\n dep\n for dep in dist.requires((\"instruments\",))\n if dep not in dist.requires()\n ]\n return get_dependency_conflicts(deps)\n\n\ndef get_dependency_conflicts(\n deps: Collection[str],\n) -> Optional[DependencyConflict]:\n for dep in deps:\n try:\n get_distribution(str(dep))\n except VersionConflict as exc:\n return DependencyConflict(dep, exc.dist)\n except DistributionNotFound:\n return DependencyConflict(dep)\n return None\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py"}]}
1,741 | 483 | gh_patches_debug_8851 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-3789
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> StreamCZ extractor broken ### Checklist - [X] I'm reporting a broken site - [X] I've verified that I'm running yt-dlp version **2022.04.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit) - [X] I've checked that all provided URLs are alive and playable in a browser - [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command) - [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates - [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue) - [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required ### Region world ### Description Broken support for stream.cz Example: https://www.stream.cz/autobazar/vecerni-auto-271357 ### Verbose log ```shell C:\Users\xxx>yt-dlp.lnk https://www.stream.cz/autobazar/vecerni-auto-271357 -U -v [debug] Command-line config: ['https://www.stream.cz/autobazar/vecerni-auto-271357', '-U', '-v'] [debug] User config "C:\Users\xxx\yt-dlp.conf": ['--user-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36', '--no-check-certificate', '-o', 'D:\\Downloads\\yt-dlp\\%(autonumber)03d_%(title)s.%(ext)s', '--no-mtime', '--format', 'bestvideo[ext=mp4][height<=1080][fps<=30]+bestaudio[ext=m4a]/best[ext=mp4][height<=1080][fps<=30]/best[height<=1080][fps<=30]/best', '--merge-output-format', 'mkv', '--ffmpeg-location', 'C:\\Apps\\ffmpeg\\bin\\ffmpeg.exe', '--all-subs', '-i', '--add-metadata', '--remux-video', 'mkv', '--add-header', 'accept-language: cs,fi;q=0.9,en-US;q=0.8,en;q=0.7,en-GB;q=0.6', '--add-header', 'sec-ch-ua: "Chromium";v="94", "Google Chrome";v="94", ";Not A Brand";v="99"', '--add-header', 'sec-ch-ua-arch: "x86"', '--add-header', 'sec-ch-ua-bitness: "64"', '--add-header', 'sec-ch-ua-full-version: "94.0.4606.71"', '--add-header', 'sec-ch-ua-mobile: ?0', '--add-header', 'sec-ch-ua-model: ""', '--add-header', 'sec-ch-ua-platform: "Windows"', '--add-header', 'sec-ch-ua-platform-version: "10.0.0"', '--add-header', 'sec-fetch-dest: document', '--add-header', 'sec-fetch-mode: navigate', '--add-header', 'sec-fetch-site: none', '--add-header', 'sec-fetch-user: ?1', '--add-header', 'service-worker-navigation-preload: true', '--add-header', 'upgrade-insecure-requests: 1', '--add-header', 'alt-svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000,h3-T051=":443"; ma=2592000,h3-Q050=":443"; ma=2592000,h3-Q046=":443"; ma=2592000,h3-Q043=":443"; ma=2592000,quic=":443"; ma=2592000; v="46,43"', '--add-header', 'cache-control: no-cache, no-store, max-age=0, must-revalidate'] [debug] Encodings: locale cp1252, fs utf-8, out utf-8, err utf-8, pref cp1252 [debug] yt-dlp version 2022.04.08 [7884ade] (win_exe) [debug] Python version 3.8.10 (CPython 64bit) - Windows-10-10.0.19044-SP0 [debug] Checking exe version: "C:\Apps\ffmpeg\bin\ffmpeg.exe" -bsfs [debug] Checking exe version: "C:\Apps\ffmpeg\bin\ffprobe" -bsfs [debug] exe versions: ffmpeg 
4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-full_build-www.gyan.dev [debug] Optional libraries: brotli, certifi, Cryptodome, mutagen, sqlite, websockets [debug] Proxy map: {} Latest version: 2022.04.08, Current version: 2022.04.08 yt-dlp is up to date (2022.04.08) [debug] [StreamCZ] Extracting URL: https://www.stream.cz/autobazar/vecerni-auto-271357 [StreamCZ] 271357: Downloading GraphQL result [StreamCZ] 271357: Downloading playlist ERROR: 'NoneType' object has no attribute 'items' Traceback (most recent call last): File "yt_dlp\YoutubeDL.py", line 1408, in wrapper File "yt_dlp\YoutubeDL.py", line 1478, in __extract_info File "yt_dlp\extractor\common.py", line 641, in extract File "yt_dlp\extractor\streamcz.py", line 112, in _real_extract File "yt_dlp\extractor\streamcz.py", line 58, in _extract_formats AttributeError: 'NoneType' object has no attribute 'items' ``` </issue> <code> [start of yt_dlp/extractor/streamcz.py] 1 import json 2 3 from .common import InfoExtractor 4 from ..utils import ( 5 float_or_none, 6 int_or_none, 7 parse_codecs, 8 traverse_obj, 9 urljoin, 10 ) 11 12 13 class StreamCZIE(InfoExtractor): 14 _VALID_URL = r'https?://(?:www\.)?(?:stream|televizeseznam)\.cz/[^?#]+/(?P<display_id>[^?#]+)-(?P<id>[0-9]+)' 15 _TESTS = [{ 16 'url': 'https://www.televizeseznam.cz/video/lajna/buh-57953890', 17 'md5': '40c41ade1464a390a0b447e333df4239', 18 'info_dict': { 19 'id': '57953890', 20 'ext': 'mp4', 21 'title': 'Bůh', 22 'display_id': 'buh', 23 'description': 'md5:8f5f09b9b7bc67df910486cdd88f7165', 24 'duration': 1369.6, 25 'view_count': int, 26 } 27 }, { 28 'url': 'https://www.stream.cz/kdo-to-mluvi/kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna-64087937', 29 'md5': '41fd358000086a1ccdb068c77809b158', 30 'info_dict': { 31 'id': '64087937', 32 'ext': 'mp4', 33 'title': 'Kdo to mluví? Velké odhalení přináší nový pořad už od 25. srpna', 34 'display_id': 'kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna', 35 'description': 'md5:97a811000a6460266029d6c1c2ebcd59', 36 'duration': 50.2, 37 'view_count': int, 38 } 39 }, { 40 'url': 'https://www.stream.cz/tajemno/znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili-64147267', 41 'md5': '3ee4d0be040e8f4a543e67e509d55e3f', 42 'info_dict': { 43 'id': '64147267', 44 'ext': 'mp4', 45 'title': 'Zničehonic jim skrz střechu prolítnul záhadný předmět. 
Badatelé vše objasnili', 46 'display_id': 'znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili', 47 'description': 'md5:4b8ada6718d34bb011c4e04ca4bc19bf', 48 'duration': 442.84, 49 'view_count': int, 50 } 51 }] 52 53 def _extract_formats(self, spl_url, video): 54 for ext, pref, streams in ( 55 ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))), 56 ('mp4', 1, video.get('mp4'))): 57 for format_id, stream in streams.items(): 58 if not stream.get('url'): 59 continue 60 yield { 61 'format_id': f'{format_id}-{ext}', 62 'ext': ext, 63 'source_preference': pref, 64 'url': urljoin(spl_url, stream['url']), 65 'tbr': float_or_none(stream.get('bandwidth'), scale=1000), 66 'duration': float_or_none(stream.get('duration'), scale=1000), 67 'width': traverse_obj(stream, ('resolution', 0)), 68 'height': traverse_obj(stream, ('resolution', 1)) or int_or_none(format_id.replace('p', '')), 69 **parse_codecs(stream.get('codec')), 70 } 71 72 def _real_extract(self, url): 73 display_id, video_id = self._match_valid_url(url).groups() 74 75 data = self._download_json( 76 'https://www.televizeseznam.cz/api/graphql', video_id, 'Downloading GraphQL result', 77 data=json.dumps({ 78 'variables': {'urlName': video_id}, 79 'query': ''' 80 query LoadEpisode($urlName : String){ episode(urlName: $urlName){ ...VideoDetailFragmentOnEpisode } } 81 fragment VideoDetailFragmentOnEpisode on Episode { 82 id 83 spl 84 urlName 85 name 86 perex 87 duration 88 views 89 }''' 90 }).encode('utf-8'), 91 headers={'Content-Type': 'application/json;charset=UTF-8'} 92 )['data']['episode'] 93 94 spl_url = data['spl'] + 'spl2,3' 95 metadata = self._download_json(spl_url, video_id, 'Downloading playlist') 96 if 'Location' in metadata and 'data' not in metadata: 97 spl_url = metadata['Location'] 98 metadata = self._download_json(spl_url, video_id, 'Downloading redirected playlist') 99 video = metadata['data'] 100 101 subtitles = {} 102 for subs in video.get('subtitles', {}).values(): 103 if not subs.get('language'): 104 continue 105 for ext, sub_url in subs.get('urls').items(): 106 subtitles.setdefault(subs['language'], []).append({ 107 'ext': ext, 108 'url': urljoin(spl_url, sub_url) 109 }) 110 111 formats = list(self._extract_formats(spl_url, video)) 112 self._sort_formats(formats) 113 114 return { 115 'id': video_id, 116 'display_id': display_id, 117 'title': data.get('name'), 118 'description': data.get('perex'), 119 'duration': float_or_none(data.get('duration')), 120 'view_count': int_or_none(data.get('views')), 121 'formats': formats, 122 'subtitles': subtitles, 123 } 124 [end of yt_dlp/extractor/streamcz.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/yt_dlp/extractor/streamcz.py b/yt_dlp/extractor/streamcz.py
--- a/yt_dlp/extractor/streamcz.py
+++ b/yt_dlp/extractor/streamcz.py
@@ -52,8 +52,8 @@
 
     def _extract_formats(self, spl_url, video):
        for ext, pref, streams in (
-                ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),
-                ('mp4', 1, video.get('mp4'))):
+                ('ts', -1, traverse_obj(video, ('http_stream', 'qualities')) or {}),
+                ('mp4', 1, video.get('mp4') or {})):
            for format_id, stream in streams.items():
                if not stream.get('url'):
                    continue
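A short note on why this one-line-per-stream change is enough (a sketch, not part of the dataset record): some playlist responses carry no `mp4` entry and no `http_stream.qualities`, so `video.get('mp4')` and `traverse_obj(...)` return `None`, and the format loop crashes on `.items()`. Falling back to an empty dict lets the loop simply yield nothing for the missing stream type:

```python
# Minimal standalone illustration of the failure mode; the names mirror the
# extractor but this is not yt-dlp code.
video = {'http_stream': {}}         # playlist entry without 'qualities' or 'mp4'

streams = video.get('mp4')          # -> None
# streams.items()                   # AttributeError: 'NoneType' object has no attribute 'items'

streams = video.get('mp4') or {}    # patched form: fall back to an empty dict
assert list(streams.items()) == []  # the format loop now just produces no formats
```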
{"golden_diff": "diff --git a/yt_dlp/extractor/streamcz.py b/yt_dlp/extractor/streamcz.py\n--- a/yt_dlp/extractor/streamcz.py\n+++ b/yt_dlp/extractor/streamcz.py\n@@ -52,8 +52,8 @@\n \n def _extract_formats(self, spl_url, video):\n for ext, pref, streams in (\n- ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),\n- ('mp4', 1, video.get('mp4'))):\n+ ('ts', -1, traverse_obj(video, ('http_stream', 'qualities')) or {}),\n+ ('mp4', 1, video.get('mp4') or {})):\n for format_id, stream in streams.items():\n if not stream.get('url'):\n continue\n", "issue": "StreamCZ extractor broken\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2022.04.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are alive and playable in a browser\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nworld\n\n### Description\n\nBroken support for stream.cz\r\nExample: https://www.stream.cz/autobazar/vecerni-auto-271357\n\n### Verbose log\n\n```shell\nC:\\Users\\xxx>yt-dlp.lnk https://www.stream.cz/autobazar/vecerni-auto-271357 -U -v\r\n[debug] Command-line config: ['https://www.stream.cz/autobazar/vecerni-auto-271357', '-U', '-v']\r\n[debug] User config \"C:\\Users\\xxx\\yt-dlp.conf\": ['--user-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36', '--no-check-certificate', '-o', 'D:\\\\Downloads\\\\yt-dlp\\\\%(autonumber)03d_%(title)s.%(ext)s', '--no-mtime', '--format', 'bestvideo[ext=mp4][height<=1080][fps<=30]+bestaudio[ext=m4a]/best[ext=mp4][height<=1080][fps<=30]/best[height<=1080][fps<=30]/best', '--merge-output-format', 'mkv', '--ffmpeg-location', 'C:\\\\Apps\\\\ffmpeg\\\\bin\\\\ffmpeg.exe', '--all-subs', '-i', '--add-metadata', '--remux-video', 'mkv', '--add-header', 'accept-language: cs,fi;q=0.9,en-US;q=0.8,en;q=0.7,en-GB;q=0.6', '--add-header', 'sec-ch-ua: \"Chromium\";v=\"94\", \"Google Chrome\";v=\"94\", \";Not A Brand\";v=\"99\"', '--add-header', 'sec-ch-ua-arch: \"x86\"', '--add-header', 'sec-ch-ua-bitness: \"64\"', '--add-header', 'sec-ch-ua-full-version: \"94.0.4606.71\"', '--add-header', 'sec-ch-ua-mobile: ?0', '--add-header', 'sec-ch-ua-model: \"\"', '--add-header', 'sec-ch-ua-platform: \"Windows\"', '--add-header', 'sec-ch-ua-platform-version: \"10.0.0\"', '--add-header', 'sec-fetch-dest: document', '--add-header', 'sec-fetch-mode: navigate', '--add-header', 'sec-fetch-site: none', '--add-header', 'sec-fetch-user: ?1', '--add-header', 'service-worker-navigation-preload: true', '--add-header', 'upgrade-insecure-requests: 1', '--add-header', 'alt-svc: h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-T051=\":443\"; ma=2592000,h3-Q050=\":443\"; 
ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"', '--add-header', 'cache-control: no-cache, no-store, max-age=0, must-revalidate']\r\n[debug] Encodings: locale cp1252, fs utf-8, out utf-8, err utf-8, pref cp1252\r\n[debug] yt-dlp version 2022.04.08 [7884ade] (win_exe)\r\n[debug] Python version 3.8.10 (CPython 64bit) - Windows-10-10.0.19044-SP0\r\n[debug] Checking exe version: \"C:\\Apps\\ffmpeg\\bin\\ffmpeg.exe\" -bsfs\r\n[debug] Checking exe version: \"C:\\Apps\\ffmpeg\\bin\\ffprobe\" -bsfs\r\n[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-full_build-www.gyan.dev\r\n[debug] Optional libraries: brotli, certifi, Cryptodome, mutagen, sqlite, websockets\r\n[debug] Proxy map: {}\r\nLatest version: 2022.04.08, Current version: 2022.04.08\r\nyt-dlp is up to date (2022.04.08)\r\n[debug] [StreamCZ] Extracting URL: https://www.stream.cz/autobazar/vecerni-auto-271357\r\n[StreamCZ] 271357: Downloading GraphQL result\r\n[StreamCZ] 271357: Downloading playlist\r\nERROR: 'NoneType' object has no attribute 'items'\r\nTraceback (most recent call last):\r\n File \"yt_dlp\\YoutubeDL.py\", line 1408, in wrapper\r\n File \"yt_dlp\\YoutubeDL.py\", line 1478, in __extract_info\r\n File \"yt_dlp\\extractor\\common.py\", line 641, in extract\r\n File \"yt_dlp\\extractor\\streamcz.py\", line 112, in _real_extract\r\n File \"yt_dlp\\extractor\\streamcz.py\", line 58, in _extract_formats\r\nAttributeError: 'NoneType' object has no attribute 'items'\n```\n\n", "before_files": [{"content": "import json\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n float_or_none,\n int_or_none,\n parse_codecs,\n traverse_obj,\n urljoin,\n)\n\n\nclass StreamCZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?(?:stream|televizeseznam)\\.cz/[^?#]+/(?P<display_id>[^?#]+)-(?P<id>[0-9]+)'\n _TESTS = [{\n 'url': 'https://www.televizeseznam.cz/video/lajna/buh-57953890',\n 'md5': '40c41ade1464a390a0b447e333df4239',\n 'info_dict': {\n 'id': '57953890',\n 'ext': 'mp4',\n 'title': 'B\u016fh',\n 'display_id': 'buh',\n 'description': 'md5:8f5f09b9b7bc67df910486cdd88f7165',\n 'duration': 1369.6,\n 'view_count': int,\n }\n }, {\n 'url': 'https://www.stream.cz/kdo-to-mluvi/kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna-64087937',\n 'md5': '41fd358000086a1ccdb068c77809b158',\n 'info_dict': {\n 'id': '64087937',\n 'ext': 'mp4',\n 'title': 'Kdo to mluv\u00ed? Velk\u00e9 odhalen\u00ed p\u0159in\u00e1\u0161\u00ed nov\u00fd po\u0159ad u\u017e od 25. srpna',\n 'display_id': 'kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna',\n 'description': 'md5:97a811000a6460266029d6c1c2ebcd59',\n 'duration': 50.2,\n 'view_count': int,\n }\n }, {\n 'url': 'https://www.stream.cz/tajemno/znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili-64147267',\n 'md5': '3ee4d0be040e8f4a543e67e509d55e3f',\n 'info_dict': {\n 'id': '64147267',\n 'ext': 'mp4',\n 'title': 'Zni\u010dehonic jim skrz st\u0159echu prol\u00edtnul z\u00e1hadn\u00fd p\u0159edm\u011bt. 
Badatel\u00e9 v\u0161e objasnili',\n 'display_id': 'znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili',\n 'description': 'md5:4b8ada6718d34bb011c4e04ca4bc19bf',\n 'duration': 442.84,\n 'view_count': int,\n }\n }]\n\n def _extract_formats(self, spl_url, video):\n for ext, pref, streams in (\n ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),\n ('mp4', 1, video.get('mp4'))):\n for format_id, stream in streams.items():\n if not stream.get('url'):\n continue\n yield {\n 'format_id': f'{format_id}-{ext}',\n 'ext': ext,\n 'source_preference': pref,\n 'url': urljoin(spl_url, stream['url']),\n 'tbr': float_or_none(stream.get('bandwidth'), scale=1000),\n 'duration': float_or_none(stream.get('duration'), scale=1000),\n 'width': traverse_obj(stream, ('resolution', 0)),\n 'height': traverse_obj(stream, ('resolution', 1)) or int_or_none(format_id.replace('p', '')),\n **parse_codecs(stream.get('codec')),\n }\n\n def _real_extract(self, url):\n display_id, video_id = self._match_valid_url(url).groups()\n\n data = self._download_json(\n 'https://www.televizeseznam.cz/api/graphql', video_id, 'Downloading GraphQL result',\n data=json.dumps({\n 'variables': {'urlName': video_id},\n 'query': '''\n query LoadEpisode($urlName : String){ episode(urlName: $urlName){ ...VideoDetailFragmentOnEpisode } }\n fragment VideoDetailFragmentOnEpisode on Episode {\n id\n spl\n urlName\n name\n perex\n duration\n views\n }'''\n }).encode('utf-8'),\n headers={'Content-Type': 'application/json;charset=UTF-8'}\n )['data']['episode']\n\n spl_url = data['spl'] + 'spl2,3'\n metadata = self._download_json(spl_url, video_id, 'Downloading playlist')\n if 'Location' in metadata and 'data' not in metadata:\n spl_url = metadata['Location']\n metadata = self._download_json(spl_url, video_id, 'Downloading redirected playlist')\n video = metadata['data']\n\n subtitles = {}\n for subs in video.get('subtitles', {}).values():\n if not subs.get('language'):\n continue\n for ext, sub_url in subs.get('urls').items():\n subtitles.setdefault(subs['language'], []).append({\n 'ext': ext,\n 'url': urljoin(spl_url, sub_url)\n })\n\n formats = list(self._extract_formats(spl_url, video))\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'display_id': display_id,\n 'title': data.get('name'),\n 'description': data.get('perex'),\n 'duration': float_or_none(data.get('duration')),\n 'view_count': int_or_none(data.get('views')),\n 'formats': formats,\n 'subtitles': subtitles,\n }\n", "path": "yt_dlp/extractor/streamcz.py"}]}
3,845
182
gh_patches_debug_2571
rasdani/github-patches
git_diff
cupy__cupy-2318
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> TypeError for OutOfMemoryError Seen while using chainer while multiprocessing and using the GPU: ``` Traceback (most recent call last): File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner self.run() File "/usr/lib/python3.6/threading.py", line 864, in run self._target(*self._args, **self._kwargs) File "/usr/lib/python3.6/multiprocessing/pool.py", line 463, in _handle_results task = get() File "/usr/lib/python3.6/multiprocessing/connection.py", line 251, in recv return _ForkingPickler.loads(buf.getbuffer()) File "cupy/cuda/memory.pyx", line 37, in cupy.cuda.memory.OutOfMemoryError.__init__ TypeError: __init__() takes exactly 3 positional arguments (2 given) ``` Seems like it tried to raise an OutOfMemoryError but failed to do so. ``` CuPy Version : 6.1.0 CUDA Root : /usr/local/cuda CUDA Build Version : 10010 CUDA Driver Version : 10010 CUDA Runtime Version : 10010 cuDNN Build Version : 7500 cuDNN Version : 7500 NCCL Build Version : 2402 NCCL Runtime Version : 2402 ``` </issue> <code> [start of cupy/cuda/compiler.py] 1 import hashlib 2 import math 3 import os 4 import re 5 import shutil 6 import sys 7 import tempfile 8 9 import six 10 11 from cupy.cuda import device 12 from cupy.cuda import function 13 from cupy.cuda import nvrtc 14 15 _nvrtc_version = None 16 _nvrtc_max_compute_capability = None 17 18 19 def _get_nvrtc_version(): 20 global _nvrtc_version 21 if _nvrtc_version is None: 22 _nvrtc_version = nvrtc.getVersion() 23 24 return _nvrtc_version 25 26 27 def _get_arch(): 28 global _nvrtc_max_compute_capability 29 if _nvrtc_max_compute_capability is None: 30 # See Supported Compile Options section of NVRTC User Guide for 31 # the maximum value allowed for `--gpu-architecture`. 
32 major, minor = _get_nvrtc_version() 33 if major < 9: 34 # CUDA 7.0 / 7.5 / 8.0 35 _nvrtc_max_compute_capability = '50' 36 else: 37 # CUDA 9.0 / 9.1 38 _nvrtc_max_compute_capability = '70' 39 cc = min(device.Device().compute_capability, _nvrtc_max_compute_capability) 40 return 'compute_%s' % cc 41 42 43 class TemporaryDirectory(object): 44 def __enter__(self): 45 self.path = tempfile.mkdtemp() 46 return self.path 47 48 def __exit__(self, exc_type, exc_value, traceback): 49 if exc_value is not None: 50 return 51 52 for name in os.listdir(self.path): 53 os.unlink(os.path.join(self.path, name)) 54 os.rmdir(self.path) 55 56 57 def _get_bool_env_variable(name, default): 58 val = os.environ.get(name) 59 if val is None or len(val) == 0: 60 return default 61 try: 62 return int(val) == 1 63 except ValueError: 64 return False 65 66 67 def compile_using_nvrtc(source, options=(), arch=None, filename='kern.cu'): 68 if not arch: 69 arch = _get_arch() 70 71 options += ('-arch={}'.format(arch),) 72 73 with TemporaryDirectory() as root_dir: 74 cu_path = os.path.join(root_dir, filename) 75 76 with open(cu_path, 'w') as cu_file: 77 cu_file.write(source) 78 79 prog = _NVRTCProgram(source, cu_path) 80 try: 81 ptx = prog.compile(options) 82 except CompileException as e: 83 dump = _get_bool_env_variable( 84 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False) 85 if dump: 86 e.dump(sys.stderr) 87 raise 88 89 return ptx 90 91 92 def _preprocess(source, options, arch): 93 options += ('-arch={}'.format(arch),) 94 95 prog = _NVRTCProgram(source, '') 96 try: 97 result = prog.compile(options) 98 except CompileException as e: 99 dump = _get_bool_env_variable( 100 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False) 101 if dump: 102 e.dump(sys.stderr) 103 raise 104 105 assert isinstance(result, six.text_type) 106 return result 107 108 109 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache') 110 111 112 def get_cache_dir(): 113 return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir) 114 115 116 _empty_file_preprocess_cache = {} 117 118 119 def compile_with_cache(source, options=(), arch=None, cache_dir=None, 120 extra_source=None): 121 # NVRTC does not use extra_source. extra_source is used for cache key. 122 global _empty_file_preprocess_cache 123 if cache_dir is None: 124 cache_dir = get_cache_dir() 125 if arch is None: 126 arch = _get_arch() 127 128 options += ('-ftz=true',) 129 if _get_bool_env_variable('CUPY_CUDA_COMPILE_WITH_DEBUG', False): 130 options += ('--device-debug', '--generate-line-info') 131 132 env = (arch, options, _get_nvrtc_version()) 133 base = _empty_file_preprocess_cache.get(env, None) 134 if base is None: 135 # This is checking of NVRTC compiler internal version 136 base = _preprocess('', options, arch) 137 _empty_file_preprocess_cache[env] = base 138 key_src = '%s %s %s %s' % (env, base, source, extra_source) 139 140 key_src = key_src.encode('utf-8') 141 name = '%s_2.cubin' % hashlib.md5(key_src).hexdigest() 142 143 if not os.path.isdir(cache_dir): 144 try: 145 os.makedirs(cache_dir) 146 except OSError: 147 if not os.path.isdir(cache_dir): 148 raise 149 150 mod = function.Module() 151 # To handle conflicts in concurrent situation, we adopt lock-free method 152 # to avoid performance degradation. 
153 path = os.path.join(cache_dir, name) 154 if os.path.exists(path): 155 with open(path, 'rb') as file: 156 data = file.read() 157 if len(data) >= 32: 158 hash = data[:32] 159 cubin = data[32:] 160 cubin_hash = six.b(hashlib.md5(cubin).hexdigest()) 161 if hash == cubin_hash: 162 mod.load(cubin) 163 return mod 164 165 ptx = compile_using_nvrtc(source, options, arch, name + '.cu') 166 ls = function.LinkState() 167 ls.add_ptr_data(ptx, u'cupy.ptx') 168 cubin = ls.complete() 169 cubin_hash = six.b(hashlib.md5(cubin).hexdigest()) 170 171 # shutil.move is not atomic operation, so it could result in a corrupted 172 # file. We detect it by appending md5 hash at the beginning of each cache 173 # file. If the file is corrupted, it will be ignored next time it is read. 174 with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf: 175 tf.write(cubin_hash) 176 tf.write(cubin) 177 temp_path = tf.name 178 shutil.move(temp_path, path) 179 180 # Save .cu source file along with .cubin 181 if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False): 182 with open(path + '.cu', 'w') as f: 183 f.write(source) 184 185 mod.load(cubin) 186 return mod 187 188 189 class CompileException(Exception): 190 191 def __init__(self, msg, source, name, options): 192 self._msg = msg 193 self.source = source 194 self.name = name 195 self.options = options 196 197 def __repr__(self): 198 return str(self) 199 200 def __str__(self): 201 return self.get_message() 202 203 def get_message(self): 204 return self._msg 205 206 def dump(self, f): 207 lines = self.source.split('\n') 208 digits = int(math.floor(math.log10(len(lines)))) + 1 209 linum_fmt = '{{:0{}d}} '.format(digits) 210 f.write('NVRTC compilation error: {}\n'.format(self)) 211 f.write('-----\n') 212 f.write('Name: {}\n'.format(self.name)) 213 f.write('Options: {}\n'.format(' '.join(self.options))) 214 f.write('CUDA source:\n') 215 for i, line in enumerate(lines): 216 f.write(linum_fmt.format(i + 1) + line.rstrip() + '\n') 217 f.write('-----\n') 218 f.flush() 219 220 221 class _NVRTCProgram(object): 222 223 def __init__(self, src, name='default_program', headers=(), 224 include_names=()): 225 self.ptr = None 226 227 if isinstance(src, six.binary_type): 228 src = src.decode('UTF-8') 229 if isinstance(name, six.binary_type): 230 name = name.decode('UTF-8') 231 232 self.src = src 233 self.name = name 234 self.ptr = nvrtc.createProgram(src, name, headers, include_names) 235 236 def __del__(self): 237 if self.ptr: 238 nvrtc.destroyProgram(self.ptr) 239 240 def compile(self, options=()): 241 try: 242 nvrtc.compileProgram(self.ptr, options) 243 return nvrtc.getPTX(self.ptr) 244 except nvrtc.NVRTCError: 245 log = nvrtc.getProgramLog(self.ptr) 246 raise CompileException(log, self.src, self.name, options) 247 248 249 def is_valid_kernel_name(name): 250 return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', name) is not None 251 [end of cupy/cuda/compiler.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py
--- a/cupy/cuda/compiler.py
+++ b/cupy/cuda/compiler.py
@@ -193,6 +193,10 @@
         self.source = source
         self.name = name
         self.options = options
+        super(CompileException, self).__init__()
+
+    def __reduce__(self):
+        return (type(self), (self._msg, self.source, self.name, self.options))
 
     def __repr__(self):
         return str(self)
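Why this fixes a pickling `TypeError` (a sketch; the class below is illustrative, not taken from the CuPy sources): when an exception is sent back from a multiprocessing worker it is pickled and re-raised in the parent, and Python's default reduction for exceptions re-invokes the class with `self.args`. An exception whose `__init__` requires extra positional arguments but never passes them to `super().__init__()` therefore cannot be reconstructed. Defining `__reduce__`, as the patch does for `CompileException`, supplies the full constructor arguments explicitly:

```python
import pickle

class CompileError(Exception):
    """Illustrative stand-in for an exception with a multi-argument __init__."""
    def __init__(self, msg, source, name, options):
        self._msg, self.source, self.name, self.options = msg, source, name, options
        super().__init__()

    def __reduce__(self):
        # Recreate the instance from all four arguments when unpickled.
        return (type(self), (self._msg, self.source, self.name, self.options))

err = pickle.loads(pickle.dumps(CompileError('log', 'src', 'kern.cu', ('-arch=compute_50',))))
assert err.name == 'kern.cu'   # round-trips cleanly instead of raising TypeError
```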
{"golden_diff": "diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -193,6 +193,10 @@\n self.source = source\n self.name = name\n self.options = options\n+ super(CompileException, self).__init__()\n+\n+ def __reduce__(self):\n+ return (type(self), (self._msg, self.source, self.name, self.options))\n \n def __repr__(self):\n return str(self)\n", "issue": "TypeError for OutOfMemoryError\nSeen while using chainer while multiprocessing and using the GPU:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/usr/lib/python3.6/multiprocessing/pool.py\", line 463, in _handle_results\r\n task = get()\r\n File \"/usr/lib/python3.6/multiprocessing/connection.py\", line 251, in recv\r\n return _ForkingPickler.loads(buf.getbuffer())\r\n File \"cupy/cuda/memory.pyx\", line 37, in cupy.cuda.memory.OutOfMemoryError.__init__\r\nTypeError: __init__() takes exactly 3 positional arguments (2 given)\r\n```\r\nSeems like it tried to raise an OutOfMemoryError but failed to do so.\r\n\r\n```\r\nCuPy Version : 6.1.0\r\nCUDA Root : /usr/local/cuda\r\nCUDA Build Version : 10010\r\nCUDA Driver Version : 10010\r\nCUDA Runtime Version : 10010\r\ncuDNN Build Version : 7500\r\ncuDNN Version : 7500\r\nNCCL Build Version : 2402\r\nNCCL Runtime Version : 2402\r\n```\n", "before_files": [{"content": "import hashlib\nimport math\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\n\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\nfrom cupy.cuda import nvrtc\n\n_nvrtc_version = None\n_nvrtc_max_compute_capability = None\n\n\ndef _get_nvrtc_version():\n global _nvrtc_version\n if _nvrtc_version is None:\n _nvrtc_version = nvrtc.getVersion()\n\n return _nvrtc_version\n\n\ndef _get_arch():\n global _nvrtc_max_compute_capability\n if _nvrtc_max_compute_capability is None:\n # See Supported Compile Options section of NVRTC User Guide for\n # the maximum value allowed for `--gpu-architecture`.\n major, minor = _get_nvrtc_version()\n if major < 9:\n # CUDA 7.0 / 7.5 / 8.0\n _nvrtc_max_compute_capability = '50'\n else:\n # CUDA 9.0 / 9.1\n _nvrtc_max_compute_capability = '70'\n cc = min(device.Device().compute_capability, _nvrtc_max_compute_capability)\n return 'compute_%s' % cc\n\n\nclass TemporaryDirectory(object):\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _get_bool_env_variable(name, default):\n val = os.environ.get(name)\n if val is None or len(val) == 0:\n return default\n try:\n return int(val) == 1\n except ValueError:\n return False\n\n\ndef compile_using_nvrtc(source, options=(), arch=None, filename='kern.cu'):\n if not arch:\n arch = _get_arch()\n\n options += ('-arch={}'.format(arch),)\n\n with TemporaryDirectory() as root_dir:\n cu_path = os.path.join(root_dir, filename)\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n prog = _NVRTCProgram(source, cu_path)\n try:\n ptx = prog.compile(options)\n except CompileException as e:\n dump = _get_bool_env_variable(\n 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\n if dump:\n e.dump(sys.stderr)\n raise\n\n return 
ptx\n\n\ndef _preprocess(source, options, arch):\n options += ('-arch={}'.format(arch),)\n\n prog = _NVRTCProgram(source, '')\n try:\n result = prog.compile(options)\n except CompileException as e:\n dump = _get_bool_env_variable(\n 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\n if dump:\n e.dump(sys.stderr)\n raise\n\n assert isinstance(result, six.text_type)\n return result\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None,\n extra_source=None):\n # NVRTC does not use extra_source. extra_source is used for cache key.\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n options += ('-ftz=true',)\n if _get_bool_env_variable('CUPY_CUDA_COMPILE_WITH_DEBUG', False):\n options += ('--device-debug', '--generate-line-info')\n\n env = (arch, options, _get_nvrtc_version())\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n # This is checking of NVRTC compiler internal version\n base = _preprocess('', options, arch)\n _empty_file_preprocess_cache[env] = base\n key_src = '%s %s %s %s' % (env, base, source, extra_source)\n\n key_src = key_src.encode('utf-8')\n name = '%s_2.cubin' % hashlib.md5(key_src).hexdigest()\n\n if not os.path.isdir(cache_dir):\n try:\n os.makedirs(cache_dir)\n except OSError:\n if not os.path.isdir(cache_dir):\n raise\n\n mod = function.Module()\n # To handle conflicts in concurrent situation, we adopt lock-free method\n # to avoid performance degradation.\n path = os.path.join(cache_dir, name)\n if os.path.exists(path):\n with open(path, 'rb') as file:\n data = file.read()\n if len(data) >= 32:\n hash = data[:32]\n cubin = data[32:]\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n if hash == cubin_hash:\n mod.load(cubin)\n return mod\n\n ptx = compile_using_nvrtc(source, options, arch, name + '.cu')\n ls = function.LinkState()\n ls.add_ptr_data(ptx, u'cupy.ptx')\n cubin = ls.complete()\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n\n # shutil.move is not atomic operation, so it could result in a corrupted\n # file. We detect it by appending md5 hash at the beginning of each cache\n # file. 
If the file is corrupted, it will be ignored next time it is read.\n with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:\n tf.write(cubin_hash)\n tf.write(cubin)\n temp_path = tf.name\n shutil.move(temp_path, path)\n\n # Save .cu source file along with .cubin\n if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):\n with open(path + '.cu', 'w') as f:\n f.write(source)\n\n mod.load(cubin)\n return mod\n\n\nclass CompileException(Exception):\n\n def __init__(self, msg, source, name, options):\n self._msg = msg\n self.source = source\n self.name = name\n self.options = options\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return self.get_message()\n\n def get_message(self):\n return self._msg\n\n def dump(self, f):\n lines = self.source.split('\\n')\n digits = int(math.floor(math.log10(len(lines)))) + 1\n linum_fmt = '{{:0{}d}} '.format(digits)\n f.write('NVRTC compilation error: {}\\n'.format(self))\n f.write('-----\\n')\n f.write('Name: {}\\n'.format(self.name))\n f.write('Options: {}\\n'.format(' '.join(self.options)))\n f.write('CUDA source:\\n')\n for i, line in enumerate(lines):\n f.write(linum_fmt.format(i + 1) + line.rstrip() + '\\n')\n f.write('-----\\n')\n f.flush()\n\n\nclass _NVRTCProgram(object):\n\n def __init__(self, src, name='default_program', headers=(),\n include_names=()):\n self.ptr = None\n\n if isinstance(src, six.binary_type):\n src = src.decode('UTF-8')\n if isinstance(name, six.binary_type):\n name = name.decode('UTF-8')\n\n self.src = src\n self.name = name\n self.ptr = nvrtc.createProgram(src, name, headers, include_names)\n\n def __del__(self):\n if self.ptr:\n nvrtc.destroyProgram(self.ptr)\n\n def compile(self, options=()):\n try:\n nvrtc.compileProgram(self.ptr, options)\n return nvrtc.getPTX(self.ptr)\n except nvrtc.NVRTCError:\n log = nvrtc.getProgramLog(self.ptr)\n raise CompileException(log, self.src, self.name, options)\n\n\ndef is_valid_kernel_name(name):\n return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', name) is not None\n", "path": "cupy/cuda/compiler.py"}]}
3,359
123
gh_patches_debug_6199
rasdani/github-patches
git_diff
microsoft__botbuilder-python-886
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SkillDialog not working for non-'expected replies' scenario ## Version 4.8.0 ## Describe the bug SkillDialog won't work out of the box for non expected-replies scenarios. ## To Reproduce Steps to reproduce the behavior: 1. Set up a root bot using skill dialog and a skill bot 2. Run both bots and initiate the SkillDialog 3. When the skill first comes back to the parent an error like the following should arise: ``` File "..path-to-botbuilder/botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py", line 213, in _send_to_skill if not 200 <= response.status <= 299: AttributeError: 'NoneType' object has no attribute 'status' ``` ## Expected behavior The response get back to the parent without any problems ## Workaround If the skill bot is modified to always send some content in every successful response at the route handler level, the scenario should work. Example on how to do this for an aiohttp skill bot: ```python #This is how a typical message handler method could look like async def messages(req: Request) -> Response: # Main bot message handler. if "application/json" in req.headers["Content-Type"]: body = await req.json() else: return Response(status=415) activity = Activity().deserialize(body) auth_header = req.headers["Authorization"] if "Authorization" in req.headers else "" response = await ADAPTER.process_activity(activity, auth_header, BOT.on_turn) if response: return json_response(data=response.body, status=response.status) # THE FIX IS IN THE LINE BELOW return Response(status=201, body='{"foo":"bar"}'.encode("utf-8")) ``` **Alternative Workaround:** use expected replies as delivery method in the parent bot (SkillDialog). [bug] </issue> <code> [start of libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 # pylint: disable=no-member 4 5 import json 6 from typing import Dict 7 from logging import Logger 8 9 import aiohttp 10 from botbuilder.core import InvokeResponse 11 from botbuilder.core.skills import BotFrameworkClient 12 from botbuilder.schema import ( 13 Activity, 14 ExpectedReplies, 15 ConversationReference, 16 ConversationAccount, 17 ) 18 from botframework.connector.auth import ( 19 ChannelProvider, 20 CredentialProvider, 21 GovernmentConstants, 22 MicrosoftAppCredentials, 23 ) 24 25 26 class BotFrameworkHttpClient(BotFrameworkClient): 27 28 """ 29 A skill host adapter implements API to forward activity to a skill and 30 implements routing ChannelAPI calls from the Skill up through the bot/adapter. 
31 """ 32 33 INVOKE_ACTIVITY_NAME = "SkillEvents.ChannelApiInvoke" 34 _BOT_IDENTITY_KEY = "BotIdentity" 35 _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {} 36 37 def __init__( 38 self, 39 credential_provider: CredentialProvider, 40 channel_provider: ChannelProvider = None, 41 logger: Logger = None, 42 ): 43 if not credential_provider: 44 raise TypeError("credential_provider can't be None") 45 46 self._credential_provider = credential_provider 47 self._channel_provider = channel_provider 48 self._logger = logger 49 self._session = aiohttp.ClientSession() 50 51 async def post_activity( 52 self, 53 from_bot_id: str, 54 to_bot_id: str, 55 to_url: str, 56 service_url: str, 57 conversation_id: str, 58 activity: Activity, 59 ) -> InvokeResponse: 60 app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id) 61 62 if not app_credentials: 63 raise KeyError("Unable to get appCredentials to connect to the skill") 64 65 # Get token for the skill call 66 token = ( 67 app_credentials.get_access_token() 68 if app_credentials.microsoft_app_id 69 else None 70 ) 71 72 # Capture current activity settings before changing them. 73 # TODO: DO we need to set the activity ID? (events that are created manually don't have it). 74 original_conversation_id = activity.conversation.id 75 original_service_url = activity.service_url 76 original_caller_id = activity.caller_id 77 original_relates_to = activity.relates_to 78 79 try: 80 # TODO: The relato has to be ported to the adapter in the new integration library when 81 # resolving conflicts in merge 82 activity.relates_to = ConversationReference( 83 service_url=activity.service_url, 84 activity_id=activity.id, 85 channel_id=activity.channel_id, 86 conversation=ConversationAccount( 87 id=activity.conversation.id, 88 name=activity.conversation.name, 89 conversation_type=activity.conversation.conversation_type, 90 aad_object_id=activity.conversation.aad_object_id, 91 is_group=activity.conversation.is_group, 92 role=activity.conversation.role, 93 tenant_id=activity.conversation.tenant_id, 94 properties=activity.conversation.properties, 95 ), 96 bot=None, 97 ) 98 activity.conversation.id = conversation_id 99 activity.service_url = service_url 100 activity.caller_id = f"urn:botframework:aadappid:{from_bot_id}" 101 102 headers_dict = { 103 "Content-type": "application/json; charset=utf-8", 104 } 105 if token: 106 headers_dict.update( 107 {"Authorization": f"Bearer {token}",} 108 ) 109 110 json_content = json.dumps(activity.serialize()) 111 resp = await self._session.post( 112 to_url, data=json_content.encode("utf-8"), headers=headers_dict, 113 ) 114 resp.raise_for_status() 115 data = (await resp.read()).decode() 116 content = json.loads(data) if data else None 117 118 if content: 119 return InvokeResponse(status=resp.status, body=content) 120 121 finally: 122 # Restore activity properties. 123 activity.conversation.id = original_conversation_id 124 activity.service_url = original_service_url 125 activity.caller_id = original_caller_id 126 activity.relates_to = original_relates_to 127 128 async def post_buffered_activity( 129 self, 130 from_bot_id: str, 131 to_bot_id: str, 132 to_url: str, 133 service_url: str, 134 conversation_id: str, 135 activity: Activity, 136 ) -> [Activity]: 137 """ 138 Helper method to return a list of activities when an Activity is being 139 sent with DeliveryMode == expectReplies. 
140 """ 141 response = await self.post_activity( 142 from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity 143 ) 144 if not response or (response.status / 100) != 2: 145 return [] 146 return ExpectedReplies().deserialize(response.body).activities 147 148 async def _get_app_credentials( 149 self, app_id: str, oauth_scope: str 150 ) -> MicrosoftAppCredentials: 151 if not app_id: 152 return MicrosoftAppCredentials(None, None) 153 154 cache_key = f"{app_id}{oauth_scope}" 155 app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key) 156 157 if app_credentials: 158 return app_credentials 159 160 app_password = await self._credential_provider.get_app_password(app_id) 161 app_credentials = MicrosoftAppCredentials( 162 app_id, app_password, oauth_scope=oauth_scope 163 ) 164 if self._channel_provider and self._channel_provider.is_government(): 165 app_credentials.oauth_endpoint = ( 166 GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL 167 ) 168 app_credentials.oauth_scope = ( 169 GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE 170 ) 171 172 BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials 173 return app_credentials 174 [end of libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py
--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py
+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py
@@ -115,8 +115,7 @@
             data = (await resp.read()).decode()
             content = json.loads(data) if data else None
 
-            if content:
-                return InvokeResponse(status=resp.status, body=content)
+            return InvokeResponse(status=resp.status, body=content)
 
         finally:
             # Restore activity properties.
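Why the non-expected-replies flow broke (a sketch of the control flow; the helper below is illustrative and uses plain dicts rather than the SDK's `InvokeResponse`): a skill that answers a turn with `201` and an empty body leaves `content` as `None`, so the `if content:` branch is skipped and `post_activity` implicitly returns `None`; `SkillDialog` then dereferences `response.status` on that `None`. Returning a response object unconditionally, with a possibly-`None` body, keeps the caller's status check working:

```python
# Standalone illustration of the bug (no botbuilder imports).
def post_activity_buggy(status, body_text):
    content = body_text or None
    if content:                       # an empty 201 reply never reaches this...
        return {"status": status, "body": content}
    # ...so the function falls through and returns None

def post_activity_fixed(status, body_text):
    return {"status": status, "body": body_text or None}  # always return a response

assert post_activity_buggy(201, "") is None           # caller then fails on response.status
assert post_activity_fixed(201, "")["status"] == 201
```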
{"golden_diff": "diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n@@ -115,8 +115,7 @@\n data = (await resp.read()).decode()\n content = json.loads(data) if data else None\n \n- if content:\n- return InvokeResponse(status=resp.status, body=content)\n+ return InvokeResponse(status=resp.status, body=content)\n \n finally:\n # Restore activity properties.\n", "issue": "SkillDialog not working for non-'expected replies' scenario\n## Version\r\n4.8.0\r\n\r\n## Describe the bug\r\nSkillDialog won't work out of the box for non expected-replies scenarios.\r\n\r\n## To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Set up a root bot using skill dialog and a skill bot\r\n2. Run both bots and initiate the SkillDialog\r\n3. When the skill first comes back to the parent an error like the following should arise:\r\n```\r\nFile \"..path-to-botbuilder/botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py\", line 213, in _send_to_skill\r\n if not 200 <= response.status <= 299:\r\nAttributeError: 'NoneType' object has no attribute 'status'\r\n```\r\n\r\n## Expected behavior\r\nThe response get back to the parent without any problems\r\n\r\n## Workaround\r\nIf the skill bot is modified to always send some content in every successful response at the route handler level, the scenario should work.\r\n\r\nExample on how to do this for an aiohttp skill bot:\r\n```python\r\n#This is how a typical message handler method could look like\r\nasync def messages(req: Request) -> Response:\r\n # Main bot message handler.\r\n if \"application/json\" in req.headers[\"Content-Type\"]:\r\n body = await req.json()\r\n else:\r\n return Response(status=415)\r\n\r\n activity = Activity().deserialize(body)\r\n auth_header = req.headers[\"Authorization\"] if \"Authorization\" in req.headers else \"\"\r\n\r\n response = await ADAPTER.process_activity(activity, auth_header, BOT.on_turn)\r\n if response:\r\n return json_response(data=response.body, status=response.status)\r\n # THE FIX IS IN THE LINE BELOW\r\n return Response(status=201, body='{\"foo\":\"bar\"}'.encode(\"utf-8\"))\r\n```\r\n\r\n**Alternative Workaround:** use expected replies as delivery method in the parent bot (SkillDialog).\r\n\r\n[bug]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n# pylint: disable=no-member\n\nimport json\nfrom typing import Dict\nfrom logging import Logger\n\nimport aiohttp\nfrom botbuilder.core import InvokeResponse\nfrom botbuilder.core.skills import BotFrameworkClient\nfrom botbuilder.schema import (\n Activity,\n ExpectedReplies,\n ConversationReference,\n ConversationAccount,\n)\nfrom botframework.connector.auth import (\n ChannelProvider,\n CredentialProvider,\n GovernmentConstants,\n MicrosoftAppCredentials,\n)\n\n\nclass BotFrameworkHttpClient(BotFrameworkClient):\n\n \"\"\"\n A skill host adapter implements API to forward activity to a skill and\n implements routing ChannelAPI calls from the Skill up through the bot/adapter.\n \"\"\"\n\n INVOKE_ACTIVITY_NAME = \"SkillEvents.ChannelApiInvoke\"\n _BOT_IDENTITY_KEY = \"BotIdentity\"\n _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}\n\n def __init__(\n self,\n credential_provider: CredentialProvider,\n channel_provider: ChannelProvider = None,\n logger: Logger = None,\n ):\n if not credential_provider:\n raise TypeError(\"credential_provider can't be None\")\n\n self._credential_provider = credential_provider\n self._channel_provider = channel_provider\n self._logger = logger\n self._session = aiohttp.ClientSession()\n\n async def post_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> InvokeResponse:\n app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)\n\n if not app_credentials:\n raise KeyError(\"Unable to get appCredentials to connect to the skill\")\n\n # Get token for the skill call\n token = (\n app_credentials.get_access_token()\n if app_credentials.microsoft_app_id\n else None\n )\n\n # Capture current activity settings before changing them.\n # TODO: DO we need to set the activity ID? 
(events that are created manually don't have it).\n original_conversation_id = activity.conversation.id\n original_service_url = activity.service_url\n original_caller_id = activity.caller_id\n original_relates_to = activity.relates_to\n\n try:\n # TODO: The relato has to be ported to the adapter in the new integration library when\n # resolving conflicts in merge\n activity.relates_to = ConversationReference(\n service_url=activity.service_url,\n activity_id=activity.id,\n channel_id=activity.channel_id,\n conversation=ConversationAccount(\n id=activity.conversation.id,\n name=activity.conversation.name,\n conversation_type=activity.conversation.conversation_type,\n aad_object_id=activity.conversation.aad_object_id,\n is_group=activity.conversation.is_group,\n role=activity.conversation.role,\n tenant_id=activity.conversation.tenant_id,\n properties=activity.conversation.properties,\n ),\n bot=None,\n )\n activity.conversation.id = conversation_id\n activity.service_url = service_url\n activity.caller_id = f\"urn:botframework:aadappid:{from_bot_id}\"\n\n headers_dict = {\n \"Content-type\": \"application/json; charset=utf-8\",\n }\n if token:\n headers_dict.update(\n {\"Authorization\": f\"Bearer {token}\",}\n )\n\n json_content = json.dumps(activity.serialize())\n resp = await self._session.post(\n to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n )\n resp.raise_for_status()\n data = (await resp.read()).decode()\n content = json.loads(data) if data else None\n\n if content:\n return InvokeResponse(status=resp.status, body=content)\n\n finally:\n # Restore activity properties.\n activity.conversation.id = original_conversation_id\n activity.service_url = original_service_url\n activity.caller_id = original_caller_id\n activity.relates_to = original_relates_to\n\n async def post_buffered_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> [Activity]:\n \"\"\"\n Helper method to return a list of activities when an Activity is being\n sent with DeliveryMode == expectReplies.\n \"\"\"\n response = await self.post_activity(\n from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity\n )\n if not response or (response.status / 100) != 2:\n return []\n return ExpectedReplies().deserialize(response.body).activities\n\n async def _get_app_credentials(\n self, app_id: str, oauth_scope: str\n ) -> MicrosoftAppCredentials:\n if not app_id:\n return MicrosoftAppCredentials(None, None)\n\n cache_key = f\"{app_id}{oauth_scope}\"\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n\n if app_credentials:\n return app_credentials\n\n app_password = await self._credential_provider.get_app_password(app_id)\n app_credentials = MicrosoftAppCredentials(\n app_id, app_password, oauth_scope=oauth_scope\n )\n if self._channel_provider and self._channel_provider.is_government():\n app_credentials.oauth_endpoint = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\n )\n app_credentials.oauth_scope = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n )\n\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n return app_credentials\n", "path": "libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py"}]}
2,620
187
gh_patches_debug_8973
rasdani/github-patches
git_diff
spesmilo__electrum-2164
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> HTTPS cert expired on LabelSync server The cert on https://sync.bytesized-hosting.com:9090/ has expired and the LabelSync plugin stopped working </issue> <code> [start of plugins/labels/labels.py] 1 import hashlib 2 import requests 3 import threading 4 import json 5 import sys 6 import traceback 7 8 import aes 9 import base64 10 11 import electrum 12 from electrum.plugins import BasePlugin, hook 13 from electrum.i18n import _ 14 15 16 17 18 class LabelsPlugin(BasePlugin): 19 20 def __init__(self, parent, config, name): 21 BasePlugin.__init__(self, parent, config, name) 22 self.target_host = 'sync.bytesized-hosting.com:9090' 23 self.wallets = {} 24 25 def encode(self, wallet, msg): 26 password, iv, wallet_id = self.wallets[wallet] 27 encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv, 28 msg.encode('utf8')) 29 return base64.b64encode(encrypted) 30 31 def decode(self, wallet, message): 32 password, iv, wallet_id = self.wallets[wallet] 33 decoded = base64.b64decode(message) 34 decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded) 35 return decrypted.decode('utf8') 36 37 def get_nonce(self, wallet): 38 # nonce is the nonce to be used with the next change 39 nonce = wallet.storage.get('wallet_nonce') 40 if nonce is None: 41 nonce = 1 42 self.set_nonce(wallet, nonce) 43 return nonce 44 45 def set_nonce(self, wallet, nonce): 46 self.print_error("set", wallet.basename(), "nonce to", nonce) 47 wallet.storage.put("wallet_nonce", nonce) 48 49 @hook 50 def set_label(self, wallet, item, label): 51 if not wallet in self.wallets: 52 return 53 nonce = self.get_nonce(wallet) 54 wallet_id = self.wallets[wallet][2] 55 bundle = {"walletId": wallet_id, 56 "walletNonce": nonce, 57 "externalId": self.encode(wallet, item), 58 "encryptedLabel": self.encode(wallet, label)} 59 t = threading.Thread(target=self.do_request, 60 args=["POST", "/label", False, bundle]) 61 t.setDaemon(True) 62 t.start() 63 # Caller will write the wallet 64 self.set_nonce(wallet, nonce + 1) 65 66 def do_request(self, method, url = "/labels", is_batch=False, data=None): 67 url = 'https://' + self.target_host + url 68 kwargs = {'headers': {}} 69 if method == 'GET' and data: 70 kwargs['params'] = data 71 elif method == 'POST' and data: 72 kwargs['data'] = json.dumps(data) 73 kwargs['headers']['Content-Type'] = 'application/json' 74 response = requests.request(method, url, **kwargs) 75 if response.status_code != 200: 76 raise BaseException(response.status_code, response.text) 77 response = response.json() 78 if "error" in response: 79 raise BaseException(response["error"]) 80 return response 81 82 def push_thread(self, wallet): 83 wallet_id = self.wallets[wallet][2] 84 bundle = {"labels": [], 85 "walletId": wallet_id, 86 "walletNonce": self.get_nonce(wallet)} 87 for key, value in wallet.labels.iteritems(): 88 try: 89 encoded_key = self.encode(wallet, key) 90 encoded_value = self.encode(wallet, value) 91 except: 92 self.print_error('cannot encode', repr(key), repr(value)) 93 continue 94 bundle["labels"].append({'encryptedLabel': encoded_value, 95 'externalId': encoded_key}) 96 self.do_request("POST", "/labels", True, bundle) 97 98 def pull_thread(self, wallet, force): 99 wallet_id = self.wallets[wallet][2] 100 nonce = 1 if force else self.get_nonce(wallet) - 1 101 self.print_error("asking for labels since nonce", nonce) 102 try: 103 response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, 
wallet_id) )) 104 if response["labels"] is None: 105 self.print_error('no new labels') 106 return 107 result = {} 108 for label in response["labels"]: 109 try: 110 key = self.decode(wallet, label["externalId"]) 111 value = self.decode(wallet, label["encryptedLabel"]) 112 except: 113 continue 114 try: 115 json.dumps(key) 116 json.dumps(value) 117 except: 118 self.print_error('error: no json', key) 119 continue 120 result[key] = value 121 122 for key, value in result.items(): 123 if force or not wallet.labels.get(key): 124 wallet.labels[key] = value 125 126 self.print_error("received %d labels" % len(response)) 127 # do not write to disk because we're in a daemon thread 128 wallet.storage.put('labels', wallet.labels) 129 self.set_nonce(wallet, response["nonce"] + 1) 130 self.on_pulled(wallet) 131 132 except Exception as e: 133 traceback.print_exc(file=sys.stderr) 134 self.print_error("could not retrieve labels") 135 136 def start_wallet(self, wallet): 137 nonce = self.get_nonce(wallet) 138 self.print_error("wallet", wallet.basename(), "nonce is", nonce) 139 mpk = wallet.get_fingerprint() 140 if not mpk: 141 return 142 password = hashlib.sha1(mpk).digest().encode('hex')[:32] 143 iv = hashlib.sha256(password).digest()[:16] 144 wallet_id = hashlib.sha256(mpk).digest().encode('hex') 145 self.wallets[wallet] = (password, iv, wallet_id) 146 # If there is an auth token we can try to actually start syncing 147 t = threading.Thread(target=self.pull_thread, args=(wallet, False)) 148 t.setDaemon(True) 149 t.start() 150 151 def stop_wallet(self, wallet): 152 self.wallets.pop(wallet, None) 153 [end of plugins/labels/labels.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugins/labels/labels.py b/plugins/labels/labels.py
--- a/plugins/labels/labels.py
+++ b/plugins/labels/labels.py
@@ -5,7 +5,6 @@
 import sys
 import traceback
 
-import aes
 import base64
 
 import electrum
@@ -19,7 +18,7 @@
 
     def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
-        self.target_host = 'sync.bytesized-hosting.com:9090'
+        self.target_host = 'labels.bauerj.eu'
        self.wallets = {}
 
    def encode(self, wallet, msg):
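A brief note on why a host swap is the whole fix (the hostnames below are simply the values from the diff): every LabelSync request is assembled as `'https://' + self.target_host + url` inside `do_request`, so pointing `target_host` at a server with a valid certificate repairs the plugin without touching the sync protocol; the unused `import aes` is dropped at the same time.

```python
# Sketch of how the plugin derives request URLs (mirrors do_request above).
old = 'https://' + 'sync.bytesized-hosting.com:9090' + '/labels'   # expired certificate
new = 'https://' + 'labels.bauerj.eu' + '/labels'                  # replacement server
assert new == 'https://labels.bauerj.eu/labels'
```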
{"golden_diff": "diff --git a/plugins/labels/labels.py b/plugins/labels/labels.py\n--- a/plugins/labels/labels.py\n+++ b/plugins/labels/labels.py\n@@ -5,7 +5,6 @@\n import sys\n import traceback\n \n-import aes\n import base64\n \n import electrum\n@@ -19,7 +18,7 @@\n \n def __init__(self, parent, config, name):\n BasePlugin.__init__(self, parent, config, name)\n- self.target_host = 'sync.bytesized-hosting.com:9090'\n+ self.target_host = 'labels.bauerj.eu'\n self.wallets = {}\n \n def encode(self, wallet, msg):\n", "issue": "HTTPS cert expired on LabelSync server\nThe cert on https://sync.bytesized-hosting.com:9090/ has expired and the LabelSync plugin stopped working \n", "before_files": [{"content": "import hashlib\nimport requests\nimport threading\nimport json\nimport sys\nimport traceback\n\nimport aes\nimport base64\n\nimport electrum\nfrom electrum.plugins import BasePlugin, hook\nfrom electrum.i18n import _\n\n\n\n\nclass LabelsPlugin(BasePlugin):\n\n def __init__(self, parent, config, name):\n BasePlugin.__init__(self, parent, config, name)\n self.target_host = 'sync.bytesized-hosting.com:9090'\n self.wallets = {}\n\n def encode(self, wallet, msg):\n password, iv, wallet_id = self.wallets[wallet]\n encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,\n msg.encode('utf8'))\n return base64.b64encode(encrypted)\n\n def decode(self, wallet, message):\n password, iv, wallet_id = self.wallets[wallet]\n decoded = base64.b64decode(message)\n decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)\n return decrypted.decode('utf8')\n\n def get_nonce(self, wallet):\n # nonce is the nonce to be used with the next change\n nonce = wallet.storage.get('wallet_nonce')\n if nonce is None:\n nonce = 1\n self.set_nonce(wallet, nonce)\n return nonce\n\n def set_nonce(self, wallet, nonce):\n self.print_error(\"set\", wallet.basename(), \"nonce to\", nonce)\n wallet.storage.put(\"wallet_nonce\", nonce)\n\n @hook\n def set_label(self, wallet, item, label):\n if not wallet in self.wallets:\n return\n nonce = self.get_nonce(wallet)\n wallet_id = self.wallets[wallet][2]\n bundle = {\"walletId\": wallet_id,\n \"walletNonce\": nonce,\n \"externalId\": self.encode(wallet, item),\n \"encryptedLabel\": self.encode(wallet, label)}\n t = threading.Thread(target=self.do_request,\n args=[\"POST\", \"/label\", False, bundle])\n t.setDaemon(True)\n t.start()\n # Caller will write the wallet\n self.set_nonce(wallet, nonce + 1)\n\n def do_request(self, method, url = \"/labels\", is_batch=False, data=None):\n url = 'https://' + self.target_host + url\n kwargs = {'headers': {}}\n if method == 'GET' and data:\n kwargs['params'] = data\n elif method == 'POST' and data:\n kwargs['data'] = json.dumps(data)\n kwargs['headers']['Content-Type'] = 'application/json'\n response = requests.request(method, url, **kwargs)\n if response.status_code != 200:\n raise BaseException(response.status_code, response.text)\n response = response.json()\n if \"error\" in response:\n raise BaseException(response[\"error\"])\n return response\n\n def push_thread(self, wallet):\n wallet_id = self.wallets[wallet][2]\n bundle = {\"labels\": [],\n \"walletId\": wallet_id,\n \"walletNonce\": self.get_nonce(wallet)}\n for key, value in wallet.labels.iteritems():\n try:\n encoded_key = self.encode(wallet, key)\n encoded_value = self.encode(wallet, value)\n except:\n self.print_error('cannot encode', repr(key), repr(value))\n continue\n bundle[\"labels\"].append({'encryptedLabel': encoded_value,\n 'externalId': encoded_key})\n 
self.do_request(\"POST\", \"/labels\", True, bundle)\n\n def pull_thread(self, wallet, force):\n wallet_id = self.wallets[wallet][2]\n nonce = 1 if force else self.get_nonce(wallet) - 1\n self.print_error(\"asking for labels since nonce\", nonce)\n try:\n response = self.do_request(\"GET\", (\"/labels/since/%d/for/%s\" % (nonce, wallet_id) ))\n if response[\"labels\"] is None:\n self.print_error('no new labels')\n return\n result = {}\n for label in response[\"labels\"]:\n try:\n key = self.decode(wallet, label[\"externalId\"])\n value = self.decode(wallet, label[\"encryptedLabel\"])\n except:\n continue\n try:\n json.dumps(key)\n json.dumps(value)\n except:\n self.print_error('error: no json', key)\n continue\n result[key] = value\n\n for key, value in result.items():\n if force or not wallet.labels.get(key):\n wallet.labels[key] = value\n\n self.print_error(\"received %d labels\" % len(response))\n # do not write to disk because we're in a daemon thread\n wallet.storage.put('labels', wallet.labels)\n self.set_nonce(wallet, response[\"nonce\"] + 1)\n self.on_pulled(wallet)\n\n except Exception as e:\n traceback.print_exc(file=sys.stderr)\n self.print_error(\"could not retrieve labels\")\n\n def start_wallet(self, wallet):\n nonce = self.get_nonce(wallet)\n self.print_error(\"wallet\", wallet.basename(), \"nonce is\", nonce)\n mpk = wallet.get_fingerprint()\n if not mpk:\n return\n password = hashlib.sha1(mpk).digest().encode('hex')[:32]\n iv = hashlib.sha256(password).digest()[:16]\n wallet_id = hashlib.sha256(mpk).digest().encode('hex')\n self.wallets[wallet] = (password, iv, wallet_id)\n # If there is an auth token we can try to actually start syncing\n t = threading.Thread(target=self.pull_thread, args=(wallet, False))\n t.setDaemon(True)\n t.start()\n\n def stop_wallet(self, wallet):\n self.wallets.pop(wallet, None)\n", "path": "plugins/labels/labels.py"}]}
2,159
152
gh_patches_debug_29856
rasdani/github-patches
git_diff
mesonbuild__meson-9174
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Get the i18n targets inside the meson files I need to generate a file from the i18n *.gmo files. This `custom_target` needs to depend on the `<project_id>-gmo` target documented [here](https://mesonbuild.com/i18n-module.html). This target should be retrievable through a method like : ``` gettext_targets = i18n.gettext('mypackage') custom_target( depends: gettext_targets.target_gmo() ) ``` For now, I think the only way to achieve that is something like that : ``` ninja_gmo = custom_target(command: [ 'ninja', '-C' meson.build_root(), 'mypackage-gmo']) ``` </issue> <code> [start of mesonbuild/modules/i18n.py] 1 # Copyright 2016 The Meson development team 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import shutil 16 17 from os import path 18 from .. import coredata, mesonlib, build, mlog 19 from ..mesonlib import MesonException 20 from ..scripts.gettext import read_linguas 21 from . import ModuleReturnValue 22 from . import ExtensionModule 23 from ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs 24 25 PRESET_ARGS = { 26 'glib': [ 27 '--from-code=UTF-8', 28 '--add-comments', 29 30 # https://developer.gnome.org/glib/stable/glib-I18N.html 31 '--keyword=_', 32 '--keyword=N_', 33 '--keyword=C_:1c,2', 34 '--keyword=NC_:1c,2', 35 '--keyword=g_dcgettext:2', 36 '--keyword=g_dngettext:2,3', 37 '--keyword=g_dpgettext2:2c,3', 38 39 '--flag=N_:1:pass-c-format', 40 '--flag=C_:2:pass-c-format', 41 '--flag=NC_:2:pass-c-format', 42 '--flag=g_dngettext:2:pass-c-format', 43 '--flag=g_strdup_printf:1:c-format', 44 '--flag=g_string_printf:2:c-format', 45 '--flag=g_string_append_printf:2:c-format', 46 '--flag=g_error_new:3:c-format', 47 '--flag=g_set_error:4:c-format', 48 '--flag=g_markup_printf_escaped:1:c-format', 49 '--flag=g_log:3:c-format', 50 '--flag=g_print:1:c-format', 51 '--flag=g_printerr:1:c-format', 52 '--flag=g_printf:1:c-format', 53 '--flag=g_fprintf:2:c-format', 54 '--flag=g_sprintf:2:c-format', 55 '--flag=g_snprintf:3:c-format', 56 ] 57 } 58 59 60 class I18nModule(ExtensionModule): 61 def __init__(self, interpreter): 62 super().__init__(interpreter) 63 self.methods.update({ 64 'merge_file': self.merge_file, 65 'gettext': self.gettext, 66 }) 67 68 @staticmethod 69 def nogettext_warning(): 70 mlog.warning('Gettext not found, all translation targets will be ignored.', once=True) 71 72 @staticmethod 73 def _get_data_dirs(state, dirs): 74 """Returns source directories of relative paths""" 75 src_dir = path.join(state.environment.get_source_dir(), state.subdir) 76 return [path.join(src_dir, d) for d in dirs] 77 78 @FeatureNew('i18n.merge_file', '0.37.0') 79 @FeatureNewKwargs('i18n.merge_file', '0.51.0', ['args']) 80 @permittedKwargs(build.CustomTarget.known_kwargs | {'data_dirs', 'po_dir', 'type', 'args'}) 81 def merge_file(self, state, args, kwargs): 82 if not shutil.which('xgettext'): 83 self.nogettext_warning() 84 return 85 podir = kwargs.pop('po_dir', None) 
86 if not podir: 87 raise MesonException('i18n: po_dir is a required kwarg') 88 podir = path.join(state.build_to_src, state.subdir, podir) 89 90 file_type = kwargs.pop('type', 'xml') 91 VALID_TYPES = ('xml', 'desktop') 92 if file_type not in VALID_TYPES: 93 raise MesonException(f'i18n: "{file_type}" is not a valid type {VALID_TYPES}') 94 95 datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.pop('data_dirs', []))) 96 datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None 97 98 command = state.environment.get_build_command() + [ 99 '--internal', 'msgfmthelper', 100 '@INPUT@', '@OUTPUT@', file_type, podir 101 ] 102 if datadirs: 103 command.append(datadirs) 104 105 if 'args' in kwargs: 106 command.append('--') 107 command.append(mesonlib.stringlistify(kwargs.pop('args', []))) 108 109 kwargs['command'] = command 110 111 # We only use this input file to create a name of the custom target. 112 # Thus we can ignore the other entries. 113 inputfile = mesonlib.extract_as_list(kwargs, 'input')[0] 114 if isinstance(inputfile, str): 115 inputfile = mesonlib.File.from_source_file(state.environment.source_dir, 116 state.subdir, inputfile) 117 if isinstance(inputfile, mesonlib.File): 118 # output could be '@BASENAME@' in which case we need to do substitutions 119 # to get a unique target name. 120 output = kwargs['output'] 121 ifile_abs = inputfile.absolute_path(state.environment.source_dir, 122 state.environment.build_dir) 123 values = mesonlib.get_filenames_templates_dict([ifile_abs], None) 124 outputs = mesonlib.substitute_values([output], values) 125 output = outputs[0] 126 ct = build.CustomTarget(output + '_' + state.subdir.replace('/', '@').replace('\\', '@') + '_merge', state.subdir, state.subproject, kwargs) 127 else: 128 ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs) 129 130 return ModuleReturnValue(ct, [ct]) 131 132 @FeatureNewKwargs('i18n.gettext', '0.37.0', ['preset']) 133 @FeatureNewKwargs('i18n.gettext', '0.50.0', ['install_dir']) 134 @permittedKwargs({'po_dir', 'data_dirs', 'type', 'languages', 'args', 'preset', 'install', 'install_dir'}) 135 def gettext(self, state, args, kwargs): 136 if len(args) != 1: 137 raise coredata.MesonException('Gettext requires one positional argument (package name).') 138 if not shutil.which('xgettext'): 139 self.nogettext_warning() 140 return 141 packagename = args[0] 142 languages = mesonlib.stringlistify(kwargs.get('languages', [])) 143 datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', []))) 144 extra_args = mesonlib.stringlistify(kwargs.get('args', [])) 145 targets = [] 146 147 preset = kwargs.pop('preset', None) 148 if preset: 149 preset_args = PRESET_ARGS.get(preset) 150 if not preset_args: 151 raise coredata.MesonException('i18n: Preset "{}" is not one of the valid options: {}'.format( 152 preset, list(PRESET_ARGS.keys()))) 153 extra_args = set(preset_args + extra_args) 154 155 pkg_arg = '--pkgname=' + packagename 156 lang_arg = '--langs=' + '@@'.join(languages) if languages else None 157 datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None 158 extra_args = '--extra-args=' + '@@'.join(extra_args) if extra_args else None 159 160 potargs = state.environment.get_build_command() + ['--internal', 'gettext', 'pot', pkg_arg] 161 if datadirs: 162 potargs.append(datadirs) 163 if extra_args: 164 potargs.append(extra_args) 165 pottarget = build.RunTarget(packagename + '-pot', potargs, [], state.subdir, state.subproject) 166 
targets.append(pottarget) 167 168 install = kwargs.get('install', True) 169 install_dir = kwargs.get('install_dir', state.environment.coredata.get_option(mesonlib.OptionKey('localedir'))) 170 if not languages: 171 languages = read_linguas(path.join(state.environment.source_dir, state.subdir)) 172 for l in languages: 173 po_file = mesonlib.File.from_source_file(state.environment.source_dir, 174 state.subdir, l+'.po') 175 gmo_kwargs = {'command': ['msgfmt', '@INPUT@', '-o', '@OUTPUT@'], 176 'input': po_file, 177 'output': packagename+'.mo', 178 'install': install, 179 # We have multiple files all installed as packagename+'.mo' in different install subdirs. 180 # What we really wanted to do, probably, is have a rename: kwarg, but that's not available 181 # to custom_targets. Crude hack: set the build target's subdir manually. 182 # Bonus: the build tree has something usable as an uninstalled bindtextdomain() target dir. 183 'install_dir': path.join(install_dir, l, 'LC_MESSAGES'), 184 'install_tag': 'i18n', 185 } 186 gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs) 187 targets.append(gmotarget) 188 189 updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg] 190 if lang_arg: 191 updatepoargs.append(lang_arg) 192 if datadirs: 193 updatepoargs.append(datadirs) 194 if extra_args: 195 updatepoargs.append(extra_args) 196 updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject) 197 targets.append(updatepotarget) 198 199 return ModuleReturnValue(None, targets) 200 201 def initialize(*args, **kwargs): 202 return I18nModule(*args, **kwargs) 203 [end of mesonbuild/modules/i18n.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mesonbuild/modules/i18n.py b/mesonbuild/modules/i18n.py --- a/mesonbuild/modules/i18n.py +++ b/mesonbuild/modules/i18n.py @@ -143,6 +143,7 @@ datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', []))) extra_args = mesonlib.stringlistify(kwargs.get('args', [])) targets = [] + gmotargets = [] preset = kwargs.pop('preset', None) if preset: @@ -185,6 +186,10 @@ } gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs) targets.append(gmotarget) + gmotargets.append(gmotarget) + + allgmotarget = build.AliasTarget(packagename + '-gmo', gmotargets, state.subdir, state.subproject) + targets.append(allgmotarget) updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg] if lang_arg: @@ -196,7 +201,7 @@ updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject) targets.append(updatepotarget) - return ModuleReturnValue(None, targets) + return ModuleReturnValue([gmotargets, pottarget, updatepotarget], targets) def initialize(*args, **kwargs): return I18nModule(*args, **kwargs)
{"golden_diff": "diff --git a/mesonbuild/modules/i18n.py b/mesonbuild/modules/i18n.py\n--- a/mesonbuild/modules/i18n.py\n+++ b/mesonbuild/modules/i18n.py\n@@ -143,6 +143,7 @@\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n targets = []\n+ gmotargets = []\n \n preset = kwargs.pop('preset', None)\n if preset:\n@@ -185,6 +186,10 @@\n }\n gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)\n targets.append(gmotarget)\n+ gmotargets.append(gmotarget)\n+\n+ allgmotarget = build.AliasTarget(packagename + '-gmo', gmotargets, state.subdir, state.subproject)\n+ targets.append(allgmotarget)\n \n updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]\n if lang_arg:\n@@ -196,7 +201,7 @@\n updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)\n targets.append(updatepotarget)\n \n- return ModuleReturnValue(None, targets)\n+ return ModuleReturnValue([gmotargets, pottarget, updatepotarget], targets)\n \n def initialize(*args, **kwargs):\n return I18nModule(*args, **kwargs)\n", "issue": "Get the i18n targets inside the meson files\nI need to generate a file from the i18n *.gmo files. This `custom_target` needs to depend on the `<project_id>-gmo` target documented [here](https://mesonbuild.com/i18n-module.html).\r\n\r\nThis target should be retrievable through a method like :\r\n```\r\ngettext_targets = i18n.gettext('mypackage')\r\ncustom_target(\r\n depends: gettext_targets.target_gmo()\r\n)\r\n```\r\n\r\nFor now, I think the only way to achieve that is something like that :\r\n```\r\nninja_gmo = custom_target(command:\u00a0[ 'ninja', '-C' meson.build_root(), 'mypackage-gmo'])\r\n```\n", "before_files": [{"content": "# Copyright 2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport shutil\n\nfrom os import path\nfrom .. import coredata, mesonlib, build, mlog\nfrom ..mesonlib import MesonException\nfrom ..scripts.gettext import read_linguas\nfrom . import ModuleReturnValue\nfrom . 
import ExtensionModule\nfrom ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs\n\nPRESET_ARGS = {\n 'glib': [\n '--from-code=UTF-8',\n '--add-comments',\n\n # https://developer.gnome.org/glib/stable/glib-I18N.html\n '--keyword=_',\n '--keyword=N_',\n '--keyword=C_:1c,2',\n '--keyword=NC_:1c,2',\n '--keyword=g_dcgettext:2',\n '--keyword=g_dngettext:2,3',\n '--keyword=g_dpgettext2:2c,3',\n\n '--flag=N_:1:pass-c-format',\n '--flag=C_:2:pass-c-format',\n '--flag=NC_:2:pass-c-format',\n '--flag=g_dngettext:2:pass-c-format',\n '--flag=g_strdup_printf:1:c-format',\n '--flag=g_string_printf:2:c-format',\n '--flag=g_string_append_printf:2:c-format',\n '--flag=g_error_new:3:c-format',\n '--flag=g_set_error:4:c-format',\n '--flag=g_markup_printf_escaped:1:c-format',\n '--flag=g_log:3:c-format',\n '--flag=g_print:1:c-format',\n '--flag=g_printerr:1:c-format',\n '--flag=g_printf:1:c-format',\n '--flag=g_fprintf:2:c-format',\n '--flag=g_sprintf:2:c-format',\n '--flag=g_snprintf:3:c-format',\n ]\n}\n\n\nclass I18nModule(ExtensionModule):\n def __init__(self, interpreter):\n super().__init__(interpreter)\n self.methods.update({\n 'merge_file': self.merge_file,\n 'gettext': self.gettext,\n })\n\n @staticmethod\n def nogettext_warning():\n mlog.warning('Gettext not found, all translation targets will be ignored.', once=True)\n\n @staticmethod\n def _get_data_dirs(state, dirs):\n \"\"\"Returns source directories of relative paths\"\"\"\n src_dir = path.join(state.environment.get_source_dir(), state.subdir)\n return [path.join(src_dir, d) for d in dirs]\n\n @FeatureNew('i18n.merge_file', '0.37.0')\n @FeatureNewKwargs('i18n.merge_file', '0.51.0', ['args'])\n @permittedKwargs(build.CustomTarget.known_kwargs | {'data_dirs', 'po_dir', 'type', 'args'})\n def merge_file(self, state, args, kwargs):\n if not shutil.which('xgettext'):\n self.nogettext_warning()\n return\n podir = kwargs.pop('po_dir', None)\n if not podir:\n raise MesonException('i18n: po_dir is a required kwarg')\n podir = path.join(state.build_to_src, state.subdir, podir)\n\n file_type = kwargs.pop('type', 'xml')\n VALID_TYPES = ('xml', 'desktop')\n if file_type not in VALID_TYPES:\n raise MesonException(f'i18n: \"{file_type}\" is not a valid type {VALID_TYPES}')\n\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.pop('data_dirs', [])))\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n\n command = state.environment.get_build_command() + [\n '--internal', 'msgfmthelper',\n '@INPUT@', '@OUTPUT@', file_type, podir\n ]\n if datadirs:\n command.append(datadirs)\n\n if 'args' in kwargs:\n command.append('--')\n command.append(mesonlib.stringlistify(kwargs.pop('args', [])))\n\n kwargs['command'] = command\n\n # We only use this input file to create a name of the custom target.\n # Thus we can ignore the other entries.\n inputfile = mesonlib.extract_as_list(kwargs, 'input')[0]\n if isinstance(inputfile, str):\n inputfile = mesonlib.File.from_source_file(state.environment.source_dir,\n state.subdir, inputfile)\n if isinstance(inputfile, mesonlib.File):\n # output could be '@BASENAME@' in which case we need to do substitutions\n # to get a unique target name.\n output = kwargs['output']\n ifile_abs = inputfile.absolute_path(state.environment.source_dir,\n state.environment.build_dir)\n values = mesonlib.get_filenames_templates_dict([ifile_abs], None)\n outputs = mesonlib.substitute_values([output], values)\n output = outputs[0]\n ct = build.CustomTarget(output + '_' + state.subdir.replace('/', 
'@').replace('\\\\', '@') + '_merge', state.subdir, state.subproject, kwargs)\n else:\n ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs)\n\n return ModuleReturnValue(ct, [ct])\n\n @FeatureNewKwargs('i18n.gettext', '0.37.0', ['preset'])\n @FeatureNewKwargs('i18n.gettext', '0.50.0', ['install_dir'])\n @permittedKwargs({'po_dir', 'data_dirs', 'type', 'languages', 'args', 'preset', 'install', 'install_dir'})\n def gettext(self, state, args, kwargs):\n if len(args) != 1:\n raise coredata.MesonException('Gettext requires one positional argument (package name).')\n if not shutil.which('xgettext'):\n self.nogettext_warning()\n return\n packagename = args[0]\n languages = mesonlib.stringlistify(kwargs.get('languages', []))\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n targets = []\n\n preset = kwargs.pop('preset', None)\n if preset:\n preset_args = PRESET_ARGS.get(preset)\n if not preset_args:\n raise coredata.MesonException('i18n: Preset \"{}\" is not one of the valid options: {}'.format(\n preset, list(PRESET_ARGS.keys())))\n extra_args = set(preset_args + extra_args)\n\n pkg_arg = '--pkgname=' + packagename\n lang_arg = '--langs=' + '@@'.join(languages) if languages else None\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n extra_args = '--extra-args=' + '@@'.join(extra_args) if extra_args else None\n\n potargs = state.environment.get_build_command() + ['--internal', 'gettext', 'pot', pkg_arg]\n if datadirs:\n potargs.append(datadirs)\n if extra_args:\n potargs.append(extra_args)\n pottarget = build.RunTarget(packagename + '-pot', potargs, [], state.subdir, state.subproject)\n targets.append(pottarget)\n\n install = kwargs.get('install', True)\n install_dir = kwargs.get('install_dir', state.environment.coredata.get_option(mesonlib.OptionKey('localedir')))\n if not languages:\n languages = read_linguas(path.join(state.environment.source_dir, state.subdir))\n for l in languages:\n po_file = mesonlib.File.from_source_file(state.environment.source_dir,\n state.subdir, l+'.po')\n gmo_kwargs = {'command': ['msgfmt', '@INPUT@', '-o', '@OUTPUT@'],\n 'input': po_file,\n 'output': packagename+'.mo',\n 'install': install,\n # We have multiple files all installed as packagename+'.mo' in different install subdirs.\n # What we really wanted to do, probably, is have a rename: kwarg, but that's not available\n # to custom_targets. Crude hack: set the build target's subdir manually.\n # Bonus: the build tree has something usable as an uninstalled bindtextdomain() target dir.\n 'install_dir': path.join(install_dir, l, 'LC_MESSAGES'),\n 'install_tag': 'i18n',\n }\n gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)\n targets.append(gmotarget)\n\n updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]\n if lang_arg:\n updatepoargs.append(lang_arg)\n if datadirs:\n updatepoargs.append(datadirs)\n if extra_args:\n updatepoargs.append(extra_args)\n updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)\n targets.append(updatepotarget)\n\n return ModuleReturnValue(None, targets)\n\ndef initialize(*args, **kwargs):\n return I18nModule(*args, **kwargs)\n", "path": "mesonbuild/modules/i18n.py"}]}
3,350
376
gh_patches_debug_9970
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-770
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> E2532 when Step Functions Pass state has Parameters key *cfn-lint version: 0.17.0* Using latest version of cfn-lint and updated spec files. *Description of issue.* Linting a CF template fails when a `AWS::StepFunctions::StateMachine`'s `DefinitionString` has a `Pass` state with the `Parameters` key. Example template: ```yaml AWSTemplateFormatVersion: 2010-09-09 Resources: ExampleStateMachine: Type: AWS::StepFunctions::StateMachine Properties: DefinitionString: |- { "StartAt": "Pop Element from List", "States": { "Pop Element from List": { "Type": "Pass", "Parameters": { "List.$": "$.List[1:]" }, "End": true } } } RoleArn: redacted ``` Error: ``` E2532 State Machine Definition key (Parameters) for State (Pop Element from List) of Type (Pass) is not valid example.yaml:6:13 ``` However, the above `Pass` state conforms to the ASL [spec](https://states-language.net/spec.html#state-type-table) and [docs](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-pass-state.html) and comes directly from the [Transfer Data Records sample project](https://docs.aws.amazon.com/step-functions/latest/dg/sample-project-transfer-data-sqs.html). </issue> <code> [start of src/cfnlint/rules/resources/stepfunctions/StateMachine.py] 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 """ 17 import json 18 import six 19 from cfnlint import CloudFormationLintRule 20 from cfnlint import RuleMatch 21 22 23 class StateMachine(CloudFormationLintRule): 24 """Check State Machine Definition""" 25 id = 'E2532' 26 shortdesc = 'Check State Machine Definition for proper syntax' 27 description = 'Check the State Machine String Definition to make sure its JSON. ' \ 28 'Validate basic syntax of the file to determine validity.' 
29 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html' 30 tags = ['resources', 'stepfunctions'] 31 32 def __init__(self): 33 """Init""" 34 super(StateMachine, self).__init__() 35 self.resource_property_types.append('AWS::StepFunctions::StateMachine') 36 37 def _check_state_json(self, def_json, state_name, path): 38 """Check State JSON Definition""" 39 matches = [] 40 41 # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html 42 common_state_keys = [ 43 'Next', 44 'End', 45 'Type', 46 'Comment', 47 'InputPath', 48 'OutputPath', 49 ] 50 common_state_required_keys = [ 51 'Type', 52 ] 53 state_key_types = { 54 'Pass': ['Result', 'ResultPath'], 55 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'], 56 'Choice': ['Choices', 'Default'], 57 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'], 58 'Succeed': [], 59 'Fail': ['Cause', 'Error'], 60 'Parallel': ['Branches', 'ResultPath', 'Retry', 'Catch'] 61 } 62 state_required_types = { 63 'Pass': [], 64 'Task': ['Resource'], 65 'Choice': ['Choices'], 66 'Wait': [], 67 'Succeed': [], 68 'Fail': [], 69 'Parallel': ['Branches'] 70 } 71 72 for req_key in common_state_required_keys: 73 if req_key not in def_json: 74 message = 'State Machine Definition required key (%s) for State (%s) is missing' % (req_key, state_name) 75 matches.append(RuleMatch(path, message)) 76 return matches 77 78 state_type = def_json.get('Type') 79 80 if state_type in state_key_types: 81 for state_key, _ in def_json.items(): 82 if state_key not in common_state_keys + state_key_types.get(state_type, []): 83 message = 'State Machine Definition key (%s) for State (%s) of Type (%s) is not valid' % (state_key, state_name, state_type) 84 matches.append(RuleMatch(path, message)) 85 for req_key in common_state_required_keys + state_required_types.get(state_type, []): 86 if req_key not in def_json: 87 message = 'State Machine Definition required key (%s) for State (%s) of Type (%s) is missing' % (req_key, state_name, state_type) 88 matches.append(RuleMatch(path, message)) 89 return matches 90 else: 91 message = 'State Machine Definition Type (%s) is not valid' % (state_type) 92 matches.append(RuleMatch(path, message)) 93 94 return matches 95 96 def _check_definition_json(self, def_json, path): 97 """Check JSON Definition""" 98 matches = [] 99 100 top_level_keys = [ 101 'Comment', 102 'StartAt', 103 'TimeoutSeconds', 104 'Version', 105 'States' 106 ] 107 top_level_required_keys = [ 108 'StartAt', 109 'States' 110 ] 111 for top_key, _ in def_json.items(): 112 if top_key not in top_level_keys: 113 message = 'State Machine Definition key (%s) is not valid' % top_key 114 matches.append(RuleMatch(path, message)) 115 116 for req_key in top_level_required_keys: 117 if req_key not in def_json: 118 message = 'State Machine Definition required key (%s) is missing' % req_key 119 matches.append(RuleMatch(path, message)) 120 121 for state_name, state_value in def_json.get('States', {}).items(): 122 matches.extend(self._check_state_json(state_value, state_name, path)) 123 return matches 124 125 def check_value(self, value, path, fail_on_loads=True): 126 """Check Definition Value""" 127 matches = [] 128 try: 129 def_json = json.loads(value) 130 # pylint: disable=W0703 131 except Exception as err: 132 if fail_on_loads: 133 message = 'State Machine Definition needs to be formatted as JSON. 
Error %s' % err 134 matches.append(RuleMatch(path, message)) 135 return matches 136 137 self.logger.debug('State Machine definition could not be parsed. Skipping') 138 return matches 139 140 matches.extend(self._check_definition_json(def_json, path)) 141 return matches 142 143 def check_sub(self, value, path): 144 """Check Sub Object""" 145 matches = [] 146 if isinstance(value, list): 147 matches.extend(self.check_value(value[0], path, False)) 148 elif isinstance(value, six.string_types): 149 matches.extend(self.check_value(value, path, False)) 150 151 return matches 152 153 def match_resource_properties(self, properties, _, path, cfn): 154 """Check CloudFormation Properties""" 155 matches = [] 156 157 matches.extend( 158 cfn.check_value( 159 obj=properties, key='DefinitionString', 160 path=path[:], 161 check_value=self.check_value, 162 check_sub=self.check_sub 163 )) 164 165 return matches 166 [end of src/cfnlint/rules/resources/stepfunctions/StateMachine.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py --- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py +++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py @@ -51,7 +51,7 @@ 'Type', ] state_key_types = { - 'Pass': ['Result', 'ResultPath'], + 'Pass': ['Result', 'ResultPath', 'Parameters'], 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'], 'Choice': ['Choices', 'Default'], 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],
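To illustrate the effect of the change on the template from the issue above, here is a small, self-contained Python sketch of the key check that the patch relaxes. It is a simplified stand-in written for this illustration, not the actual cfn-lint rule code; the lists below only mirror the allowed-key tables shown in the rule.

# Simplified stand-in for the E2532 key check after the patch:
# 'Parameters' is now an accepted key for Pass states.
common_state_keys = ['Next', 'End', 'Type', 'Comment', 'InputPath', 'OutputPath']
pass_state_keys = ['Result', 'ResultPath', 'Parameters']  # 'Parameters' added by the patch

# The Pass state from the issue's example template
state = {"Type": "Pass", "Parameters": {"List.$": "$.List[1:]"}, "End": True}

invalid_keys = [k for k in state if k not in common_state_keys + pass_state_keys]
print(invalid_keys)  # [] -> this state no longer triggers an E2532 match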
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n@@ -51,7 +51,7 @@\n 'Type',\n ]\n state_key_types = {\n- 'Pass': ['Result', 'ResultPath'],\n+ 'Pass': ['Result', 'ResultPath', 'Parameters'],\n 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],\n 'Choice': ['Choices', 'Default'],\n 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],\n", "issue": "E2532 when Step Functions Pass state has Parameters key\n*cfn-lint version: 0.17.0*\r\n\r\nUsing latest version of cfn-lint and updated spec files.\r\n\r\n*Description of issue.*\r\n\r\nLinting a CF template fails when a `AWS::StepFunctions::StateMachine`'s `DefinitionString` has a `Pass` state with the `Parameters` key.\r\n\r\nExample template:\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nResources:\r\n ExampleStateMachine:\r\n Type: AWS::StepFunctions::StateMachine\r\n Properties:\r\n DefinitionString: |-\r\n {\r\n \"StartAt\": \"Pop Element from List\",\r\n \"States\": {\r\n \"Pop Element from List\": {\r\n \"Type\": \"Pass\",\r\n \"Parameters\": {\r\n \"List.$\": \"$.List[1:]\"\r\n },\r\n \"End\": true\r\n }\r\n }\r\n }\r\n RoleArn: redacted\r\n```\r\n\r\nError:\r\n\r\n```\r\nE2532 State Machine Definition key (Parameters) for State (Pop Element from List) of Type (Pass) is not valid\r\nexample.yaml:6:13\r\n```\r\n\r\nHowever, the above `Pass` state conforms to the ASL [spec](https://states-language.net/spec.html#state-type-table) and [docs](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-pass-state.html) and comes directly from the [Transfer Data Records sample project](https://docs.aws.amazon.com/step-functions/latest/dg/sample-project-transfer-data-sqs.html).\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport json\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass StateMachine(CloudFormationLintRule):\n \"\"\"Check State Machine Definition\"\"\"\n id = 'E2532'\n shortdesc = 'Check State Machine Definition for proper syntax'\n description = 'Check the State Machine String Definition to make sure its JSON. 
' \\\n 'Validate basic syntax of the file to determine validity.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html'\n tags = ['resources', 'stepfunctions']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(StateMachine, self).__init__()\n self.resource_property_types.append('AWS::StepFunctions::StateMachine')\n\n def _check_state_json(self, def_json, state_name, path):\n \"\"\"Check State JSON Definition\"\"\"\n matches = []\n\n # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html\n common_state_keys = [\n 'Next',\n 'End',\n 'Type',\n 'Comment',\n 'InputPath',\n 'OutputPath',\n ]\n common_state_required_keys = [\n 'Type',\n ]\n state_key_types = {\n 'Pass': ['Result', 'ResultPath'],\n 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],\n 'Choice': ['Choices', 'Default'],\n 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],\n 'Succeed': [],\n 'Fail': ['Cause', 'Error'],\n 'Parallel': ['Branches', 'ResultPath', 'Retry', 'Catch']\n }\n state_required_types = {\n 'Pass': [],\n 'Task': ['Resource'],\n 'Choice': ['Choices'],\n 'Wait': [],\n 'Succeed': [],\n 'Fail': [],\n 'Parallel': ['Branches']\n }\n\n for req_key in common_state_required_keys:\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) for State (%s) is missing' % (req_key, state_name)\n matches.append(RuleMatch(path, message))\n return matches\n\n state_type = def_json.get('Type')\n\n if state_type in state_key_types:\n for state_key, _ in def_json.items():\n if state_key not in common_state_keys + state_key_types.get(state_type, []):\n message = 'State Machine Definition key (%s) for State (%s) of Type (%s) is not valid' % (state_key, state_name, state_type)\n matches.append(RuleMatch(path, message))\n for req_key in common_state_required_keys + state_required_types.get(state_type, []):\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) for State (%s) of Type (%s) is missing' % (req_key, state_name, state_type)\n matches.append(RuleMatch(path, message))\n return matches\n else:\n message = 'State Machine Definition Type (%s) is not valid' % (state_type)\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def _check_definition_json(self, def_json, path):\n \"\"\"Check JSON Definition\"\"\"\n matches = []\n\n top_level_keys = [\n 'Comment',\n 'StartAt',\n 'TimeoutSeconds',\n 'Version',\n 'States'\n ]\n top_level_required_keys = [\n 'StartAt',\n 'States'\n ]\n for top_key, _ in def_json.items():\n if top_key not in top_level_keys:\n message = 'State Machine Definition key (%s) is not valid' % top_key\n matches.append(RuleMatch(path, message))\n\n for req_key in top_level_required_keys:\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) is missing' % req_key\n matches.append(RuleMatch(path, message))\n\n for state_name, state_value in def_json.get('States', {}).items():\n matches.extend(self._check_state_json(state_value, state_name, path))\n return matches\n\n def check_value(self, value, path, fail_on_loads=True):\n \"\"\"Check Definition Value\"\"\"\n matches = []\n try:\n def_json = json.loads(value)\n # pylint: disable=W0703\n except Exception as err:\n if fail_on_loads:\n message = 'State Machine Definition needs to be formatted as JSON. 
Error %s' % err\n matches.append(RuleMatch(path, message))\n return matches\n\n self.logger.debug('State Machine definition could not be parsed. Skipping')\n return matches\n\n matches.extend(self._check_definition_json(def_json, path))\n return matches\n\n def check_sub(self, value, path):\n \"\"\"Check Sub Object\"\"\"\n matches = []\n if isinstance(value, list):\n matches.extend(self.check_value(value[0], path, False))\n elif isinstance(value, six.string_types):\n matches.extend(self.check_value(value, path, False))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties, key='DefinitionString',\n path=path[:],\n check_value=self.check_value,\n check_sub=self.check_sub\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/stepfunctions/StateMachine.py"}]}
2,666
168
gh_patches_debug_29287
rasdani/github-patches
git_diff
weni-ai__bothub-engine-77
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Is possible translate example to same language </issue> <code> [start of bothub/api/serializers/translate.py] 1 from rest_framework import serializers 2 3 from django.utils.translation import gettext as _ 4 5 from bothub.common.models import RepositoryTranslatedExampleEntity 6 from bothub.common.models import RepositoryTranslatedExample 7 from bothub.common.models import RepositoryExample 8 9 from ..validators import CanContributeInRepositoryTranslatedExampleValidator 10 from ..validators import CanContributeInRepositoryExampleValidator 11 from ..validators import TranslatedExampleEntitiesValidator 12 13 14 class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer): 15 class Meta: 16 model = RepositoryTranslatedExampleEntity 17 fields = [ 18 'id', 19 'repository_translated_example', 20 'start', 21 'end', 22 'entity', 23 'created_at', 24 'value', 25 ] 26 27 repository_translated_example = serializers.PrimaryKeyRelatedField( 28 queryset=RepositoryTranslatedExample.objects, 29 validators=[ 30 CanContributeInRepositoryTranslatedExampleValidator(), 31 ], 32 help_text='Example translation ID') 33 value = serializers.SerializerMethodField() 34 35 def get_value(self, obj): 36 return obj.value 37 38 39 class RepositoryTranslatedExampleSerializer(serializers.ModelSerializer): 40 class Meta: 41 model = RepositoryTranslatedExample 42 fields = [ 43 'id', 44 'original_example', 45 'from_language', 46 'language', 47 'text', 48 'has_valid_entities', 49 'entities', 50 'created_at', 51 ] 52 53 original_example = serializers.PrimaryKeyRelatedField( 54 queryset=RepositoryExample.objects, 55 validators=[ 56 CanContributeInRepositoryExampleValidator(), 57 ], 58 help_text=_('Example\'s ID')) 59 from_language = serializers.SerializerMethodField() 60 has_valid_entities = serializers.SerializerMethodField() 61 entities = RepositoryTranslatedExampleEntitySeralizer( 62 many=True, 63 read_only=True) 64 65 def get_from_language(self, obj): 66 return obj.original_example.repository_update.language 67 68 def get_has_valid_entities(self, obj): 69 return obj.has_valid_entities 70 71 72 class NewRepositoryTranslatedExampleEntitySeralizer( 73 serializers.ModelSerializer): 74 class Meta: 75 model = RepositoryTranslatedExampleEntity 76 fields = [ 77 'start', 78 'end', 79 'entity', 80 ] 81 82 83 class NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer): 84 class Meta: 85 model = RepositoryTranslatedExample 86 fields = [ 87 'id', 88 'original_example', 89 'language', 90 'text', 91 'has_valid_entities', 92 'entities', 93 ] 94 95 def __init__(self, *args, **kwargs): 96 super().__init__(*args, **kwargs) 97 self.validators.append(TranslatedExampleEntitiesValidator()) 98 99 original_example = serializers.PrimaryKeyRelatedField( 100 queryset=RepositoryExample.objects, 101 validators=[ 102 CanContributeInRepositoryExampleValidator(), 103 ], 104 help_text=_('Example\'s ID')) 105 has_valid_entities = serializers.SerializerMethodField() 106 entities = NewRepositoryTranslatedExampleEntitySeralizer( 107 many=True, 108 style={'text_field': 'text'}) 109 110 def get_has_valid_entities(self, obj): 111 return obj.has_valid_entities 112 113 def create(self, validated_data): 114 entities_data = validated_data.pop('entities') 115 116 translated = self.Meta.model.objects.create(**validated_data) 117 for entity_data in entities_data: 118 RepositoryTranslatedExampleEntity.objects.create( 119 
repository_translated_example=translated, 120 **entity_data) 121 return translated 122 [end of bothub/api/serializers/translate.py] [start of bothub/api/validators.py] 1 from django.utils.translation import gettext as _ 2 from rest_framework.exceptions import PermissionDenied 3 from rest_framework.exceptions import ValidationError 4 5 from bothub.common.models import RepositoryTranslatedExample 6 7 8 class CanContributeInRepositoryValidator(object): 9 def __call__(self, value): 10 user_authorization = value.get_user_authorization( 11 self.request.user) 12 if not user_authorization.can_contribute: 13 raise PermissionDenied( 14 _('You can\'t contribute in this repository')) 15 16 def set_context(self, serializer): 17 self.request = serializer.context.get('request') 18 19 20 class CanContributeInRepositoryExampleValidator(object): 21 def __call__(self, value): 22 repository = value.repository_update.repository 23 user_authorization = repository.get_user_authorization( 24 self.request.user) 25 if not user_authorization.can_contribute: 26 raise PermissionDenied( 27 _('You can\'t contribute in this repository')) 28 29 def set_context(self, serializer): 30 self.request = serializer.context.get('request') 31 32 33 class CanContributeInRepositoryTranslatedExampleValidator(object): 34 def __call__(self, value): 35 repository = value.original_example.repository_update.repository 36 user_authorization = repository.get_user_authorization( 37 self.request.user) 38 if not user_authorization.can_contribute: 39 raise PermissionDenied( 40 _('You can\'t contribute in this repository')) 41 42 def set_context(self, serializer): 43 self.request = serializer.context.get('request') 44 45 46 class TranslatedExampleEntitiesValidator(object): 47 def __call__(self, attrs): 48 original_example = attrs.get('original_example') 49 entities_valid = RepositoryTranslatedExample.same_entities_validator( 50 list(map(lambda x: dict(x), attrs.get('entities'))), 51 list(map(lambda x: x.to_dict, original_example.entities.all()))) 52 if not entities_valid: 53 raise ValidationError({'entities': _('Invalid entities')}) 54 [end of bothub/api/validators.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bothub/api/serializers/translate.py b/bothub/api/serializers/translate.py --- a/bothub/api/serializers/translate.py +++ b/bothub/api/serializers/translate.py @@ -9,6 +9,7 @@ from ..validators import CanContributeInRepositoryTranslatedExampleValidator from ..validators import CanContributeInRepositoryExampleValidator from ..validators import TranslatedExampleEntitiesValidator +from ..validators import TranslatedExampleLanguageValidator class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer): @@ -95,6 +96,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.validators.append(TranslatedExampleEntitiesValidator()) + self.validators.append(TranslatedExampleLanguageValidator()) original_example = serializers.PrimaryKeyRelatedField( queryset=RepositoryExample.objects, diff --git a/bothub/api/validators.py b/bothub/api/validators.py --- a/bothub/api/validators.py +++ b/bothub/api/validators.py @@ -51,3 +51,11 @@ list(map(lambda x: x.to_dict, original_example.entities.all()))) if not entities_valid: raise ValidationError({'entities': _('Invalid entities')}) + + +class TranslatedExampleLanguageValidator(object): + def __call__(self, attrs): + original_example = attrs.get('original_example') + language = attrs.get('language') + if original_example.repository_update.language == language: + raise ValidationError({'language': _('Can\'t translate to same language')})
{"golden_diff": "diff --git a/bothub/api/serializers/translate.py b/bothub/api/serializers/translate.py\n--- a/bothub/api/serializers/translate.py\n+++ b/bothub/api/serializers/translate.py\n@@ -9,6 +9,7 @@\n from ..validators import CanContributeInRepositoryTranslatedExampleValidator\n from ..validators import CanContributeInRepositoryExampleValidator\n from ..validators import TranslatedExampleEntitiesValidator\n+from ..validators import TranslatedExampleLanguageValidator\n \n \n class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n@@ -95,6 +96,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n+ self.validators.append(TranslatedExampleLanguageValidator())\n \n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\ndiff --git a/bothub/api/validators.py b/bothub/api/validators.py\n--- a/bothub/api/validators.py\n+++ b/bothub/api/validators.py\n@@ -51,3 +51,11 @@\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n+\n+\n+class TranslatedExampleLanguageValidator(object):\n+ def __call__(self, attrs):\n+ original_example = attrs.get('original_example')\n+ language = attrs.get('language')\n+ if original_example.repository_update.language == language:\n+ raise ValidationError({'language': _('Can\\'t translate to same language')})\n", "issue": "Is possible translate example to same language\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom django.utils.translation import gettext as _\n\nfrom bothub.common.models import RepositoryTranslatedExampleEntity\nfrom bothub.common.models import RepositoryTranslatedExample\nfrom bothub.common.models import RepositoryExample\n\nfrom ..validators import CanContributeInRepositoryTranslatedExampleValidator\nfrom ..validators import CanContributeInRepositoryExampleValidator\nfrom ..validators import TranslatedExampleEntitiesValidator\n\n\nclass RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'id',\n 'repository_translated_example',\n 'start',\n 'end',\n 'entity',\n 'created_at',\n 'value',\n ]\n\n repository_translated_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryTranslatedExample.objects,\n validators=[\n CanContributeInRepositoryTranslatedExampleValidator(),\n ],\n help_text='Example translation ID')\n value = serializers.SerializerMethodField()\n\n def get_value(self, obj):\n return obj.value\n\n\nclass RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'from_language',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n 'created_at',\n ]\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n from_language = serializers.SerializerMethodField()\n has_valid_entities = serializers.SerializerMethodField()\n entities = RepositoryTranslatedExampleEntitySeralizer(\n many=True,\n read_only=True)\n\n def get_from_language(self, obj):\n return obj.original_example.repository_update.language\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n\nclass 
NewRepositoryTranslatedExampleEntitySeralizer(\n serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'start',\n 'end',\n 'entity',\n ]\n\n\nclass NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n has_valid_entities = serializers.SerializerMethodField()\n entities = NewRepositoryTranslatedExampleEntitySeralizer(\n many=True,\n style={'text_field': 'text'})\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n def create(self, validated_data):\n entities_data = validated_data.pop('entities')\n\n translated = self.Meta.model.objects.create(**validated_data)\n for entity_data in entities_data:\n RepositoryTranslatedExampleEntity.objects.create(\n repository_translated_example=translated,\n **entity_data)\n return translated\n", "path": "bothub/api/serializers/translate.py"}, {"content": "from django.utils.translation import gettext as _\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\n\nfrom bothub.common.models import RepositoryTranslatedExample\n\n\nclass CanContributeInRepositoryValidator(object):\n def __call__(self, value):\n user_authorization = value.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryExampleValidator(object):\n def __call__(self, value):\n repository = value.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryTranslatedExampleValidator(object):\n def __call__(self, value):\n repository = value.original_example.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass TranslatedExampleEntitiesValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n entities_valid = RepositoryTranslatedExample.same_entities_validator(\n list(map(lambda x: dict(x), attrs.get('entities'))),\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n", "path": "bothub/api/validators.py"}]}
1,995
347
gh_patches_debug_37497
rasdani/github-patches
git_diff
Parsl__parsl-2221
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PBSPro scheduler options ignored **Describe the bug** The PBSPro provider template adds `${scheduler_options}` to the bottom of the list of #PBS commands. However, PBSPro seems to only consider the first #PBS option and ignore any later competing ones. This means specifying a new select option with gpus is superseded by the default one and ignored. We can resolve this by moving the user defined scheduler options to the top of the #PBS list in the template. **To Reproduce** Use the PBSPro provider (e.g., at ALCF's edge testbed for Polaris) and try to specify a new select option:`-l select=1:ncpus=32:ngpus=1`. These options are ignored in favor of the default specified above in the template. **Expected behavior** I would like scheduler_options to be put first so they can be used to enter specific options. **Environment** - Parsl 1.2.0 **Distributed Environment** - Where are you running the Parsl script from ? ALCF - Where do you need the workers to run ? Compute nodes </issue> <code> [start of parsl/providers/pbspro/template.py] 1 template_string = '''#!/bin/bash 2 3 #PBS -S /bin/bash 4 #PBS -N ${jobname} 5 #PBS -m n 6 #PBS -l walltime=$walltime 7 #PBS -l select=${nodes_per_block}:ncpus=${ncpus} 8 #PBS -o ${submit_script_dir}/${jobname}.submit.stdout 9 #PBS -e ${submit_script_dir}/${jobname}.submit.stderr 10 ${scheduler_options} 11 12 ${worker_init} 13 14 export JOBNAME="${jobname}" 15 16 ${user_script} 17 18 ''' 19 [end of parsl/providers/pbspro/template.py] [start of parsl/providers/pbspro/pbspro.py] 1 import logging 2 import os 3 import time 4 5 from parsl.channels import LocalChannel 6 from parsl.launchers import SingleNodeLauncher 7 from parsl.providers.pbspro.template import template_string 8 from parsl.providers import TorqueProvider 9 from parsl.providers.provider_base import JobState, JobStatus 10 11 logger = logging.getLogger(__name__) 12 13 14 class PBSProProvider(TorqueProvider): 15 """PBS Pro Execution Provider 16 17 Parameters 18 ---------- 19 channel : Channel 20 Channel for accessing this provider. Possible channels include 21 :class:`~parsl.channels.LocalChannel` (the default), 22 :class:`~parsl.channels.SSHChannel`, or 23 :class:`~parsl.channels.SSHInteractiveLoginChannel`. 24 account : str 25 Account the job will be charged against. 26 queue : str 27 Queue to request blocks from. 28 nodes_per_block : int 29 Nodes to provision per block. 30 cpus_per_node : int 31 CPUs to provision per node. 32 init_blocks : int 33 Number of blocks to provision at the start of the run. Default is 1. 34 min_blocks : int 35 Minimum number of blocks to maintain. Default is 0. 36 max_blocks : int 37 Maximum number of blocks to maintain. 38 parallelism : float 39 Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive 40 scaling where as many resources as possible are used; parallelism close to 0 represents 41 the opposite situation in which as few resources as possible (i.e., min_blocks) are used. 42 walltime : str 43 Walltime requested per block in HH:MM:SS. 44 scheduler_options : str 45 String to prepend to the #PBS blocks in the submit script to the scheduler. 46 worker_init : str 47 Command to be run before starting a worker, such as 'module load Anaconda; source activate env'. 48 launcher : Launcher 49 Launcher for this provider. The default is 50 :class:`~parsl.launchers.SingleNodeLauncher`. 
51 """ 52 def __init__(self, 53 channel=LocalChannel(), 54 account=None, 55 queue=None, 56 scheduler_options='', 57 worker_init='', 58 nodes_per_block=1, 59 cpus_per_node=1, 60 init_blocks=1, 61 min_blocks=0, 62 max_blocks=1, 63 parallelism=1, 64 launcher=SingleNodeLauncher(), 65 walltime="00:20:00", 66 cmd_timeout=120): 67 super().__init__(channel, 68 account, 69 queue, 70 scheduler_options, 71 worker_init, 72 nodes_per_block, 73 init_blocks, 74 min_blocks, 75 max_blocks, 76 parallelism, 77 launcher, 78 walltime, 79 cmd_timeout=cmd_timeout) 80 81 self.template_string = template_string 82 self._label = 'pbspro' 83 self.cpus_per_node = cpus_per_node 84 85 def submit(self, command, tasks_per_node, job_name="parsl"): 86 """Submits the command job. 87 88 Parameters 89 ---------- 90 command : str 91 Command to be executed on the remote side. 92 tasks_per_node : int 93 Command invocations to be launched per node. 94 job_name : str 95 Identifier for job. 96 97 Returns 98 ------- 99 None 100 If at capacity and cannot provision more 101 job_id : str 102 Identifier for the job 103 """ 104 105 job_name = "{0}.{1}".format(job_name, time.time()) 106 107 script_path = os.path.abspath("{0}/{1}.submit".format(self.script_dir, job_name)) 108 109 logger.debug("Requesting {} nodes_per_block, {} tasks_per_node".format( 110 self.nodes_per_block, tasks_per_node) 111 ) 112 113 job_config = {} 114 job_config["submit_script_dir"] = self.channel.script_dir 115 job_config["nodes_per_block"] = self.nodes_per_block 116 job_config["ncpus"] = self.cpus_per_node 117 job_config["walltime"] = self.walltime 118 job_config["scheduler_options"] = self.scheduler_options 119 job_config["worker_init"] = self.worker_init 120 job_config["user_script"] = command 121 122 # Wrap the command 123 job_config["user_script"] = self.launcher(command, 124 tasks_per_node, 125 self.nodes_per_block) 126 127 logger.debug("Writing submit script") 128 self._write_submit_script(self.template_string, script_path, job_name, job_config) 129 130 channel_script_path = self.channel.push_file(script_path, self.channel.script_dir) 131 132 submit_options = '' 133 if self.queue is not None: 134 submit_options = '{0} -q {1}'.format(submit_options, self.queue) 135 if self.account is not None: 136 submit_options = '{0} -A {1}'.format(submit_options, self.account) 137 138 launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path) 139 retcode, stdout, stderr = self.execute_wait(launch_cmd) 140 141 job_id = None 142 if retcode == 0: 143 for line in stdout.split('\n'): 144 if line.strip(): 145 job_id = line.strip() 146 self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)} 147 else: 148 message = "Command '{}' failed with return code {}".format(launch_cmd, retcode) 149 if (stdout is not None) and (stderr is not None): 150 message += "\nstderr:{}\nstdout{}".format(stderr.strip(), stdout.strip()) 151 logger.error(message) 152 153 return job_id 154 155 @property 156 def status_polling_interval(self): 157 return 60 158 [end of parsl/providers/pbspro/pbspro.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/parsl/providers/pbspro/pbspro.py b/parsl/providers/pbspro/pbspro.py --- a/parsl/providers/pbspro/pbspro.py +++ b/parsl/providers/pbspro/pbspro.py @@ -43,6 +43,9 @@ Walltime requested per block in HH:MM:SS. scheduler_options : str String to prepend to the #PBS blocks in the submit script to the scheduler. + select_options : str + String to append to the #PBS -l select block in the submit script to the scheduler. This can be used to + specify ngpus. worker_init : str Command to be run before starting a worker, such as 'module load Anaconda; source activate env'. launcher : Launcher @@ -54,6 +57,7 @@ account=None, queue=None, scheduler_options='', + select_options='', worker_init='', nodes_per_block=1, cpus_per_node=1, @@ -81,6 +85,7 @@ self.template_string = template_string self._label = 'pbspro' self.cpus_per_node = cpus_per_node + self.select_options = select_options def submit(self, command, tasks_per_node, job_name="parsl"): """Submits the command job. @@ -119,6 +124,12 @@ job_config["worker_init"] = self.worker_init job_config["user_script"] = command + # Add a colon to select_options if one isn't included + if self.select_options and not self.select_options.startswith(":"): + self.select_options = ":" + self.select_options + + job_config["select_options"] = self.select_options + # Wrap the command job_config["user_script"] = self.launcher(command, tasks_per_node, diff --git a/parsl/providers/pbspro/template.py b/parsl/providers/pbspro/template.py --- a/parsl/providers/pbspro/template.py +++ b/parsl/providers/pbspro/template.py @@ -4,7 +4,7 @@ #PBS -N ${jobname} #PBS -m n #PBS -l walltime=$walltime -#PBS -l select=${nodes_per_block}:ncpus=${ncpus} +#PBS -l select=${nodes_per_block}:ncpus=${ncpus}${select_options} #PBS -o ${submit_script_dir}/${jobname}.submit.stdout #PBS -e ${submit_script_dir}/${jobname}.submit.stderr ${scheduler_options}
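With the patch above applied, the GPU request from the issue (`-l select=1:ncpus=32:ngpus=1`) can be expressed through the new `select_options` parameter instead of competing with the template's own `-l select` line. A minimal configuration sketch, assuming `PBSProProvider` is importable from `parsl.providers` as in current Parsl releases; the queue and account names below are placeholders, not values taken from the issue:

```python
from parsl.providers import PBSProProvider

# Sketch: request 1 node with 32 CPUs and 1 GPU per node. The patch joins
# select_options onto the generated "#PBS -l select=..." line, adding the
# leading ":" automatically if it is missing.
provider = PBSProProvider(
    queue="debug",             # placeholder queue name
    account="MyAllocation",    # placeholder account/allocation
    nodes_per_block=1,
    cpus_per_node=32,
    select_options="ngpus=1",  # rendered as select=1:ncpus=32:ngpus=1
    walltime="00:20:00",
)
```

This keeps `scheduler_options` free for genuinely additional `#PBS` directives rather than being used to fight the default select statement.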
{"golden_diff": "diff --git a/parsl/providers/pbspro/pbspro.py b/parsl/providers/pbspro/pbspro.py\n--- a/parsl/providers/pbspro/pbspro.py\n+++ b/parsl/providers/pbspro/pbspro.py\n@@ -43,6 +43,9 @@\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #PBS blocks in the submit script to the scheduler.\n+ select_options : str\n+ String to append to the #PBS -l select block in the submit script to the scheduler. This can be used to\n+ specify ngpus.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n launcher : Launcher\n@@ -54,6 +57,7 @@\n account=None,\n queue=None,\n scheduler_options='',\n+ select_options='',\n worker_init='',\n nodes_per_block=1,\n cpus_per_node=1,\n@@ -81,6 +85,7 @@\n self.template_string = template_string\n self._label = 'pbspro'\n self.cpus_per_node = cpus_per_node\n+ self.select_options = select_options\n \n def submit(self, command, tasks_per_node, job_name=\"parsl\"):\n \"\"\"Submits the command job.\n@@ -119,6 +124,12 @@\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n \n+ # Add a colon to select_options if one isn't included\n+ if self.select_options and not self.select_options.startswith(\":\"):\n+ self.select_options = \":\" + self.select_options\n+\n+ job_config[\"select_options\"] = self.select_options\n+\n # Wrap the command\n job_config[\"user_script\"] = self.launcher(command,\n tasks_per_node,\ndiff --git a/parsl/providers/pbspro/template.py b/parsl/providers/pbspro/template.py\n--- a/parsl/providers/pbspro/template.py\n+++ b/parsl/providers/pbspro/template.py\n@@ -4,7 +4,7 @@\n #PBS -N ${jobname}\n #PBS -m n\n #PBS -l walltime=$walltime\n-#PBS -l select=${nodes_per_block}:ncpus=${ncpus}\n+#PBS -l select=${nodes_per_block}:ncpus=${ncpus}${select_options}\n #PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n #PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n ${scheduler_options}\n", "issue": "PBSPro scheduler options ignored\n**Describe the bug**\r\nThe PBSPro provider template adds `${scheduler_options}` to the bottom of the list of #PBS commands. However, PBSPro seems to only consider the first #PBS option and ignore any later competing ones. This means specifying a new select option with gpus is superseded by the default one and ignored. We can resolve this by moving the user defined scheduler options to the top of the #PBS list in the template.\r\n\r\n**To Reproduce**\r\nUse the PBSPro provider (e.g., at ALCF's edge testbed for Polaris) and try to specify a new select option:`-l select=1:ncpus=32:ngpus=1`. These options are ignored in favor of the default specified above in the template.\r\n\r\n**Expected behavior**\r\nI would like scheduler_options to be put first so they can be used to enter specific options.\r\n\r\n**Environment**\r\n - Parsl 1.2.0\r\n\r\n**Distributed Environment**\r\n- Where are you running the Parsl script from ? ALCF\r\n- Where do you need the workers to run ? 
Compute nodes\r\n\n", "before_files": [{"content": "template_string = '''#!/bin/bash\n\n#PBS -S /bin/bash\n#PBS -N ${jobname}\n#PBS -m n\n#PBS -l walltime=$walltime\n#PBS -l select=${nodes_per_block}:ncpus=${ncpus}\n#PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n#PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n${scheduler_options}\n\n${worker_init}\n\nexport JOBNAME=\"${jobname}\"\n\n${user_script}\n\n'''\n", "path": "parsl/providers/pbspro/template.py"}, {"content": "import logging\nimport os\nimport time\n\nfrom parsl.channels import LocalChannel\nfrom parsl.launchers import SingleNodeLauncher\nfrom parsl.providers.pbspro.template import template_string\nfrom parsl.providers import TorqueProvider\nfrom parsl.providers.provider_base import JobState, JobStatus\n\nlogger = logging.getLogger(__name__)\n\n\nclass PBSProProvider(TorqueProvider):\n \"\"\"PBS Pro Execution Provider\n\n Parameters\n ----------\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n account : str\n Account the job will be charged against.\n queue : str\n Queue to request blocks from.\n nodes_per_block : int\n Nodes to provision per block.\n cpus_per_node : int\n CPUs to provision per node.\n init_blocks : int\n Number of blocks to provision at the start of the run. Default is 1.\n min_blocks : int\n Minimum number of blocks to maintain. Default is 0.\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n walltime : str\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #PBS blocks in the submit script to the scheduler.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n launcher : Launcher\n Launcher for this provider. 
The default is\n :class:`~parsl.launchers.SingleNodeLauncher`.\n \"\"\"\n def __init__(self,\n channel=LocalChannel(),\n account=None,\n queue=None,\n scheduler_options='',\n worker_init='',\n nodes_per_block=1,\n cpus_per_node=1,\n init_blocks=1,\n min_blocks=0,\n max_blocks=1,\n parallelism=1,\n launcher=SingleNodeLauncher(),\n walltime=\"00:20:00\",\n cmd_timeout=120):\n super().__init__(channel,\n account,\n queue,\n scheduler_options,\n worker_init,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n launcher,\n walltime,\n cmd_timeout=cmd_timeout)\n\n self.template_string = template_string\n self._label = 'pbspro'\n self.cpus_per_node = cpus_per_node\n\n def submit(self, command, tasks_per_node, job_name=\"parsl\"):\n \"\"\"Submits the command job.\n\n Parameters\n ----------\n command : str\n Command to be executed on the remote side.\n tasks_per_node : int\n Command invocations to be launched per node.\n job_name : str\n Identifier for job.\n\n Returns\n -------\n None\n If at capacity and cannot provision more\n job_id : str\n Identifier for the job\n \"\"\"\n\n job_name = \"{0}.{1}\".format(job_name, time.time())\n\n script_path = os.path.abspath(\"{0}/{1}.submit\".format(self.script_dir, job_name))\n\n logger.debug(\"Requesting {} nodes_per_block, {} tasks_per_node\".format(\n self.nodes_per_block, tasks_per_node)\n )\n\n job_config = {}\n job_config[\"submit_script_dir\"] = self.channel.script_dir\n job_config[\"nodes_per_block\"] = self.nodes_per_block\n job_config[\"ncpus\"] = self.cpus_per_node\n job_config[\"walltime\"] = self.walltime\n job_config[\"scheduler_options\"] = self.scheduler_options\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n\n # Wrap the command\n job_config[\"user_script\"] = self.launcher(command,\n tasks_per_node,\n self.nodes_per_block)\n\n logger.debug(\"Writing submit script\")\n self._write_submit_script(self.template_string, script_path, job_name, job_config)\n\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n\n submit_options = ''\n if self.queue is not None:\n submit_options = '{0} -q {1}'.format(submit_options, self.queue)\n if self.account is not None:\n submit_options = '{0} -A {1}'.format(submit_options, self.account)\n\n launch_cmd = \"qsub {0} {1}\".format(submit_options, channel_script_path)\n retcode, stdout, stderr = self.execute_wait(launch_cmd)\n\n job_id = None\n if retcode == 0:\n for line in stdout.split('\\n'):\n if line.strip():\n job_id = line.strip()\n self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}\n else:\n message = \"Command '{}' failed with return code {}\".format(launch_cmd, retcode)\n if (stdout is not None) and (stderr is not None):\n message += \"\\nstderr:{}\\nstdout{}\".format(stderr.strip(), stdout.strip())\n logger.error(message)\n\n return job_id\n\n @property\n def status_polling_interval(self):\n return 60\n", "path": "parsl/providers/pbspro/pbspro.py"}]}
2,509 · 569 · gh_patches_debug_18321 · rasdani/github-patches · git_diff
crytic__slither-2394
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> filter `name-reused` to only run on Truffle projects The detector should check which platform was used with https://crytic.github.io/crytic-compile/crytic_compile/crytic_compile.html#CryticCompile.platform and https://crytic.github.io/slither/slither/core/compilation_unit.html#SlitherCompilationUnit.crytic_compile https://github.com/crytic/slither/blob/13d7d9f66a6be4f798478fa3735fb63444b46c3d/slither/detectors/slither/name_reused.py#L51-L61 https://github.com/crytic/crytic-compile/blob/b5c538aaa66be44b7a68d9723881a7eba2c20898/crytic_compile/platform/truffle.py#L83-L90 </issue> <code> [start of slither/detectors/slither/name_reused.py] 1 from collections import defaultdict 2 from typing import List 3 4 from slither.core.compilation_unit import SlitherCompilationUnit 5 from slither.core.declarations import Contract 6 from slither.detectors.abstract_detector import ( 7 AbstractDetector, 8 DetectorClassification, 9 DETECTOR_INFO, 10 ) 11 from slither.utils.output import Output 12 13 14 def _find_missing_inheritance(compilation_unit: SlitherCompilationUnit) -> List[Contract]: 15 """ 16 Filter contracts with missing inheritance to return only the "most base" contracts 17 in the inheritance tree. 18 :param slither: 19 :return: 20 """ 21 missings = compilation_unit.contracts_with_missing_inheritance 22 23 ret = [] 24 for b in missings: 25 is_most_base = True 26 for inheritance in b.immediate_inheritance: 27 if inheritance in missings: 28 is_most_base = False 29 if is_most_base: 30 ret.append(b) 31 32 return ret 33 34 35 class NameReused(AbstractDetector): 36 ARGUMENT = "name-reused" 37 HELP = "Contract's name reused" 38 IMPACT = DetectorClassification.HIGH 39 CONFIDENCE = DetectorClassification.HIGH 40 41 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#name-reused" 42 43 WIKI_TITLE = "Name reused" 44 45 # region wiki_description 46 WIKI_DESCRIPTION = """If a codebase has two contracts the similar names, the compilation artifacts 47 will not contain one of the contracts with the duplicate name.""" 48 # endregion wiki_description 49 50 # region wiki_exploit_scenario 51 WIKI_EXPLOIT_SCENARIO = """ 52 Bob's `truffle` codebase has two contracts named `ERC20`. 53 When `truffle compile` runs, only one of the two contracts will generate artifacts in `build/contracts`. 54 As a result, the second contract cannot be analyzed. 55 """ 56 # endregion wiki_exploit_scenario 57 58 WIKI_RECOMMENDATION = "Rename the contract." 
59 60 # pylint: disable=too-many-locals,too-many-branches 61 def _detect(self) -> List[Output]: 62 results = [] 63 compilation_unit = self.compilation_unit 64 65 all_contracts = compilation_unit.contracts 66 all_contracts_name = [c.name for c in all_contracts] 67 contracts_name_reused = { 68 contract for contract in all_contracts_name if all_contracts_name.count(contract) > 1 69 } 70 71 names_reused = { 72 name: compilation_unit.get_contract_from_name(name) for name in contracts_name_reused 73 } 74 75 # First show the contracts that we know are missing 76 incorrectly_constructed = [ 77 contract 78 for contract in compilation_unit.contracts 79 if contract.is_incorrectly_constructed 80 ] 81 82 inheritance_corrupted = defaultdict(list) 83 for contract in incorrectly_constructed: 84 for father in contract.inheritance: 85 inheritance_corrupted[father.name].append(contract) 86 87 for contract_name, files in names_reused.items(): 88 info: DETECTOR_INFO = [contract_name, " is re-used:\n"] 89 for file in files: 90 if file is None: 91 info += ["\t- In an file not found, most likely in\n"] 92 else: 93 info += ["\t- ", file, "\n"] 94 95 if contract_name in inheritance_corrupted: 96 info += ["\tAs a result, the inherited contracts are not correctly analyzed:\n"] 97 for corrupted in inheritance_corrupted[contract_name]: 98 info += ["\t\t- ", corrupted, "\n"] 99 res = self.generate_result(info) 100 results.append(res) 101 102 # Then show the contracts for which one of the father was not found 103 # Here we are not able to know 104 most_base_with_missing_inheritance = _find_missing_inheritance(compilation_unit) 105 106 for b in most_base_with_missing_inheritance: 107 info = [b, " inherits from a contract for which the name is reused.\n"] 108 if b.inheritance: 109 info += ["\t- Slither could not determine which contract has a duplicate name:\n"] 110 for inheritance in b.inheritance: 111 info += ["\t\t-", inheritance, "\n"] 112 info += ["\t- Check if:\n"] 113 info += ["\t\t- A inherited contract is missing from this list,\n"] 114 info += ["\t\t- The contract are imported from the correct files.\n"] 115 if b.derived_contracts: 116 info += [f"\t- This issue impacts the contracts inheriting from {b.name}:\n"] 117 for derived in b.derived_contracts: 118 info += ["\t\t-", derived, "\n"] 119 res = self.generate_result(info) 120 results.append(res) 121 return results 122 [end of slither/detectors/slither/name_reused.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/slither/detectors/slither/name_reused.py b/slither/detectors/slither/name_reused.py --- a/slither/detectors/slither/name_reused.py +++ b/slither/detectors/slither/name_reused.py @@ -1,6 +1,8 @@ from collections import defaultdict from typing import List +from crytic_compile.platform import Type as PlatformType + from slither.core.compilation_unit import SlitherCompilationUnit from slither.core.declarations import Contract from slither.detectors.abstract_detector import ( @@ -61,6 +63,8 @@ def _detect(self) -> List[Output]: results = [] compilation_unit = self.compilation_unit + if compilation_unit.core.crytic_compile.platform != PlatformType.TRUFFLE: + return [] all_contracts = compilation_unit.contracts all_contracts_name = [c.name for c in all_contracts]
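The comparison the patch introduces is the whole of the fix; for anyone writing a similarly platform-gated detector it can be isolated into a small helper. The sketch below is illustrative only: the helper name is hypothetical, and the attribute chain is the one used in the patch rather than a new API.

```python
from crytic_compile.platform import Type as PlatformType


def runs_only_on_truffle(compilation_unit) -> bool:
    """Hypothetical helper mirroring the guard added to NameReused._detect().

    Only `truffle compile` writes one artifact per contract *name* under
    build/contracts, which is what makes duplicate names silently drop a
    contract; for any other platform the detector should return no results.
    """
    return compilation_unit.core.crytic_compile.platform == PlatformType.TRUFFLE
```

With the guard in place, running the detector on a non-Truffle project (for example `slither . --detect name-reused` on a Hardhat build) should simply report nothing from this detector.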
{"golden_diff": "diff --git a/slither/detectors/slither/name_reused.py b/slither/detectors/slither/name_reused.py\n--- a/slither/detectors/slither/name_reused.py\n+++ b/slither/detectors/slither/name_reused.py\n@@ -1,6 +1,8 @@\n from collections import defaultdict\n from typing import List\n \n+from crytic_compile.platform import Type as PlatformType\n+\n from slither.core.compilation_unit import SlitherCompilationUnit\n from slither.core.declarations import Contract\n from slither.detectors.abstract_detector import (\n@@ -61,6 +63,8 @@\n def _detect(self) -> List[Output]:\n results = []\n compilation_unit = self.compilation_unit\n+ if compilation_unit.core.crytic_compile.platform != PlatformType.TRUFFLE:\n+ return []\n \n all_contracts = compilation_unit.contracts\n all_contracts_name = [c.name for c in all_contracts]\n", "issue": "filter `name-reused` to only run on Truffle projects\nThe detector should check which platform was used with https://crytic.github.io/crytic-compile/crytic_compile/crytic_compile.html#CryticCompile.platform and https://crytic.github.io/slither/slither/core/compilation_unit.html#SlitherCompilationUnit.crytic_compile \r\nhttps://github.com/crytic/slither/blob/13d7d9f66a6be4f798478fa3735fb63444b46c3d/slither/detectors/slither/name_reused.py#L51-L61\r\n\r\nhttps://github.com/crytic/crytic-compile/blob/b5c538aaa66be44b7a68d9723881a7eba2c20898/crytic_compile/platform/truffle.py#L83-L90\n", "before_files": [{"content": "from collections import defaultdict\nfrom typing import List\n\nfrom slither.core.compilation_unit import SlitherCompilationUnit\nfrom slither.core.declarations import Contract\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\ndef _find_missing_inheritance(compilation_unit: SlitherCompilationUnit) -> List[Contract]:\n \"\"\"\n Filter contracts with missing inheritance to return only the \"most base\" contracts\n in the inheritance tree.\n :param slither:\n :return:\n \"\"\"\n missings = compilation_unit.contracts_with_missing_inheritance\n\n ret = []\n for b in missings:\n is_most_base = True\n for inheritance in b.immediate_inheritance:\n if inheritance in missings:\n is_most_base = False\n if is_most_base:\n ret.append(b)\n\n return ret\n\n\nclass NameReused(AbstractDetector):\n ARGUMENT = \"name-reused\"\n HELP = \"Contract's name reused\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#name-reused\"\n\n WIKI_TITLE = \"Name reused\"\n\n # region wiki_description\n WIKI_DESCRIPTION = \"\"\"If a codebase has two contracts the similar names, the compilation artifacts\nwill not contain one of the contracts with the duplicate name.\"\"\"\n # endregion wiki_description\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\nBob's `truffle` codebase has two contracts named `ERC20`.\nWhen `truffle compile` runs, only one of the two contracts will generate artifacts in `build/contracts`.\nAs a result, the second contract cannot be analyzed.\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Rename the contract.\"\n\n # pylint: disable=too-many-locals,too-many-branches\n def _detect(self) -> List[Output]:\n results = []\n compilation_unit = self.compilation_unit\n\n all_contracts = compilation_unit.contracts\n all_contracts_name = [c.name for c in all_contracts]\n contracts_name_reused = {\n contract for 
contract in all_contracts_name if all_contracts_name.count(contract) > 1\n }\n\n names_reused = {\n name: compilation_unit.get_contract_from_name(name) for name in contracts_name_reused\n }\n\n # First show the contracts that we know are missing\n incorrectly_constructed = [\n contract\n for contract in compilation_unit.contracts\n if contract.is_incorrectly_constructed\n ]\n\n inheritance_corrupted = defaultdict(list)\n for contract in incorrectly_constructed:\n for father in contract.inheritance:\n inheritance_corrupted[father.name].append(contract)\n\n for contract_name, files in names_reused.items():\n info: DETECTOR_INFO = [contract_name, \" is re-used:\\n\"]\n for file in files:\n if file is None:\n info += [\"\\t- In an file not found, most likely in\\n\"]\n else:\n info += [\"\\t- \", file, \"\\n\"]\n\n if contract_name in inheritance_corrupted:\n info += [\"\\tAs a result, the inherited contracts are not correctly analyzed:\\n\"]\n for corrupted in inheritance_corrupted[contract_name]:\n info += [\"\\t\\t- \", corrupted, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n # Then show the contracts for which one of the father was not found\n # Here we are not able to know\n most_base_with_missing_inheritance = _find_missing_inheritance(compilation_unit)\n\n for b in most_base_with_missing_inheritance:\n info = [b, \" inherits from a contract for which the name is reused.\\n\"]\n if b.inheritance:\n info += [\"\\t- Slither could not determine which contract has a duplicate name:\\n\"]\n for inheritance in b.inheritance:\n info += [\"\\t\\t-\", inheritance, \"\\n\"]\n info += [\"\\t- Check if:\\n\"]\n info += [\"\\t\\t- A inherited contract is missing from this list,\\n\"]\n info += [\"\\t\\t- The contract are imported from the correct files.\\n\"]\n if b.derived_contracts:\n info += [f\"\\t- This issue impacts the contracts inheriting from {b.name}:\\n\"]\n for derived in b.derived_contracts:\n info += [\"\\t\\t-\", derived, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n return results\n", "path": "slither/detectors/slither/name_reused.py"}]}
2,027 · 205 · gh_patches_debug_16712 · rasdani/github-patches · git_diff
hydroshare__hydroshare-2263
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Metadata strings updated through hs_restclient are parsed incorrectly When using the hs_restclient to update metadata for Generic and Composite resources (I haven't tested other types), string values are updated as a unicode string inside an array (e.g. [u'some_string'] ). Here's an example code snippet: ` put_data = {"title": "New Title", "description": "New Description"} client.updateScienceMetadata('f44c00556cd847b98dd47f3a6279014d', put_data) ` Two resources that show this issue: ![image](https://user-images.githubusercontent.com/8953221/28534607-84703f50-705e-11e7-9e73-5edd5058368f.png) ![image](https://user-images.githubusercontent.com/8953221/28534643-984a8530-705e-11e7-84d7-df0ba05379f5.png) </issue> <code> [start of hs_core/views/resource_metadata_rest_api.py] 1 import logging 2 3 from rest_framework.response import Response 4 from rest_framework.exceptions import ValidationError 5 from rest_framework import status 6 from rest_framework import generics 7 from rest_framework import serializers 8 9 from hs_core import hydroshare 10 from hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \ 11 ExternalProfileLink, Format, FundingAgency, Identifier, Subject, Source, Relation 12 from hs_core.views import utils as view_utils 13 from hs_core.views.utils import ACTION_TO_AUTHORIZE 14 15 logger = logging.getLogger(__name__) 16 17 18 class ExternalProfileLinkSerializer(serializers.Serializer): 19 type = serializers.CharField(required=False) 20 url = serializers.URLField(required=False) 21 object_id = serializers.IntegerField(required=False) 22 # content_type = models.ForeignKey(ContentType) 23 # content_object = GenericForeignKey('content_type', 'object_id') 24 25 class Meta: 26 model = ExternalProfileLink 27 28 29 class PartySerializer(serializers.Serializer): 30 name = serializers.CharField() 31 description = serializers.URLField(required=False) 32 organization = serializers.CharField(required=False) 33 email = serializers.EmailField(required=False) 34 address = serializers.CharField(required=False) 35 phone = serializers.CharField(required=False) 36 homepage = serializers.URLField(required=False) 37 external_links = serializers = ExternalProfileLinkSerializer(required=False, many=True) 38 39 class Meta: 40 model = Creator 41 fields = {'name', 'description', 'organization', 'email', 42 'address', 'phone', 'homepage', 'external_links'} 43 44 45 class CreatorSerializer(PartySerializer): 46 order = serializers.IntegerField(required=False) 47 48 class Meta: 49 model = Contributor 50 51 52 class DateSerializer(serializers.Serializer): 53 # term = 'Date' 54 type = serializers.CharField(required=False) 55 start_date = serializers.DateTimeField(required=False) 56 end_date = serializers.DateTimeField(required=False) 57 58 class Meta: 59 model = Date 60 61 62 class CoverageSerializer(serializers.Serializer): 63 type = serializers.CharField(required=False) 64 value = serializers.SerializerMethodField(required=False) 65 66 class Meta: 67 model = Coverage 68 69 def get_value(self, obj): 70 return obj.value 71 72 73 class FormatSerializer(serializers.Serializer): 74 value = serializers.CharField(required=False) 75 76 class Meta: 77 model = Format 78 79 80 class FundingAgencySerializer(serializers.Serializer): 81 agency_name = serializers.CharField() 82 award_title = serializers.CharField(required=False) 83 award_number = serializers.CharField(required=False) 84 agency_url 
= serializers.URLField(required=False) 85 86 class Meta: 87 model = FundingAgency 88 89 90 class IdentifierSerializer(serializers.Serializer): 91 name = serializers.CharField(required=False) 92 url = serializers.URLField(required=False) 93 94 class Meta: 95 model = Identifier 96 97 98 class SubjectSerializer(serializers.Serializer): 99 value = serializers.CharField(required=False) 100 101 class Meta: 102 model = Subject 103 104 105 class SourceSerializer(serializers.Serializer): 106 derived_from = serializers.CharField(required=False) 107 108 class Meta: 109 model = Source 110 111 112 class RelationSerializer(serializers.Serializer): 113 type = serializers.CharField(required=False) 114 value = serializers.CharField(required=False) 115 116 class Meta: 117 model = Relation 118 119 120 class CoreMetaDataSerializer(serializers.Serializer): 121 title = serializers.CharField(required=False) 122 creators = CreatorSerializer(required=False, many=True) 123 contributors = PartySerializer(required=False, many=True) 124 coverages = CoverageSerializer(required=False, many=True) 125 dates = DateSerializer(required=False, many=True) 126 description = serializers.CharField(required=False) 127 formats = FormatSerializer(required=False, many=True) 128 funding_agencies = FundingAgencySerializer(required=False, many=True) 129 identifiers = IdentifierSerializer(required=False, many=True) 130 language = serializers.CharField(required=False) 131 rights = serializers.CharField(required=False) 132 type = serializers.CharField(required=False) 133 publisher = serializers.CharField(required=False) 134 sources = SourceSerializer(required=False, many=True) 135 subjects = SubjectSerializer(required=False, many=True) 136 relations = RelationSerializer(required=False, many=True) 137 138 class Meta: 139 model = CoreMetaData 140 141 142 class MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView): 143 """ 144 Retrieve resource science (Dublin Core) metadata 145 146 REST URL: /hsapi/resource/{pk}/scimeta/elements/ 147 HTTP method: GET 148 149 :type pk: str 150 :param pk: id of the resource 151 :return: resource science metadata as JSON document 152 :rtype: str 153 :raises: 154 NotFound: return json format: {'detail': 'No resource was found for resource id:pk'} 155 PermissionDenied: return json format: {'detail': 'You do not have permission to perform 156 this action.'} 157 158 REST URL: /hsapi/resource/{pk}/scimeta/elements/ 159 HTTP method: PUT 160 161 :type pk: str 162 :param pk: id of the resource 163 :type request: JSON formatted string 164 :param request: resource metadata 165 :return: updated resource science metadata as JSON document 166 :rtype: str 167 :raises: 168 NotFound: return json format: {'detail': 'No resource was found for resource id':pk} 169 PermissionDenied: return json format: {'detail': 'You do not have permission to perform 170 this action.'} 171 ValidationError: return json format: {parameter-1': ['error message-1'], 172 'parameter-2': ['error message-2'], .. 
} 173 """ 174 ACCEPT_FORMATS = ('application/json',) 175 176 allowed_methods = ('GET', 'PUT') 177 178 serializer_class = CoreMetaDataSerializer 179 180 def get(self, request, pk): 181 view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA) 182 resource = hydroshare.get_resource_by_shortkey(shortkey=pk) 183 serializer = resource.metadata.serializer 184 return Response(data=serializer.data, status=status.HTTP_200_OK) 185 186 def put(self, request, pk): 187 # Update science metadata 188 resource, _, _ = view_utils.authorize( 189 request, pk, 190 needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE) 191 192 metadata = [] 193 put_data = request.data.copy() 194 195 try: 196 resource.metadata.parse_for_bulk_update(put_data, metadata) 197 hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user) 198 except Exception as ex: 199 error_msg = { 200 'resource': "Resource metadata update failed: %s, %s" 201 % (ex.__class__, ex.message) 202 } 203 raise ValidationError(detail=error_msg) 204 205 resource = hydroshare.get_resource_by_shortkey(shortkey=pk) 206 serializer = resource.metadata.serializer 207 return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED) 208 [end of hs_core/views/resource_metadata_rest_api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hs_core/views/resource_metadata_rest_api.py b/hs_core/views/resource_metadata_rest_api.py --- a/hs_core/views/resource_metadata_rest_api.py +++ b/hs_core/views/resource_metadata_rest_api.py @@ -1,5 +1,7 @@ import logging +from django.http import QueryDict + from rest_framework.response import Response from rest_framework.exceptions import ValidationError from rest_framework import status @@ -192,6 +194,10 @@ metadata = [] put_data = request.data.copy() + # convert the QueryDict to dict + if isinstance(put_data, QueryDict): + put_data = put_data.dict() + try: resource.metadata.parse_for_bulk_update(put_data, metadata) hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)
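A plausible way the `[u'some_string']` values arise: when the REST client sends the metadata as form-encoded data rather than JSON, DRF's `request.data` is a Django `QueryDict`, and its multi-value behaviour then leaks into `parse_for_bulk_update`. The patch's `put_data.dict()` call flattens it back to plain scalars. A small standalone illustration of the difference, using the values from the issue; `settings.configure()` is only there so the snippet runs outside a Django project:

```python
from django.conf import settings

settings.configure()  # minimal settings so QueryDict works outside a project

from django.http import QueryDict

# What request.data looks like for a form-encoded PUT of the issue's payload.
put_data = QueryDict("title=New+Title&description=New+Description")

print(put_data.getlist("title"))  # ['New Title']  <- the list-wrapped form seen in the issue
print(put_data["title"])          # 'New Title'
print(put_data.dict())            # {'title': 'New Title', 'description': 'New Description'}
```

(The `u'...'` prefix in the issue's screenshots simply reflects Python 2 string reprs.)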
{"golden_diff": "diff --git a/hs_core/views/resource_metadata_rest_api.py b/hs_core/views/resource_metadata_rest_api.py\n--- a/hs_core/views/resource_metadata_rest_api.py\n+++ b/hs_core/views/resource_metadata_rest_api.py\n@@ -1,5 +1,7 @@\n import logging\n \n+from django.http import QueryDict\n+\n from rest_framework.response import Response\n from rest_framework.exceptions import ValidationError\n from rest_framework import status\n@@ -192,6 +194,10 @@\n metadata = []\n put_data = request.data.copy()\n \n+ # convert the QueryDict to dict\n+ if isinstance(put_data, QueryDict):\n+ put_data = put_data.dict()\n+\n try:\n resource.metadata.parse_for_bulk_update(put_data, metadata)\n hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)\n", "issue": "Metadata strings updated through hs_restclient are parsed incorrectly\nWhen using the hs_restclient to update metadata for Generic and Composite resources (I haven't tested other types), string values are updated as a unicode string inside an array (e.g. [u'some_string'] ). \r\n\r\nHere's an example code snippet:\r\n`\r\nput_data = {\"title\": \"New Title\", \"description\": \"New Description\"}\r\nclient.updateScienceMetadata('f44c00556cd847b98dd47f3a6279014d', put_data)\r\n`\r\n\r\nTwo resources that show this issue:\r\n![image](https://user-images.githubusercontent.com/8953221/28534607-84703f50-705e-11e7-9e73-5edd5058368f.png)\r\n![image](https://user-images.githubusercontent.com/8953221/28534643-984a8530-705e-11e7-84d7-df0ba05379f5.png)\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework import status\nfrom rest_framework import generics\nfrom rest_framework import serializers\n\nfrom hs_core import hydroshare\nfrom hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \\\n ExternalProfileLink, Format, FundingAgency, Identifier, Subject, Source, Relation\nfrom hs_core.views import utils as view_utils\nfrom hs_core.views.utils import ACTION_TO_AUTHORIZE\n\nlogger = logging.getLogger(__name__)\n\n\nclass ExternalProfileLinkSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n url = serializers.URLField(required=False)\n object_id = serializers.IntegerField(required=False)\n # content_type = models.ForeignKey(ContentType)\n # content_object = GenericForeignKey('content_type', 'object_id')\n\n class Meta:\n model = ExternalProfileLink\n\n\nclass PartySerializer(serializers.Serializer):\n name = serializers.CharField()\n description = serializers.URLField(required=False)\n organization = serializers.CharField(required=False)\n email = serializers.EmailField(required=False)\n address = serializers.CharField(required=False)\n phone = serializers.CharField(required=False)\n homepage = serializers.URLField(required=False)\n external_links = serializers = ExternalProfileLinkSerializer(required=False, many=True)\n\n class Meta:\n model = Creator\n fields = {'name', 'description', 'organization', 'email',\n 'address', 'phone', 'homepage', 'external_links'}\n\n\nclass CreatorSerializer(PartySerializer):\n order = serializers.IntegerField(required=False)\n\n class Meta:\n model = Contributor\n\n\nclass DateSerializer(serializers.Serializer):\n # term = 'Date'\n type = serializers.CharField(required=False)\n start_date = serializers.DateTimeField(required=False)\n end_date = serializers.DateTimeField(required=False)\n\n class Meta:\n model = 
Date\n\n\nclass CoverageSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n value = serializers.SerializerMethodField(required=False)\n\n class Meta:\n model = Coverage\n\n def get_value(self, obj):\n return obj.value\n\n\nclass FormatSerializer(serializers.Serializer):\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Format\n\n\nclass FundingAgencySerializer(serializers.Serializer):\n agency_name = serializers.CharField()\n award_title = serializers.CharField(required=False)\n award_number = serializers.CharField(required=False)\n agency_url = serializers.URLField(required=False)\n\n class Meta:\n model = FundingAgency\n\n\nclass IdentifierSerializer(serializers.Serializer):\n name = serializers.CharField(required=False)\n url = serializers.URLField(required=False)\n\n class Meta:\n model = Identifier\n\n\nclass SubjectSerializer(serializers.Serializer):\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Subject\n\n\nclass SourceSerializer(serializers.Serializer):\n derived_from = serializers.CharField(required=False)\n\n class Meta:\n model = Source\n\n\nclass RelationSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Relation\n\n\nclass CoreMetaDataSerializer(serializers.Serializer):\n title = serializers.CharField(required=False)\n creators = CreatorSerializer(required=False, many=True)\n contributors = PartySerializer(required=False, many=True)\n coverages = CoverageSerializer(required=False, many=True)\n dates = DateSerializer(required=False, many=True)\n description = serializers.CharField(required=False)\n formats = FormatSerializer(required=False, many=True)\n funding_agencies = FundingAgencySerializer(required=False, many=True)\n identifiers = IdentifierSerializer(required=False, many=True)\n language = serializers.CharField(required=False)\n rights = serializers.CharField(required=False)\n type = serializers.CharField(required=False)\n publisher = serializers.CharField(required=False)\n sources = SourceSerializer(required=False, many=True)\n subjects = SubjectSerializer(required=False, many=True)\n relations = RelationSerializer(required=False, many=True)\n\n class Meta:\n model = CoreMetaData\n\n\nclass MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n Retrieve resource science (Dublin Core) metadata\n\n REST URL: /hsapi/resource/{pk}/scimeta/elements/\n HTTP method: GET\n\n :type pk: str\n :param pk: id of the resource\n :return: resource science metadata as JSON document\n :rtype: str\n :raises:\n NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}\n PermissionDenied: return json format: {'detail': 'You do not have permission to perform\n this action.'}\n\n REST URL: /hsapi/resource/{pk}/scimeta/elements/\n HTTP method: PUT\n\n :type pk: str\n :param pk: id of the resource\n :type request: JSON formatted string\n :param request: resource metadata\n :return: updated resource science metadata as JSON document\n :rtype: str\n :raises:\n NotFound: return json format: {'detail': 'No resource was found for resource id':pk}\n PermissionDenied: return json format: {'detail': 'You do not have permission to perform\n this action.'}\n ValidationError: return json format: {parameter-1': ['error message-1'],\n 'parameter-2': ['error message-2'], .. 
}\n \"\"\"\n ACCEPT_FORMATS = ('application/json',)\n\n allowed_methods = ('GET', 'PUT')\n\n serializer_class = CoreMetaDataSerializer\n\n def get(self, request, pk):\n view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n serializer = resource.metadata.serializer\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n def put(self, request, pk):\n # Update science metadata\n resource, _, _ = view_utils.authorize(\n request, pk,\n needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)\n\n metadata = []\n put_data = request.data.copy()\n\n try:\n resource.metadata.parse_for_bulk_update(put_data, metadata)\n hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)\n except Exception as ex:\n error_msg = {\n 'resource': \"Resource metadata update failed: %s, %s\"\n % (ex.__class__, ex.message)\n }\n raise ValidationError(detail=error_msg)\n\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n serializer = resource.metadata.serializer\n return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)\n", "path": "hs_core/views/resource_metadata_rest_api.py"}]}
2,703 · 183 · gh_patches_debug_4452 · rasdani/github-patches · git_diff
googleapis__google-cloud-python-3517
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Broken link Hello, I found that the link at [https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/core/google/cloud/client.py#L33][1] is broken. https://google-cloud-python.readthedocs.io/en/latest/google-cloud-auth.html [1]: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/661816540f1387bcc6e08b0fd722f4abae585b37/core/google/cloud/client.py#L33 </issue> <code> [start of core/google/cloud/client.py] 1 # Copyright 2015 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Base classes for client used to interact with Google Cloud APIs.""" 16 17 import io 18 import json 19 from pickle import PicklingError 20 21 import google.auth.credentials 22 from google.oauth2 import service_account 23 import google_auth_httplib2 24 import six 25 26 from google.cloud._helpers import _determine_default_project 27 from google.cloud.credentials import get_credentials 28 29 30 _GOOGLE_AUTH_CREDENTIALS_HELP = ( 31 'This library only supports credentials from google-auth-library-python. ' 32 'See https://google-cloud-python.readthedocs.io/en/latest/' 33 'google-cloud-auth.html for help on authentication with this library.' 34 ) 35 36 37 class _ClientFactoryMixin(object): 38 """Mixin to allow factories that create credentials. 39 40 .. note:: 41 42 This class is virtual. 43 """ 44 45 _SET_PROJECT = False 46 47 @classmethod 48 def from_service_account_json(cls, json_credentials_path, *args, **kwargs): 49 """Factory to retrieve JSON credentials while creating client. 50 51 :type json_credentials_path: str 52 :param json_credentials_path: The path to a private key file (this file 53 was given to you when you created the 54 service account). This file must contain 55 a JSON object with a private key and 56 other credentials information (downloaded 57 from the Google APIs console). 58 59 :type args: tuple 60 :param args: Remaining positional arguments to pass to constructor. 61 62 :type kwargs: dict 63 :param kwargs: Remaining keyword arguments to pass to constructor. 64 65 :rtype: :class:`_ClientFactoryMixin` 66 :returns: The client created with the retrieved JSON credentials. 67 :raises: :class:`TypeError` if there is a conflict with the kwargs 68 and the credentials created by the factory. 69 """ 70 if 'credentials' in kwargs: 71 raise TypeError('credentials must not be in keyword arguments') 72 with io.open(json_credentials_path, 'r', encoding='utf-8') as json_fi: 73 credentials_info = json.load(json_fi) 74 credentials = service_account.Credentials.from_service_account_info( 75 credentials_info) 76 if cls._SET_PROJECT: 77 if 'project' not in kwargs: 78 kwargs['project'] = credentials_info.get('project_id') 79 80 kwargs['credentials'] = credentials 81 return cls(*args, **kwargs) 82 83 84 class Client(_ClientFactoryMixin): 85 """Client to bundle configuration needed for API requests. 
86 87 Stores ``credentials`` and an HTTP object so that subclasses 88 can pass them along to a connection class. 89 90 If no value is passed in for ``_http``, a :class:`httplib2.Http` object 91 will be created and authorized with the ``credentials``. If not, the 92 ``credentials`` and ``_http`` need not be related. 93 94 Callers and subclasses may seek to use the private key from 95 ``credentials`` to sign data. 96 97 A custom (non-``httplib2``) HTTP object must have a ``request`` method 98 which accepts the following arguments: 99 100 * ``uri`` 101 * ``method`` 102 * ``body`` 103 * ``headers`` 104 105 In addition, ``redirections`` and ``connection_type`` may be used. 106 107 A custom ``_http`` object will also need to be able to add a bearer token 108 to API requests and handle token refresh on 401 errors. 109 110 :type credentials: :class:`~google.auth.credentials.Credentials` 111 :param credentials: (Optional) The OAuth2 Credentials to use for this 112 client. If not passed (and if no ``_http`` object is 113 passed), falls back to the default inferred from the 114 environment. 115 116 :type _http: :class:`~httplib2.Http` 117 :param _http: (Optional) HTTP object to make requests. Can be any object 118 that defines ``request()`` with the same interface as 119 :meth:`~httplib2.Http.request`. If not passed, an 120 ``_http`` object is created that is bound to the 121 ``credentials`` for the current object. 122 This parameter should be considered private, and could 123 change in the future. 124 """ 125 126 SCOPE = None 127 """The scopes required for authenticating with a service. 128 129 Needs to be set by subclasses. 130 """ 131 132 def __init__(self, credentials=None, _http=None): 133 if (credentials is not None and 134 not isinstance( 135 credentials, google.auth.credentials.Credentials)): 136 raise ValueError(_GOOGLE_AUTH_CREDENTIALS_HELP) 137 if credentials is None and _http is None: 138 credentials = get_credentials() 139 self._credentials = google.auth.credentials.with_scopes_if_required( 140 credentials, self.SCOPE) 141 self._http_internal = _http 142 143 def __getstate__(self): 144 """Explicitly state that clients are not pickleable.""" 145 raise PicklingError('\n'.join([ 146 'Pickling client objects is explicitly not supported.', 147 'Clients have non-trivial state that is local and unpickleable.', 148 ])) 149 150 @property 151 def _http(self): 152 """Getter for object used for HTTP transport. 153 154 :rtype: :class:`~httplib2.Http` 155 :returns: An HTTP object. 156 """ 157 if self._http_internal is None: 158 self._http_internal = google_auth_httplib2.AuthorizedHttp( 159 self._credentials) 160 return self._http_internal 161 162 163 class _ClientProjectMixin(object): 164 """Mixin to allow setting the project on the client. 165 166 :type project: str 167 :param project: the project which the client acts on behalf of. If not 168 passed falls back to the default inferred from the 169 environment. 170 171 :raises: :class:`EnvironmentError` if the project is neither passed in nor 172 set in the environment. :class:`ValueError` if the project value 173 is invalid. 
174 """ 175 176 def __init__(self, project=None): 177 project = self._determine_default(project) 178 if project is None: 179 raise EnvironmentError('Project was not passed and could not be ' 180 'determined from the environment.') 181 if isinstance(project, six.binary_type): 182 project = project.decode('utf-8') 183 if not isinstance(project, six.string_types): 184 raise ValueError('Project must be a string.') 185 self.project = project 186 187 @staticmethod 188 def _determine_default(project): 189 """Helper: use default project detection.""" 190 return _determine_default_project(project) 191 192 193 class ClientWithProject(Client, _ClientProjectMixin): 194 """Client that also stores a project. 195 196 :type project: str 197 :param project: the project which the client acts on behalf of. If not 198 passed falls back to the default inferred from the 199 environment. 200 201 :type credentials: :class:`~google.auth.credentials.Credentials` 202 :param credentials: (Optional) The OAuth2 Credentials to use for this 203 client. If not passed (and if no ``_http`` object is 204 passed), falls back to the default inferred from the 205 environment. 206 207 :type _http: :class:`~httplib2.Http` 208 :param _http: (Optional) HTTP object to make requests. Can be any object 209 that defines ``request()`` with the same interface as 210 :meth:`~httplib2.Http.request`. If not passed, an 211 ``_http`` object is created that is bound to the 212 ``credentials`` for the current object. 213 This parameter should be considered private, and could 214 change in the future. 215 216 :raises: :class:`ValueError` if the project is neither passed in nor 217 set in the environment. 218 """ 219 220 _SET_PROJECT = True # Used by from_service_account_json() 221 222 def __init__(self, project=None, credentials=None, _http=None): 223 _ClientProjectMixin.__init__(self, project=project) 224 Client.__init__(self, credentials=credentials, _http=_http) 225 [end of core/google/cloud/client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/google/cloud/client.py b/core/google/cloud/client.py --- a/core/google/cloud/client.py +++ b/core/google/cloud/client.py @@ -29,8 +29,8 @@ _GOOGLE_AUTH_CREDENTIALS_HELP = ( 'This library only supports credentials from google-auth-library-python. ' - 'See https://google-cloud-python.readthedocs.io/en/latest/' - 'google-cloud-auth.html for help on authentication with this library.' + 'See https://google-cloud-python.readthedocs.io/en/latest/core/auth.html ' + 'for help on authentication with this library.' )
{"golden_diff": "diff --git a/core/google/cloud/client.py b/core/google/cloud/client.py\n--- a/core/google/cloud/client.py\n+++ b/core/google/cloud/client.py\n@@ -29,8 +29,8 @@\n \n _GOOGLE_AUTH_CREDENTIALS_HELP = (\n 'This library only supports credentials from google-auth-library-python. '\n- 'See https://google-cloud-python.readthedocs.io/en/latest/'\n- 'google-cloud-auth.html for help on authentication with this library.'\n+ 'See https://google-cloud-python.readthedocs.io/en/latest/core/auth.html '\n+ 'for help on authentication with this library.'\n )\n", "issue": "Broken link\nHello,\r\n\r\nI found that the link at [https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/core/google/cloud/client.py#L33][1] is broken.\r\n\r\nhttps://google-cloud-python.readthedocs.io/en/latest/google-cloud-auth.html\r\n\r\n[1]: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/661816540f1387bcc6e08b0fd722f4abae585b37/core/google/cloud/client.py#L33\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base classes for client used to interact with Google Cloud APIs.\"\"\"\n\nimport io\nimport json\nfrom pickle import PicklingError\n\nimport google.auth.credentials\nfrom google.oauth2 import service_account\nimport google_auth_httplib2\nimport six\n\nfrom google.cloud._helpers import _determine_default_project\nfrom google.cloud.credentials import get_credentials\n\n\n_GOOGLE_AUTH_CREDENTIALS_HELP = (\n 'This library only supports credentials from google-auth-library-python. '\n 'See https://google-cloud-python.readthedocs.io/en/latest/'\n 'google-cloud-auth.html for help on authentication with this library.'\n)\n\n\nclass _ClientFactoryMixin(object):\n \"\"\"Mixin to allow factories that create credentials.\n\n .. note::\n\n This class is virtual.\n \"\"\"\n\n _SET_PROJECT = False\n\n @classmethod\n def from_service_account_json(cls, json_credentials_path, *args, **kwargs):\n \"\"\"Factory to retrieve JSON credentials while creating client.\n\n :type json_credentials_path: str\n :param json_credentials_path: The path to a private key file (this file\n was given to you when you created the\n service account). 
This file must contain\n a JSON object with a private key and\n other credentials information (downloaded\n from the Google APIs console).\n\n :type args: tuple\n :param args: Remaining positional arguments to pass to constructor.\n\n :type kwargs: dict\n :param kwargs: Remaining keyword arguments to pass to constructor.\n\n :rtype: :class:`_ClientFactoryMixin`\n :returns: The client created with the retrieved JSON credentials.\n :raises: :class:`TypeError` if there is a conflict with the kwargs\n and the credentials created by the factory.\n \"\"\"\n if 'credentials' in kwargs:\n raise TypeError('credentials must not be in keyword arguments')\n with io.open(json_credentials_path, 'r', encoding='utf-8') as json_fi:\n credentials_info = json.load(json_fi)\n credentials = service_account.Credentials.from_service_account_info(\n credentials_info)\n if cls._SET_PROJECT:\n if 'project' not in kwargs:\n kwargs['project'] = credentials_info.get('project_id')\n\n kwargs['credentials'] = credentials\n return cls(*args, **kwargs)\n\n\nclass Client(_ClientFactoryMixin):\n \"\"\"Client to bundle configuration needed for API requests.\n\n Stores ``credentials`` and an HTTP object so that subclasses\n can pass them along to a connection class.\n\n If no value is passed in for ``_http``, a :class:`httplib2.Http` object\n will be created and authorized with the ``credentials``. If not, the\n ``credentials`` and ``_http`` need not be related.\n\n Callers and subclasses may seek to use the private key from\n ``credentials`` to sign data.\n\n A custom (non-``httplib2``) HTTP object must have a ``request`` method\n which accepts the following arguments:\n\n * ``uri``\n * ``method``\n * ``body``\n * ``headers``\n\n In addition, ``redirections`` and ``connection_type`` may be used.\n\n A custom ``_http`` object will also need to be able to add a bearer token\n to API requests and handle token refresh on 401 errors.\n\n :type credentials: :class:`~google.auth.credentials.Credentials`\n :param credentials: (Optional) The OAuth2 Credentials to use for this\n client. If not passed (and if no ``_http`` object is\n passed), falls back to the default inferred from the\n environment.\n\n :type _http: :class:`~httplib2.Http`\n :param _http: (Optional) HTTP object to make requests. Can be any object\n that defines ``request()`` with the same interface as\n :meth:`~httplib2.Http.request`. 
If not passed, an\n ``_http`` object is created that is bound to the\n ``credentials`` for the current object.\n This parameter should be considered private, and could\n change in the future.\n \"\"\"\n\n SCOPE = None\n \"\"\"The scopes required for authenticating with a service.\n\n Needs to be set by subclasses.\n \"\"\"\n\n def __init__(self, credentials=None, _http=None):\n if (credentials is not None and\n not isinstance(\n credentials, google.auth.credentials.Credentials)):\n raise ValueError(_GOOGLE_AUTH_CREDENTIALS_HELP)\n if credentials is None and _http is None:\n credentials = get_credentials()\n self._credentials = google.auth.credentials.with_scopes_if_required(\n credentials, self.SCOPE)\n self._http_internal = _http\n\n def __getstate__(self):\n \"\"\"Explicitly state that clients are not pickleable.\"\"\"\n raise PicklingError('\\n'.join([\n 'Pickling client objects is explicitly not supported.',\n 'Clients have non-trivial state that is local and unpickleable.',\n ]))\n\n @property\n def _http(self):\n \"\"\"Getter for object used for HTTP transport.\n\n :rtype: :class:`~httplib2.Http`\n :returns: An HTTP object.\n \"\"\"\n if self._http_internal is None:\n self._http_internal = google_auth_httplib2.AuthorizedHttp(\n self._credentials)\n return self._http_internal\n\n\nclass _ClientProjectMixin(object):\n \"\"\"Mixin to allow setting the project on the client.\n\n :type project: str\n :param project: the project which the client acts on behalf of. If not\n passed falls back to the default inferred from the\n environment.\n\n :raises: :class:`EnvironmentError` if the project is neither passed in nor\n set in the environment. :class:`ValueError` if the project value\n is invalid.\n \"\"\"\n\n def __init__(self, project=None):\n project = self._determine_default(project)\n if project is None:\n raise EnvironmentError('Project was not passed and could not be '\n 'determined from the environment.')\n if isinstance(project, six.binary_type):\n project = project.decode('utf-8')\n if not isinstance(project, six.string_types):\n raise ValueError('Project must be a string.')\n self.project = project\n\n @staticmethod\n def _determine_default(project):\n \"\"\"Helper: use default project detection.\"\"\"\n return _determine_default_project(project)\n\n\nclass ClientWithProject(Client, _ClientProjectMixin):\n \"\"\"Client that also stores a project.\n\n :type project: str\n :param project: the project which the client acts on behalf of. If not\n passed falls back to the default inferred from the\n environment.\n\n :type credentials: :class:`~google.auth.credentials.Credentials`\n :param credentials: (Optional) The OAuth2 Credentials to use for this\n client. If not passed (and if no ``_http`` object is\n passed), falls back to the default inferred from the\n environment.\n\n :type _http: :class:`~httplib2.Http`\n :param _http: (Optional) HTTP object to make requests. Can be any object\n that defines ``request()`` with the same interface as\n :meth:`~httplib2.Http.request`. 
If not passed, an\n ``_http`` object is created that is bound to the\n ``credentials`` for the current object.\n This parameter should be considered private, and could\n change in the future.\n\n :raises: :class:`ValueError` if the project is neither passed in nor\n set in the environment.\n \"\"\"\n\n _SET_PROJECT = True # Used by from_service_account_json()\n\n def __init__(self, project=None, credentials=None, _http=None):\n _ClientProjectMixin.__init__(self, project=project)\n Client.__init__(self, credentials=credentials, _http=_http)\n", "path": "core/google/cloud/client.py"}]}
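A quick way to see how the pieces above fit together, as a minimal sketch only: the subclass name, scope URL, and key-file path below are invented for illustration and are not part of google-cloud-python itself.

```
from google.cloud.client import ClientWithProject

class StorageLikeClient(ClientWithProject):
    # Hypothetical subclass; the SCOPE value is an assumption for illustration.
    SCOPE = ('https://www.googleapis.com/auth/devstorage.read_only',)

# Reads the JSON key file, builds service-account credentials, and fills in
# the project from the key's project_id because _SET_PROJECT is True.
client = StorageLikeClient.from_service_account_json('/path/to/key.json')

# Passing credentials that are not google-auth credentials raises ValueError
# carrying _GOOGLE_AUTH_CREDENTIALS_HELP, which is the string the patch edits.
```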
3,054
131
gh_patches_debug_42452
rasdani/github-patches
git_diff
freedomofpress__securedrop-6681
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> runtime `i18n` configuration manipulates global state I guess we already do this for LOCALES, but I think continuing the pattern should be accompanied by a TODO that manipulating/relying on global state is not desirable. _Originally posted by @legoktm in https://github.com/freedomofpress/securedrop/pull/6406#discussion_r863080227_ </issue> <code> [start of securedrop/i18n.py] 1 # 2 # SecureDrop whistleblower submission system 3 # Copyright (C) 2017 Loic Dachary <[email protected]> 4 # 5 # This program is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU Affero General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU Affero General Public License for more details. 14 # 15 # You should have received a copy of the GNU Affero General Public License 16 # along with this program. If not, see <http://www.gnu.org/licenses/>. 17 # 18 import collections 19 from typing import Dict, List, Set 20 21 from babel.core import ( 22 Locale, 23 UnknownLocaleError, 24 get_locale_identifier, 25 negotiate_locale, 26 parse_locale, 27 ) 28 from flask import Flask, g, request, session 29 from flask_babel import Babel 30 from sdconfig import FALLBACK_LOCALE, SDConfig 31 32 33 class RequestLocaleInfo: 34 """ 35 Convenience wrapper around a babel.core.Locale. 36 """ 37 38 def __init__(self, locale: str): 39 self.locale = Locale.parse(locale) 40 41 # This attribute can be set to `True` to differentiate multiple 42 # locales currently available (supported) for the same language. 43 self.use_display_name = False 44 45 def __str__(self) -> str: 46 """ 47 The Babel string representation of the locale. 48 """ 49 return str(self.locale) 50 51 @property 52 def display_name(self) -> str: 53 """ 54 Give callers (i.e., templates) the `Locale` object's display name when 55 such resolution is warranted, otherwise the language name---as 56 determined by `map_locale_display_names()`. 57 """ 58 if self.use_display_name: 59 return self.locale.display_name 60 return self.locale.language_name 61 62 @property 63 def text_direction(self) -> str: 64 """ 65 The Babel text direction: ltr or rtl. 66 67 Used primarily to set text direction in HTML via the "dir" 68 attribute. 69 """ 70 return self.locale.text_direction 71 72 @property 73 def language(self) -> str: 74 """ 75 The Babel language name. 76 77 Just the language, without subtag info like region or script. 78 """ 79 return self.locale.language 80 81 @property 82 def id(self) -> str: 83 """ 84 The Babel string representation of the locale. 85 86 This should match the name of the directory containing its 87 translations. 88 """ 89 return str(self.locale) 90 91 @property 92 def language_tag(self) -> str: 93 """ 94 Returns a BCP47/RFC5646 language tag for the locale. 95 96 Language tags are used in HTTP headers and the HTML lang 97 attribute. 98 """ 99 return get_locale_identifier(parse_locale(str(self.locale)), sep="-") 100 101 102 def configure_babel(config: SDConfig, app: Flask) -> Babel: 103 """ 104 Set up Flask-Babel according to the SecureDrop configuration. 105 """ 106 # Tell Babel where to find our translations. 
107 translations_directory = str(config.TRANSLATION_DIRS.absolute()) 108 app.config["BABEL_TRANSLATION_DIRECTORIES"] = translations_directory 109 110 # Create the app's Babel instance. Passing the app to the 111 # constructor causes the instance to attach itself to the app. 112 babel = Babel(app) 113 114 # verify that Babel is only using the translations we told it about 115 if list(babel.translation_directories) != [translations_directory]: 116 raise ValueError( 117 "Babel translation directories ({}) do not match SecureDrop configuration ({})".format( 118 babel.translation_directories, [translations_directory] 119 ) 120 ) 121 122 # register the function used to determine the locale of a request 123 babel.localeselector(lambda: get_locale(config)) 124 return babel 125 126 127 def parse_locale_set(codes: List[str]) -> Set[Locale]: 128 return {Locale.parse(code) for code in codes} 129 130 131 def validate_locale_configuration(config: SDConfig, babel: Babel) -> None: 132 """ 133 Check that configured locales are available in the filesystem and therefore usable by 134 Babel. Warn about configured locales that are not usable, unless we're left with 135 no usable default or fallback locale, in which case raise an exception. 136 """ 137 # These locales are available and loadable from the filesystem. 138 available = set(babel.list_translations()) 139 available.add(Locale.parse(FALLBACK_LOCALE)) 140 141 # These locales were configured via "securedrop-admin sdconfig", meaning 142 # they were present on the Admin Workstation at "securedrop-admin" runtime. 143 configured = parse_locale_set(config.SUPPORTED_LOCALES) 144 145 # The intersection of these sets is the set of locales usable by Babel. 146 usable = available & configured 147 148 missing = configured - usable 149 if missing: 150 babel.app.logger.error( 151 f"Configured locales {missing} are not in the set of usable locales {usable}" 152 ) 153 154 defaults = parse_locale_set([config.DEFAULT_LOCALE, FALLBACK_LOCALE]) 155 if not defaults & usable: 156 raise ValueError( 157 f"None of the default locales {defaults} are in the set of usable locales {usable}" 158 ) 159 160 global USABLE_LOCALES 161 USABLE_LOCALES = usable 162 163 164 # TODO(#6420): avoid relying on and manipulating on this global state 165 LOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo] 166 USABLE_LOCALES = set() # type: Set[Locale] 167 168 169 def map_locale_display_names(config: SDConfig) -> None: 170 """ 171 Create a map of locale identifiers to names for display. 172 173 For most of our supported languages, we only provide one 174 translation, so including the full display name is not necessary 175 to distinguish them. For languages with more than one translation, 176 like Chinese, we do need the additional detail. 177 """ 178 179 language_locale_counts = collections.defaultdict(int) # type: Dict[str, int] 180 for l in sorted(config.SUPPORTED_LOCALES): 181 locale = RequestLocaleInfo(l) 182 language_locale_counts[locale.language] += 1 183 184 locale_map = collections.OrderedDict() 185 for l in sorted(config.SUPPORTED_LOCALES): 186 if Locale.parse(l) not in USABLE_LOCALES: 187 continue 188 189 locale = RequestLocaleInfo(l) 190 if language_locale_counts[locale.language] > 1: 191 # Disambiguate translations for this language. 
192 locale.use_display_name = True 193 194 locale_map[str(locale)] = locale 195 196 global LOCALES 197 LOCALES = locale_map 198 199 200 def configure(config: SDConfig, app: Flask) -> None: 201 babel = configure_babel(config, app) 202 validate_locale_configuration(config, babel) 203 map_locale_display_names(config) 204 205 206 def get_locale(config: SDConfig) -> str: 207 """ 208 Return the best supported locale for a request. 209 210 Get the locale as follows, by order of precedence: 211 - l request argument or session['locale'] 212 - browser suggested locale, from the Accept-Languages header 213 - config.DEFAULT_LOCALE 214 - config.FALLBACK_LOCALE 215 """ 216 preferences = [] 217 if session and session.get("locale"): 218 preferences.append(session.get("locale")) 219 if request.args.get("l"): 220 preferences.insert(0, request.args.get("l")) 221 if not preferences: 222 preferences.extend(get_accepted_languages()) 223 preferences.append(config.DEFAULT_LOCALE) 224 preferences.append(FALLBACK_LOCALE) 225 226 negotiated = negotiate_locale(preferences, LOCALES.keys()) 227 228 if not negotiated: 229 raise ValueError("No usable locale") 230 231 return negotiated 232 233 234 def get_accepted_languages() -> List[str]: 235 """ 236 Convert a request's list of accepted languages into locale identifiers. 237 """ 238 accept_languages = [] 239 for l in request.accept_languages.values(): 240 try: 241 parsed = Locale.parse(l, "-") 242 accept_languages.append(str(parsed)) 243 244 # We only have two Chinese translations, simplified 245 # and traditional, based on script and not 246 # region. Browsers tend to send identifiers with 247 # region, e.g. zh-CN or zh-TW. Babel can generally 248 # infer the script from those, so we can fabricate a 249 # fallback entry without region, in the hope that it 250 # will match one of our translations and the site will 251 # at least be more legible at first contact than the 252 # probable default locale of English. 253 if parsed.language == "zh" and parsed.script: 254 accept_languages.append(str(Locale(language=parsed.language, script=parsed.script))) 255 except (ValueError, UnknownLocaleError): 256 pass 257 return accept_languages 258 259 260 def set_locale(config: SDConfig) -> None: 261 """ 262 Update locale info in request and session. 263 """ 264 locale = get_locale(config) 265 g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot 266 session["locale"] = locale 267 g.locales = LOCALES # pylint: disable=assigning-non-slot 268 [end of securedrop/i18n.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/securedrop/i18n.py b/securedrop/i18n.py --- a/securedrop/i18n.py +++ b/securedrop/i18n.py @@ -16,7 +16,7 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. # import collections -from typing import Dict, List, Set +from typing import Dict, List, OrderedDict, Set from babel.core import ( Locale, @@ -25,7 +25,7 @@ negotiate_locale, parse_locale, ) -from flask import Flask, g, request, session +from flask import Flask, current_app, g, request, session from flask_babel import Babel from sdconfig import FALLBACK_LOCALE, SDConfig @@ -128,7 +128,7 @@ return {Locale.parse(code) for code in codes} -def validate_locale_configuration(config: SDConfig, babel: Babel) -> None: +def validate_locale_configuration(config: SDConfig, babel: Babel) -> Set[Locale]: """ Check that configured locales are available in the filesystem and therefore usable by Babel. Warn about configured locales that are not usable, unless we're left with @@ -157,16 +157,12 @@ f"None of the default locales {defaults} are in the set of usable locales {usable}" ) - global USABLE_LOCALES - USABLE_LOCALES = usable + return usable -# TODO(#6420): avoid relying on and manipulating on this global state -LOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo] -USABLE_LOCALES = set() # type: Set[Locale] - - -def map_locale_display_names(config: SDConfig) -> None: +def map_locale_display_names( + config: SDConfig, usable_locales: Set[Locale] +) -> OrderedDict[str, RequestLocaleInfo]: """ Create a map of locale identifiers to names for display. @@ -183,7 +179,7 @@ locale_map = collections.OrderedDict() for l in sorted(config.SUPPORTED_LOCALES): - if Locale.parse(l) not in USABLE_LOCALES: + if Locale.parse(l) not in usable_locales: continue locale = RequestLocaleInfo(l) @@ -193,14 +189,13 @@ locale_map[str(locale)] = locale - global LOCALES - LOCALES = locale_map + return locale_map def configure(config: SDConfig, app: Flask) -> None: babel = configure_babel(config, app) - validate_locale_configuration(config, babel) - map_locale_display_names(config) + usable_locales = validate_locale_configuration(config, babel) + app.config["LOCALES"] = map_locale_display_names(config, usable_locales) def get_locale(config: SDConfig) -> str: @@ -223,7 +218,8 @@ preferences.append(config.DEFAULT_LOCALE) preferences.append(FALLBACK_LOCALE) - negotiated = negotiate_locale(preferences, LOCALES.keys()) + locales = current_app.config["LOCALES"] + negotiated = negotiate_locale(preferences, locales.keys()) if not negotiated: raise ValueError("No usable locale") @@ -264,4 +260,4 @@ locale = get_locale(config) g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot session["locale"] = locale - g.locales = LOCALES # pylint: disable=assigning-non-slot + g.locales = current_app.config["LOCALES"] # pylint: disable=assigning-non-slot
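The heart of this patch is swapping module-level globals for values stored on the Flask application and read back through current_app. A stripped-down sketch of that pattern, with the locale data invented purely for illustration:

```
from flask import Flask, current_app

def configure(app: Flask) -> None:
    # Compute the value once at startup and store it on the app,
    # instead of assigning to a module-level global.
    app.config["LOCALES"] = {"en_US": "English", "fr": "French"}

def locale_names():
    # Request-time code reads the app-scoped value back via current_app,
    # so separate apps (and tests) no longer share hidden global state.
    return list(current_app.config["LOCALES"].values())
```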
{"golden_diff": "diff --git a/securedrop/i18n.py b/securedrop/i18n.py\n--- a/securedrop/i18n.py\n+++ b/securedrop/i18n.py\n@@ -16,7 +16,7 @@\n # along with this program. If not, see <http://www.gnu.org/licenses/>.\n #\n import collections\n-from typing import Dict, List, Set\n+from typing import Dict, List, OrderedDict, Set\n \n from babel.core import (\n Locale,\n@@ -25,7 +25,7 @@\n negotiate_locale,\n parse_locale,\n )\n-from flask import Flask, g, request, session\n+from flask import Flask, current_app, g, request, session\n from flask_babel import Babel\n from sdconfig import FALLBACK_LOCALE, SDConfig\n \n@@ -128,7 +128,7 @@\n return {Locale.parse(code) for code in codes}\n \n \n-def validate_locale_configuration(config: SDConfig, babel: Babel) -> None:\n+def validate_locale_configuration(config: SDConfig, babel: Babel) -> Set[Locale]:\n \"\"\"\n Check that configured locales are available in the filesystem and therefore usable by\n Babel. Warn about configured locales that are not usable, unless we're left with\n@@ -157,16 +157,12 @@\n f\"None of the default locales {defaults} are in the set of usable locales {usable}\"\n )\n \n- global USABLE_LOCALES\n- USABLE_LOCALES = usable\n+ return usable\n \n \n-# TODO(#6420): avoid relying on and manipulating on this global state\n-LOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo]\n-USABLE_LOCALES = set() # type: Set[Locale]\n-\n-\n-def map_locale_display_names(config: SDConfig) -> None:\n+def map_locale_display_names(\n+ config: SDConfig, usable_locales: Set[Locale]\n+) -> OrderedDict[str, RequestLocaleInfo]:\n \"\"\"\n Create a map of locale identifiers to names for display.\n \n@@ -183,7 +179,7 @@\n \n locale_map = collections.OrderedDict()\n for l in sorted(config.SUPPORTED_LOCALES):\n- if Locale.parse(l) not in USABLE_LOCALES:\n+ if Locale.parse(l) not in usable_locales:\n continue\n \n locale = RequestLocaleInfo(l)\n@@ -193,14 +189,13 @@\n \n locale_map[str(locale)] = locale\n \n- global LOCALES\n- LOCALES = locale_map\n+ return locale_map\n \n \n def configure(config: SDConfig, app: Flask) -> None:\n babel = configure_babel(config, app)\n- validate_locale_configuration(config, babel)\n- map_locale_display_names(config)\n+ usable_locales = validate_locale_configuration(config, babel)\n+ app.config[\"LOCALES\"] = map_locale_display_names(config, usable_locales)\n \n \n def get_locale(config: SDConfig) -> str:\n@@ -223,7 +218,8 @@\n preferences.append(config.DEFAULT_LOCALE)\n preferences.append(FALLBACK_LOCALE)\n \n- negotiated = negotiate_locale(preferences, LOCALES.keys())\n+ locales = current_app.config[\"LOCALES\"]\n+ negotiated = negotiate_locale(preferences, locales.keys())\n \n if not negotiated:\n raise ValueError(\"No usable locale\")\n@@ -264,4 +260,4 @@\n locale = get_locale(config)\n g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot\n session[\"locale\"] = locale\n- g.locales = LOCALES # pylint: disable=assigning-non-slot\n+ g.locales = current_app.config[\"LOCALES\"] # pylint: disable=assigning-non-slot\n", "issue": "runtime `i18n` configuration manipulates global state\nI guess we already do this for LOCALES, but I think continuing the pattern should be accompanied by a TODO that manipulating/relying on global state is not desirable.\r\n\r\n_Originally posted by @legoktm in https://github.com/freedomofpress/securedrop/pull/6406#discussion_r863080227_\n", "before_files": [{"content": "#\n# SecureDrop whistleblower submission system\n# Copyright (C) 2017 Loic 
Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nimport collections\nfrom typing import Dict, List, Set\n\nfrom babel.core import (\n Locale,\n UnknownLocaleError,\n get_locale_identifier,\n negotiate_locale,\n parse_locale,\n)\nfrom flask import Flask, g, request, session\nfrom flask_babel import Babel\nfrom sdconfig import FALLBACK_LOCALE, SDConfig\n\n\nclass RequestLocaleInfo:\n \"\"\"\n Convenience wrapper around a babel.core.Locale.\n \"\"\"\n\n def __init__(self, locale: str):\n self.locale = Locale.parse(locale)\n\n # This attribute can be set to `True` to differentiate multiple\n # locales currently available (supported) for the same language.\n self.use_display_name = False\n\n def __str__(self) -> str:\n \"\"\"\n The Babel string representation of the locale.\n \"\"\"\n return str(self.locale)\n\n @property\n def display_name(self) -> str:\n \"\"\"\n Give callers (i.e., templates) the `Locale` object's display name when\n such resolution is warranted, otherwise the language name---as\n determined by `map_locale_display_names()`.\n \"\"\"\n if self.use_display_name:\n return self.locale.display_name\n return self.locale.language_name\n\n @property\n def text_direction(self) -> str:\n \"\"\"\n The Babel text direction: ltr or rtl.\n\n Used primarily to set text direction in HTML via the \"dir\"\n attribute.\n \"\"\"\n return self.locale.text_direction\n\n @property\n def language(self) -> str:\n \"\"\"\n The Babel language name.\n\n Just the language, without subtag info like region or script.\n \"\"\"\n return self.locale.language\n\n @property\n def id(self) -> str:\n \"\"\"\n The Babel string representation of the locale.\n\n This should match the name of the directory containing its\n translations.\n \"\"\"\n return str(self.locale)\n\n @property\n def language_tag(self) -> str:\n \"\"\"\n Returns a BCP47/RFC5646 language tag for the locale.\n\n Language tags are used in HTTP headers and the HTML lang\n attribute.\n \"\"\"\n return get_locale_identifier(parse_locale(str(self.locale)), sep=\"-\")\n\n\ndef configure_babel(config: SDConfig, app: Flask) -> Babel:\n \"\"\"\n Set up Flask-Babel according to the SecureDrop configuration.\n \"\"\"\n # Tell Babel where to find our translations.\n translations_directory = str(config.TRANSLATION_DIRS.absolute())\n app.config[\"BABEL_TRANSLATION_DIRECTORIES\"] = translations_directory\n\n # Create the app's Babel instance. 
Passing the app to the\n # constructor causes the instance to attach itself to the app.\n babel = Babel(app)\n\n # verify that Babel is only using the translations we told it about\n if list(babel.translation_directories) != [translations_directory]:\n raise ValueError(\n \"Babel translation directories ({}) do not match SecureDrop configuration ({})\".format(\n babel.translation_directories, [translations_directory]\n )\n )\n\n # register the function used to determine the locale of a request\n babel.localeselector(lambda: get_locale(config))\n return babel\n\n\ndef parse_locale_set(codes: List[str]) -> Set[Locale]:\n return {Locale.parse(code) for code in codes}\n\n\ndef validate_locale_configuration(config: SDConfig, babel: Babel) -> None:\n \"\"\"\n Check that configured locales are available in the filesystem and therefore usable by\n Babel. Warn about configured locales that are not usable, unless we're left with\n no usable default or fallback locale, in which case raise an exception.\n \"\"\"\n # These locales are available and loadable from the filesystem.\n available = set(babel.list_translations())\n available.add(Locale.parse(FALLBACK_LOCALE))\n\n # These locales were configured via \"securedrop-admin sdconfig\", meaning\n # they were present on the Admin Workstation at \"securedrop-admin\" runtime.\n configured = parse_locale_set(config.SUPPORTED_LOCALES)\n\n # The intersection of these sets is the set of locales usable by Babel.\n usable = available & configured\n\n missing = configured - usable\n if missing:\n babel.app.logger.error(\n f\"Configured locales {missing} are not in the set of usable locales {usable}\"\n )\n\n defaults = parse_locale_set([config.DEFAULT_LOCALE, FALLBACK_LOCALE])\n if not defaults & usable:\n raise ValueError(\n f\"None of the default locales {defaults} are in the set of usable locales {usable}\"\n )\n\n global USABLE_LOCALES\n USABLE_LOCALES = usable\n\n\n# TODO(#6420): avoid relying on and manipulating on this global state\nLOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo]\nUSABLE_LOCALES = set() # type: Set[Locale]\n\n\ndef map_locale_display_names(config: SDConfig) -> None:\n \"\"\"\n Create a map of locale identifiers to names for display.\n\n For most of our supported languages, we only provide one\n translation, so including the full display name is not necessary\n to distinguish them. 
For languages with more than one translation,\n like Chinese, we do need the additional detail.\n \"\"\"\n\n language_locale_counts = collections.defaultdict(int) # type: Dict[str, int]\n for l in sorted(config.SUPPORTED_LOCALES):\n locale = RequestLocaleInfo(l)\n language_locale_counts[locale.language] += 1\n\n locale_map = collections.OrderedDict()\n for l in sorted(config.SUPPORTED_LOCALES):\n if Locale.parse(l) not in USABLE_LOCALES:\n continue\n\n locale = RequestLocaleInfo(l)\n if language_locale_counts[locale.language] > 1:\n # Disambiguate translations for this language.\n locale.use_display_name = True\n\n locale_map[str(locale)] = locale\n\n global LOCALES\n LOCALES = locale_map\n\n\ndef configure(config: SDConfig, app: Flask) -> None:\n babel = configure_babel(config, app)\n validate_locale_configuration(config, babel)\n map_locale_display_names(config)\n\n\ndef get_locale(config: SDConfig) -> str:\n \"\"\"\n Return the best supported locale for a request.\n\n Get the locale as follows, by order of precedence:\n - l request argument or session['locale']\n - browser suggested locale, from the Accept-Languages header\n - config.DEFAULT_LOCALE\n - config.FALLBACK_LOCALE\n \"\"\"\n preferences = []\n if session and session.get(\"locale\"):\n preferences.append(session.get(\"locale\"))\n if request.args.get(\"l\"):\n preferences.insert(0, request.args.get(\"l\"))\n if not preferences:\n preferences.extend(get_accepted_languages())\n preferences.append(config.DEFAULT_LOCALE)\n preferences.append(FALLBACK_LOCALE)\n\n negotiated = negotiate_locale(preferences, LOCALES.keys())\n\n if not negotiated:\n raise ValueError(\"No usable locale\")\n\n return negotiated\n\n\ndef get_accepted_languages() -> List[str]:\n \"\"\"\n Convert a request's list of accepted languages into locale identifiers.\n \"\"\"\n accept_languages = []\n for l in request.accept_languages.values():\n try:\n parsed = Locale.parse(l, \"-\")\n accept_languages.append(str(parsed))\n\n # We only have two Chinese translations, simplified\n # and traditional, based on script and not\n # region. Browsers tend to send identifiers with\n # region, e.g. zh-CN or zh-TW. Babel can generally\n # infer the script from those, so we can fabricate a\n # fallback entry without region, in the hope that it\n # will match one of our translations and the site will\n # at least be more legible at first contact than the\n # probable default locale of English.\n if parsed.language == \"zh\" and parsed.script:\n accept_languages.append(str(Locale(language=parsed.language, script=parsed.script)))\n except (ValueError, UnknownLocaleError):\n pass\n return accept_languages\n\n\ndef set_locale(config: SDConfig) -> None:\n \"\"\"\n Update locale info in request and session.\n \"\"\"\n locale = get_locale(config)\n g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot\n session[\"locale\"] = locale\n g.locales = LOCALES # pylint: disable=assigning-non-slot\n", "path": "securedrop/i18n.py"}]}
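For the negotiation step that get_locale relies on, a tiny self-contained sketch of Babel's helper with an invented preference list: the first preferred locale that is also available wins.

```
from babel.core import negotiate_locale

available = ["en_US", "fr"]
preferences = ["de_DE", "fr", "en_US"]  # earlier entries take precedence

# "de_DE" is not available, so "fr" is returned.
print(negotiate_locale(preferences, available))  # fr
```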
3,329
850
gh_patches_debug_30297
rasdani/github-patches
git_diff
nltk__nltk-926
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BLEU score brevity penalty does not follow definition Papineni et al. (2002), if I understand correctly, define the BLEU brevity penalty as follows: - let _c_ be the length of the candidate - let _r_ be the length of the reference which is closest in length to the candidate Then, BP = 1 if c > r, and BP = exp(1 - r / c) otherwise. (There are some details about doing this at the corpus level, but they're not relevant to the point at hand.) But this not what `nltk.align.bleu_score._brevity_penalty` (called by `nltk.align.bleu_score`) computes. Rather, it computes _r_ as follows: ``` c = len(candidate) r = min(abs(len(r) - c) for r in references) ``` So if _c_ is 12 and _r_ under Papineni et al.'s definition is 28, then this function will set `r` to 16, not 28. However, it is possible I have misunderstood the original paper. And it would be ideal to test this against a canonical implementation. But if I have understood correctly, the fix would be something like: ``` c = len(candidate) ref_lens = [len(ref) for ref in references] r = min(ref_lens, key=lambda ref_len: abs(ref_len - c)) ``` </issue> <code> [start of nltk/align/bleu_score.py] 1 # -*- coding: utf-8 -*- 2 # Natural Language Toolkit: BLEU Score 3 # 4 # Copyright (C) 2001-2015 NLTK Project 5 # Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim 6 # Contributors: Dmitrijs Milajevs 7 # URL: <http://nltk.org/> 8 # For license information, see LICENSE.TXT 9 """BLEU score implementation.""" 10 11 from __future__ import division 12 13 import math 14 15 from nltk.tokenize import word_tokenize 16 from nltk.compat import Counter 17 from nltk.util import ngrams 18 19 20 def bleu(candidate, references, weights): 21 """Calculate BLEU score (Bilingual Evaluation Understudy) 22 23 :param candidate: a candidate sentence 24 :type candidate: list(str) 25 :param references: reference sentences 26 :type references: list(list(str)) 27 :param weights: weights for unigrams, bigrams, trigrams and so on 28 :type weights: list(float) 29 30 >>> weights = [0.25, 0.25, 0.25, 0.25] 31 >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 32 ... 'ensures', 'that', 'the', 'military', 'always', 33 ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] 34 35 >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops', 36 ... 'forever', 'hearing', 'the', 'activity', 'guidebook', 37 ... 'that', 'party', 'direct'] 38 39 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 40 ... 'ensures', 'that', 'the', 'military', 'will', 'forever', 41 ... 'heed', 'Party', 'commands'] 42 43 >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', 44 ... 'guarantees', 'the', 'military', 'forces', 'always', 45 ... 'being', 'under', 'the', 'command', 'of', 'the', 46 ... 'Party'] 47 48 >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', 49 ... 'army', 'always', 'to', 'heed', 'the', 'directions', 50 ... 'of', 'the', 'party'] 51 52 >>> bleu(candidate1, [reference1, reference2, reference3], weights) 53 0.504... 54 55 >>> bleu(candidate2, [reference1, reference2, reference3], weights) 56 0 57 58 Papineni, Kishore, et al. "BLEU: A method for automatic evaluation of 59 machine translation." Proceedings of the 40th annual meeting on association for 60 computational linguistics. Association for Computational Linguistics, 2002. 
61 http://www.aclweb.org/anthology/P02-1040.pdf 62 63 """ 64 p_ns = ( 65 _modified_precision(candidate, references, i) 66 for i, _ in enumerate(weights, start=1) 67 ) 68 69 try: 70 s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns)) 71 except ValueError: 72 # some p_ns is 0 73 return 0 74 75 bp = _brevity_penalty(candidate, references) 76 return bp * math.exp(s) 77 78 79 def _modified_precision(candidate, references, n): 80 """Calculate modified ngram precision. 81 82 The normal precision method may lead to some wrong translations with 83 high-precision, e.g., the translation, in which a word of reference 84 repeats several times, has very high precision. So in the modified 85 n-gram precision, a reference word will be considered exhausted after 86 a matching candidate word is identified. 87 88 Paper examples: 89 90 >>> _modified_precision( 91 ... 'the the the the the the the'.split(), 92 ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()], 93 ... n=1, 94 ... ) 95 0.28... 96 97 >>> _modified_precision( 98 ... 'the the the the the the the'.split(), 99 ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()], 100 ... n=2, 101 ... ) 102 0.0 103 104 >>> _modified_precision( 105 ... 'of the'.split(), 106 ... [ 107 ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(), 108 ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(), 109 ... 'It is the practical guide for the army always to heed the directions of the party'.split(), 110 ... ], 111 ... n=1, 112 ... ) 113 1.0 114 115 >>> _modified_precision( 116 ... 'of the'.split(), 117 ... [ 118 ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(), 119 ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(), 120 ... 'It is the practical guide for the army always to heed the directions of the party'.split(), 121 ... ], 122 ... n=2, 123 ... ) 124 1.0 125 126 More examples: 127 128 >>> weights = [0.25, 0.25, 0.25, 0.25] 129 >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 130 ... 'ensures', 'that', 'the', 'military', 'always', 131 ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] 132 133 >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops', 134 ... 'forever', 'hearing', 'the', 'activity', 'guidebook', 135 ... 'that', 'party', 'direct'] 136 137 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 138 ... 'ensures', 'that', 'the', 'military', 'will', 'forever', 139 ... 'heed', 'Party', 'commands'] 140 141 >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', 142 ... 'guarantees', 'the', 'military', 'forces', 'always', 143 ... 'being', 'under', 'the', 'command', 'of', 'the', 144 ... 'Party'] 145 146 >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', 147 ... 'army', 'always', 'to', 'heed', 'the', 'directions', 148 ... 'of', 'the', 'party'] 149 150 Unigrams: 151 152 >>> _modified_precision( 153 ... candidate1, 154 ... [reference1, reference2, reference3], 155 ... n=1, 156 ... ) 157 0.94... 158 159 >>> _modified_precision( 160 ... candidate2, 161 ... [reference1, reference2, reference3], 162 ... n=1, 163 ... ) 164 0.57... 165 166 Bigrams: 167 168 >>> _modified_precision( 169 ... candidate1, 170 ... [reference1, reference2, reference3], 171 ... n=2, 172 ... ) 173 0.58... 
174 175 >>> _modified_precision( 176 ... candidate2, 177 ... [reference1, reference2, reference3], 178 ... n=2, 179 ... ) 180 0.07... 181 182 """ 183 counts = Counter(ngrams(candidate, n)) 184 185 if not counts: 186 return 0 187 188 max_counts = {} 189 for reference in references: 190 reference_counts = Counter(ngrams(reference, n)) 191 for ngram in counts: 192 max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram]) 193 194 clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items()) 195 196 return sum(clipped_counts.values()) / sum(counts.values()) 197 198 199 def _brevity_penalty(candidate, references): 200 """Calculate brevity penalty. 201 202 As the modified n-gram precision still has the problem from the short 203 length sentence, brevity penalty is used to modify the overall BLEU 204 score according to length. 205 206 """ 207 c = len(candidate) 208 r = min(abs(len(r) - c) for r in references) 209 210 if c > r: 211 return 1 212 else: 213 return math.exp(1 - r / c) 214 215 216 # run doctests 217 if __name__ == "__main__": 218 import doctest 219 doctest.testmod(optionflags=doctest.ELLIPSIS) 220 [end of nltk/align/bleu_score.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nltk/align/bleu_score.py b/nltk/align/bleu_score.py --- a/nltk/align/bleu_score.py +++ b/nltk/align/bleu_score.py @@ -203,9 +203,57 @@ length sentence, brevity penalty is used to modify the overall BLEU score according to length. + An example from the paper. There are three references with length 12, 15 + and 17. And a terse candidate of the length 12. The brevity penalty is 1. + + >>> references = [['a'] * 12, ['a'] * 15, ['a'] * 17] + >>> candidate = ['a'] * 12 + >>> _brevity_penalty(candidate, references) + 1.0 + + In case a candidate translation is shorter than the references, penalty is + applied. + + >>> references = [['a'] * 28, ['a'] * 28] + >>> candidate = ['a'] * 12 + >>> _brevity_penalty(candidate, references) + 0.2635... + + The length of the closest reference is used to compute the penalty. If the + length of a candidate is 12, and the reference lengths are 13 and 2, the + penalty is applied because the candidate length (12) is less then the + closest reference length (13). + + >>> references = [['a'] * 13, ['a'] * 2] + >>> candidate = ['a'] * 12 + >>> _brevity_penalty(candidate, references) + 0.92... + + The brevity penalty doesn't depend on reference order. More importantly, + when two reference sentences are at the same distance, the shortest + reference sentence length is used. + + >>> references = [['a'] * 13, ['a'] * 11] + >>> candidate = ['a'] * 12 + >>> _brevity_penalty(candidate, references) == _brevity_penalty(candidate, reversed(references)) == 1 + True + + A test example from mteval-v13a.pl (starting from the line 705): + + >>> references = [['a'] * 11, ['a'] * 8] + >>> candidate = ['a'] * 7 + >>> _brevity_penalty(candidate, references) + 0.86... + + >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7] + >>> candidate = ['a'] * 7 + >>> _brevity_penalty(candidate, references) + 1.0 + """ c = len(candidate) - r = min(abs(len(r) - c) for r in references) + ref_lens = (len(reference) for reference in references) + r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len)) if c > r: return 1
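Concretely, the corrected brevity penalty picks the reference length closest to the candidate length and breaks ties toward the shorter reference. A worked toy example, separate from the record above and using made-up sentence lengths:

```
import math

def brevity_penalty(candidate_len, reference_lens):
    # Closest reference length wins; on a tie, the shorter reference is used.
    r = min(reference_lens, key=lambda ref_len: (abs(ref_len - candidate_len), ref_len))
    if candidate_len > r:
        return 1.0
    return math.exp(1 - r / candidate_len)

print(brevity_penalty(12, [13, 11]))   # tie at distance 1 -> r = 11 -> penalty 1.0
print(brevity_penalty(12, [28, 28]))   # r = 28 -> exp(1 - 28/12), about 0.2635
```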
{"golden_diff": "diff --git a/nltk/align/bleu_score.py b/nltk/align/bleu_score.py\n--- a/nltk/align/bleu_score.py\n+++ b/nltk/align/bleu_score.py\n@@ -203,9 +203,57 @@\n length sentence, brevity penalty is used to modify the overall BLEU\n score according to length.\n \n+ An example from the paper. There are three references with length 12, 15\n+ and 17. And a terse candidate of the length 12. The brevity penalty is 1.\n+\n+ >>> references = [['a'] * 12, ['a'] * 15, ['a'] * 17]\n+ >>> candidate = ['a'] * 12\n+ >>> _brevity_penalty(candidate, references)\n+ 1.0\n+\n+ In case a candidate translation is shorter than the references, penalty is\n+ applied.\n+\n+ >>> references = [['a'] * 28, ['a'] * 28]\n+ >>> candidate = ['a'] * 12\n+ >>> _brevity_penalty(candidate, references)\n+ 0.2635...\n+\n+ The length of the closest reference is used to compute the penalty. If the\n+ length of a candidate is 12, and the reference lengths are 13 and 2, the\n+ penalty is applied because the candidate length (12) is less then the\n+ closest reference length (13).\n+\n+ >>> references = [['a'] * 13, ['a'] * 2]\n+ >>> candidate = ['a'] * 12\n+ >>> _brevity_penalty(candidate, references)\n+ 0.92...\n+\n+ The brevity penalty doesn't depend on reference order. More importantly,\n+ when two reference sentences are at the same distance, the shortest\n+ reference sentence length is used.\n+\n+ >>> references = [['a'] * 13, ['a'] * 11]\n+ >>> candidate = ['a'] * 12\n+ >>> _brevity_penalty(candidate, references) == _brevity_penalty(candidate, reversed(references)) == 1\n+ True\n+\n+ A test example from mteval-v13a.pl (starting from the line 705):\n+\n+ >>> references = [['a'] * 11, ['a'] * 8]\n+ >>> candidate = ['a'] * 7\n+ >>> _brevity_penalty(candidate, references)\n+ 0.86...\n+\n+ >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]\n+ >>> candidate = ['a'] * 7\n+ >>> _brevity_penalty(candidate, references)\n+ 1.0\n+\n \"\"\"\n c = len(candidate)\n- r = min(abs(len(r) - c) for r in references)\n+ ref_lens = (len(reference) for reference in references)\n+ r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))\n \n if c > r:\n return 1\n", "issue": "BLEU score brevity penalty does not follow definition\nPapineni et al. (2002), if I understand correctly, define the BLEU brevity penalty as follows:\n- let _c_ be the length of the candidate\n- let _r_ be the length of the reference which is closest in length to the candidate\n\nThen, BP = 1 if c > r, and BP = exp(1 - r / c) otherwise.\n\n(There are some details about doing this at the corpus level, but they're not relevant to the point at hand.) \n\nBut this not what `nltk.align.bleu_score._brevity_penalty` (called by `nltk.align.bleu_score`) computes. Rather, it computes _r_ as follows:\n\n```\nc = len(candidate)\nr = min(abs(len(r) - c) for r in references)\n```\n\nSo if _c_ is 12 and _r_ under Papineni et al.'s definition is 28, then this function will set `r` to 16, not 28.\n\nHowever, it is possible I have misunderstood the original paper. And it would be ideal to test this against a canonical implementation. 
But if I have understood correctly, the fix would be something like:\n\n```\nc = len(candidate)\nref_lens = [len(ref) for ref in references]\nr = min(ref_lens, key=lambda ref_len: abs(ref_len - c))\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Natural Language Toolkit: BLEU Score\n#\n# Copyright (C) 2001-2015 NLTK Project\n# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim\n# Contributors: Dmitrijs Milajevs\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\"\"\"BLEU score implementation.\"\"\"\n\nfrom __future__ import division\n\nimport math\n\nfrom nltk.tokenize import word_tokenize\nfrom nltk.compat import Counter\nfrom nltk.util import ngrams\n\n\ndef bleu(candidate, references, weights):\n \"\"\"Calculate BLEU score (Bilingual Evaluation Understudy)\n\n :param candidate: a candidate sentence\n :type candidate: list(str)\n :param references: reference sentences\n :type references: list(list(str))\n :param weights: weights for unigrams, bigrams, trigrams and so on\n :type weights: list(float)\n\n >>> weights = [0.25, 0.25, 0.25, 0.25]\n >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n >>> bleu(candidate1, [reference1, reference2, reference3], weights)\n 0.504...\n\n >>> bleu(candidate2, [reference1, reference2, reference3], weights)\n 0\n\n Papineni, Kishore, et al. \"BLEU: A method for automatic evaluation of\n machine translation.\" Proceedings of the 40th annual meeting on association for\n computational linguistics. Association for Computational Linguistics, 2002.\n http://www.aclweb.org/anthology/P02-1040.pdf\n\n \"\"\"\n p_ns = (\n _modified_precision(candidate, references, i)\n for i, _ in enumerate(weights, start=1)\n )\n\n try:\n s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns))\n except ValueError:\n # some p_ns is 0\n return 0\n\n bp = _brevity_penalty(candidate, references)\n return bp * math.exp(s)\n\n\ndef _modified_precision(candidate, references, n):\n \"\"\"Calculate modified ngram precision.\n\n The normal precision method may lead to some wrong translations with\n high-precision, e.g., the translation, in which a word of reference\n repeats several times, has very high precision. So in the modified\n n-gram precision, a reference word will be considered exhausted after\n a matching candidate word is identified.\n\n Paper examples:\n\n >>> _modified_precision(\n ... 'the the the the the the the'.split(),\n ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],\n ... n=1,\n ... )\n 0.28...\n\n >>> _modified_precision(\n ... 'the the the the the the the'.split(),\n ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],\n ... n=2,\n ... 
)\n 0.0\n\n >>> _modified_precision(\n ... 'of the'.split(),\n ... [\n ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),\n ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),\n ... 'It is the practical guide for the army always to heed the directions of the party'.split(),\n ... ],\n ... n=1,\n ... )\n 1.0\n\n >>> _modified_precision(\n ... 'of the'.split(),\n ... [\n ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),\n ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),\n ... 'It is the practical guide for the army always to heed the directions of the party'.split(),\n ... ],\n ... n=2,\n ... )\n 1.0\n\n More examples:\n\n >>> weights = [0.25, 0.25, 0.25, 0.25]\n >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n Unigrams:\n\n >>> _modified_precision(\n ... candidate1,\n ... [reference1, reference2, reference3],\n ... n=1,\n ... )\n 0.94...\n\n >>> _modified_precision(\n ... candidate2,\n ... [reference1, reference2, reference3],\n ... n=1,\n ... )\n 0.57...\n\n Bigrams:\n\n >>> _modified_precision(\n ... candidate1,\n ... [reference1, reference2, reference3],\n ... n=2,\n ... )\n 0.58...\n\n >>> _modified_precision(\n ... candidate2,\n ... [reference1, reference2, reference3],\n ... n=2,\n ... )\n 0.07...\n\n \"\"\"\n counts = Counter(ngrams(candidate, n))\n\n if not counts:\n return 0\n\n max_counts = {}\n for reference in references:\n reference_counts = Counter(ngrams(reference, n))\n for ngram in counts:\n max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])\n\n clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items())\n\n return sum(clipped_counts.values()) / sum(counts.values())\n\n\ndef _brevity_penalty(candidate, references):\n \"\"\"Calculate brevity penalty.\n\n As the modified n-gram precision still has the problem from the short\n length sentence, brevity penalty is used to modify the overall BLEU\n score according to length.\n\n \"\"\"\n c = len(candidate)\n r = min(abs(len(r) - c) for r in references)\n\n if c > r:\n return 1\n else:\n return math.exp(1 - r / c)\n\n\n# run doctests\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(optionflags=doctest.ELLIPSIS)\n", "path": "nltk/align/bleu_score.py"}]}
3,458
728
gh_patches_debug_64325
rasdani/github-patches
git_diff
pex-tool__pex-1725
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.80 On the docket: + [x] Support booting via `/bin/sh` with `--sh-boot`. (#1721) + [x] Fix more pathologic lock creation slowness. (#1723) </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.79" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.79" +__version__ = "2.1.80"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.79\"\n+__version__ = \"2.1.80\"\n", "issue": "Release 2.1.80\nOn the docket:\r\n+ [x] Support booting via `/bin/sh` with `--sh-boot`. (#1721)\r\n+ [x] Fix more pathologic lock creation slowness. (#1723)\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.79\"\n", "path": "pex/version.py"}]}
640
96
gh_patches_debug_43869
rasdani/github-patches
git_diff
aws__aws-cli-3331
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> aws configure get and aws configure set with multiword profile names are inconsistent It seems that `aws configure set --profile "two words"` will add single quotes around the profile name, but `aws configure get --profile "two words"` will search for a profile name that does not have single quotes around the profile name. These two methods should behave in a similar manner. To reproduce: ``` $ aws --version aws-cli/1.15.10 Python/3.6.5 Darwin/17.4.0 botocore/1.10.10 $ aws configure set aws_access_key_id test --profile "test profile" $ aws configure get aws_access_key_id --profile "test profile" The config profile (test profile) could not be found $ aws configure get aws_access_key_id --profile "'test profile'" test ``` </issue> <code> [start of awscli/customizations/configure/set.py] 1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"). You 4 # may not use this file except in compliance with the License. A copy of 5 # the License is located at 6 # 7 # http://aws.amazon.com/apache2.0/ 8 # 9 # or in the "license" file accompanying this file. This file is 10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific 12 # language governing permissions and limitations under the License. 13 import os 14 15 from awscli.customizations.commands import BasicCommand 16 from awscli.customizations.configure.writer import ConfigFileWriter 17 18 from . import PREDEFINED_SECTION_NAMES, profile_to_section 19 20 21 class ConfigureSetCommand(BasicCommand): 22 NAME = 'set' 23 DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set', 24 '_description.rst') 25 SYNOPSIS = 'aws configure set varname value [--profile profile-name]' 26 EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst') 27 ARG_TABLE = [ 28 {'name': 'varname', 29 'help_text': 'The name of the config value to set.', 30 'action': 'store', 31 'cli_type_name': 'string', 'positional_arg': True}, 32 {'name': 'value', 33 'help_text': 'The value to set.', 34 'action': 'store', 35 'no_paramfile': True, # To disable the default paramfile behavior 36 'cli_type_name': 'string', 'positional_arg': True}, 37 ] 38 # Any variables specified in this list will be written to 39 # the ~/.aws/credentials file instead of ~/.aws/config. 40 _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key', 41 'aws_session_token'] 42 43 def __init__(self, session, config_writer=None): 44 super(ConfigureSetCommand, self).__init__(session) 45 if config_writer is None: 46 config_writer = ConfigFileWriter() 47 self._config_writer = config_writer 48 49 def _run_main(self, args, parsed_globals): 50 varname = args.varname 51 value = args.value 52 section = 'default' 53 # Before handing things off to the config writer, 54 # we need to find out three things: 55 # 1. What section we're writing to (section). 56 # 2. The name of the config key (varname) 57 # 3. The actual value (value). 58 if '.' not in varname: 59 # unqualified name, scope it to the current 60 # profile (or leave it as the 'default' section if 61 # no profile is set). 62 if self._session.profile is not None: 63 section = profile_to_section(self._session.profile) 64 else: 65 # First figure out if it's been scoped to a profile. 
66 parts = varname.split('.') 67 if parts[0] in ('default', 'profile'): 68 # Then we know we're scoped to a profile. 69 if parts[0] == 'default': 70 section = 'default' 71 remaining = parts[1:] 72 else: 73 # [profile, profile_name, ...] 74 section = profile_to_section(parts[1]) 75 remaining = parts[2:] 76 varname = remaining[0] 77 if len(remaining) == 2: 78 value = {remaining[1]: value} 79 elif parts[0] not in PREDEFINED_SECTION_NAMES: 80 if self._session.profile is not None: 81 section = profile_to_section(self._session.profile) 82 else: 83 profile_name = self._session.get_config_variable('profile') 84 if profile_name is not None: 85 section = profile_name 86 varname = parts[0] 87 if len(parts) == 2: 88 value = {parts[1]: value} 89 elif len(parts) == 2: 90 # Otherwise it's something like "set preview.service true" 91 # of something in the [plugin] section. 92 section, varname = parts 93 config_filename = os.path.expanduser( 94 self._session.get_config_variable('config_file')) 95 updated_config = {'__section__': section, varname: value} 96 if varname in self._WRITE_TO_CREDS_FILE: 97 config_filename = os.path.expanduser( 98 self._session.get_config_variable('credentials_file')) 99 section_name = updated_config['__section__'] 100 if section_name.startswith('profile '): 101 updated_config['__section__'] = section_name[8:] 102 self._config_writer.update_config(updated_config, config_filename) 103 [end of awscli/customizations/configure/set.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py --- a/awscli/customizations/configure/set.py +++ b/awscli/customizations/configure/set.py @@ -46,13 +46,17 @@ config_writer = ConfigFileWriter() self._config_writer = config_writer + def _get_config_file(self, path): + config_path = self._session.get_config_variable(path) + return os.path.expanduser(config_path) + def _run_main(self, args, parsed_globals): varname = args.varname value = args.value - section = 'default' + profile = 'default' # Before handing things off to the config writer, # we need to find out three things: - # 1. What section we're writing to (section). + # 1. What section we're writing to (profile). # 2. The name of the config key (varname) # 3. The actual value (value). if '.' not in varname: @@ -60,43 +64,44 @@ # profile (or leave it as the 'default' section if # no profile is set). if self._session.profile is not None: - section = profile_to_section(self._session.profile) + profile = self._session.profile else: # First figure out if it's been scoped to a profile. parts = varname.split('.') if parts[0] in ('default', 'profile'): # Then we know we're scoped to a profile. if parts[0] == 'default': - section = 'default' + profile = 'default' remaining = parts[1:] else: # [profile, profile_name, ...] - section = profile_to_section(parts[1]) + profile = parts[1] remaining = parts[2:] varname = remaining[0] if len(remaining) == 2: value = {remaining[1]: value} elif parts[0] not in PREDEFINED_SECTION_NAMES: if self._session.profile is not None: - section = profile_to_section(self._session.profile) + profile = self._session.profile else: profile_name = self._session.get_config_variable('profile') if profile_name is not None: - section = profile_name + profile = profile_name varname = parts[0] if len(parts) == 2: value = {parts[1]: value} elif len(parts) == 2: # Otherwise it's something like "set preview.service true" # of something in the [plugin] section. - section, varname = parts - config_filename = os.path.expanduser( - self._session.get_config_variable('config_file')) - updated_config = {'__section__': section, varname: value} + profile, varname = parts + config_filename = self._get_config_file('config_file') if varname in self._WRITE_TO_CREDS_FILE: - config_filename = os.path.expanduser( - self._session.get_config_variable('credentials_file')) - section_name = updated_config['__section__'] - if section_name.startswith('profile '): - updated_config['__section__'] = section_name[8:] + # When writing to the creds file, the section is just the profile + section = profile + config_filename = self._get_config_file('credentials_file') + elif profile in PREDEFINED_SECTION_NAMES or profile == 'default': + section = profile + else: + section = profile_to_section(profile) + updated_config = {'__section__': section, varname: value} self._config_writer.update_config(updated_config, config_filename)
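A rough restatement of the rule the patch above ends up applying, written as a standalone helper. The `PREDEFINED_SECTION_NAMES` values, the file paths, and the quoting inside `profile_to_section` are stand-ins inferred from the surrounding code and the reproduction, not the real awscli internals.

```python
import shlex

PREDEFINED_SECTION_NAMES = ('preview', 'plugins')  # placeholder values
CREDS_KEYS = ('aws_access_key_id', 'aws_secret_access_key', 'aws_session_token')

def profile_to_section(profile):
    # Stand-in mirroring the assumed quoting behaviour for multiword names.
    return 'profile %s' % (shlex.quote(profile) if ' ' in profile else profile)

def write_target(varname, profile):
    """Which file and section `aws configure set` should write to."""
    if varname in CREDS_KEYS:
        # Credentials-file sections are the bare profile name, never quoted.
        return '~/.aws/credentials', profile
    if profile == 'default' or profile in PREDEFINED_SECTION_NAMES:
        return '~/.aws/config', profile
    return '~/.aws/config', profile_to_section(profile)

print(write_target('aws_access_key_id', 'test profile'))
# ('~/.aws/credentials', 'test profile')
print(write_target('region', 'test profile'))
# ('~/.aws/config', "profile 'test profile'")
```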
{"golden_diff": "diff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py\n--- a/awscli/customizations/configure/set.py\n+++ b/awscli/customizations/configure/set.py\n@@ -46,13 +46,17 @@\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n \n+ def _get_config_file(self, path):\n+ config_path = self._session.get_config_variable(path)\n+ return os.path.expanduser(config_path)\n+\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n- section = 'default'\n+ profile = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n- # 1. What section we're writing to (section).\n+ # 1. What section we're writing to (profile).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' not in varname:\n@@ -60,43 +64,44 @@\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n- section = profile_to_section(self._session.profile)\n+ profile = self._session.profile\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n- section = 'default'\n+ profile = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n- section = profile_to_section(parts[1])\n+ profile = parts[1]\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n- section = profile_to_section(self._session.profile)\n+ profile = self._session.profile\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n- section = profile_name\n+ profile = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n- section, varname = parts\n- config_filename = os.path.expanduser(\n- self._session.get_config_variable('config_file'))\n- updated_config = {'__section__': section, varname: value}\n+ profile, varname = parts\n+ config_filename = self._get_config_file('config_file')\n if varname in self._WRITE_TO_CREDS_FILE:\n- config_filename = os.path.expanduser(\n- self._session.get_config_variable('credentials_file'))\n- section_name = updated_config['__section__']\n- if section_name.startswith('profile '):\n- updated_config['__section__'] = section_name[8:]\n+ # When writing to the creds file, the section is just the profile\n+ section = profile\n+ config_filename = self._get_config_file('credentials_file')\n+ elif profile in PREDEFINED_SECTION_NAMES or profile == 'default':\n+ section = profile\n+ else:\n+ section = profile_to_section(profile)\n+ updated_config = {'__section__': section, varname: value}\n self._config_writer.update_config(updated_config, config_filename)\n", "issue": "aws configure get and aws configure set with multiword profile names are inconsistent\nIt seems that `aws configure set --profile \"two words\"` will add single quotes around the profile name, but `aws configure get --profile \"two words\"` will search for a profile name that does not have single quotes around the profile name.\r\n\r\nThese two methods should behave in a similar manner.\r\n\r\nTo reproduce:\r\n\r\n```\r\n$ aws 
--version\r\naws-cli/1.15.10 Python/3.6.5 Darwin/17.4.0 botocore/1.10.10\r\n$ aws configure set aws_access_key_id test --profile \"test profile\"\r\n$ aws configure get aws_access_key_id --profile \"test profile\"\r\nThe config profile (test profile) could not be found\r\n$ aws configure get aws_access_key_id --profile \"'test profile'\"\r\ntest\r\n```\n", "before_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . import PREDEFINED_SECTION_NAMES, profile_to_section\n\n\nclass ConfigureSetCommand(BasicCommand):\n NAME = 'set'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',\n '_description.rst')\n SYNOPSIS = 'aws configure set varname value [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to set.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n {'name': 'value',\n 'help_text': 'The value to set.',\n 'action': 'store',\n 'no_paramfile': True, # To disable the default paramfile behavior\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n # Any variables specified in this list will be written to\n # the ~/.aws/credentials file instead of ~/.aws/config.\n _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',\n 'aws_session_token']\n\n def __init__(self, session, config_writer=None):\n super(ConfigureSetCommand, self).__init__(session)\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n section = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n # 1. What section we're writing to (section).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' 
not in varname:\n # unqualified name, scope it to the current\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n section = profile_to_section(self._session.profile)\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n section = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n section = profile_to_section(parts[1])\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n section = profile_to_section(self._session.profile)\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n section = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n section, varname = parts\n config_filename = os.path.expanduser(\n self._session.get_config_variable('config_file'))\n updated_config = {'__section__': section, varname: value}\n if varname in self._WRITE_TO_CREDS_FILE:\n config_filename = os.path.expanduser(\n self._session.get_config_variable('credentials_file'))\n section_name = updated_config['__section__']\n if section_name.startswith('profile '):\n updated_config['__section__'] = section_name[8:]\n self._config_writer.update_config(updated_config, config_filename)\n", "path": "awscli/customizations/configure/set.py"}]}
1,920
833
gh_patches_debug_41410
rasdani/github-patches
git_diff
qtile__qtile-1943
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Popups and child-windows are drawn behind parent windows in certain situations # Issue description In some situations I noticed that popups or child-windows are drawn (or moved) behind their respective parents; for child-windows this can be quite a problem in case the child is blocking, since then the software might be completely unresponsive. Places where I have noticed this and that I remember: 1. Virtualbox (fullscreen) (the main window, not a client): Context menus on VM-list open in the background. 2. Veracrypt (floating) "mount" child-window: The window either spawns in the back or is moved there once it loses focus. There have been some other cases, but due to testing PR #1870, which I initially expected to be the cause for this, I haven't taken note of all of them. Some software definitely has working popup-windows, so Virtualbox might be doing something weird. # Qtile version https://github.com/qtile/qtile/commit/c6b80e9444e9b4e5dc2f52ca1954dc502c45f2eb (newer versions not tested yet) # Configuration I'll only include one snippet here, since verything else (setting up Keys and Widgets) most likely has nothing to do with the problem. ```python @hook.subscribe.client_new def modify_window(client): if (client.window.get_wm_transient_for() or client.window.get_wm_type() in floating_types): client.floating = True ``` On top of this I am currently using fake screens instead of normal screens. It would be nice if somebody could confirm this, since I am not entirely sure this isn't all caused by my own PR. Related to #1870, which could fix at least the described issue with child-windows. </issue> <code> [start of libqtile/layout/floating.py] 1 # Copyright (c) 2010 matt 2 # Copyright (c) 2010-2011 Paul Colomiets 3 # Copyright (c) 2011 Mounier Florian 4 # Copyright (c) 2012 Craig Barnes 5 # Copyright (c) 2012, 2014-2015 Tycho Andersen 6 # Copyright (c) 2013 Tao Sauvage 7 # Copyright (c) 2013 Julien Iguchi-Cartigny 8 # Copyright (c) 2014 ramnes 9 # Copyright (c) 2014 Sean Vig 10 # Copyright (c) 2014 dequis 11 # Copyright (c) 2018 Nazar Mokrynskyi 12 # 13 # Permission is hereby granted, free of charge, to any person obtaining a copy 14 # of this software and associated documentation files (the "Software"), to deal 15 # in the Software without restriction, including without limitation the rights 16 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 # copies of the Software, and to permit persons to whom the Software is 18 # furnished to do so, subject to the following conditions: 19 # 20 # The above copyright notice and this permission notice shall be included in 21 # all copies or substantial portions of the Software. 22 # 23 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 29 # SOFTWARE. 
30 31 import warnings 32 33 from libqtile.config import Match 34 from libqtile.layout.base import Layout 35 from libqtile.log_utils import logger 36 37 38 class Floating(Layout): 39 """ 40 Floating layout, which does nothing with windows but handles focus order 41 """ 42 defaults = [ 43 ("border_focus", "#0000ff", "Border colour for the focused window."), 44 ("border_normal", "#000000", "Border colour for un-focused windows."), 45 ("border_width", 1, "Border width."), 46 ("max_border_width", 0, "Border width for maximize."), 47 ("fullscreen_border_width", 0, "Border width for fullscreen."), 48 ("name", "floating", "Name of this layout."), 49 ] 50 51 def __init__(self, float_rules=None, no_reposition_rules=None, **config): 52 """ 53 If you have certain apps that you always want to float you can provide 54 ``float_rules`` to do so. ``float_rules`` are a list of 55 Match objects:: 56 57 from libqtile.config import Match 58 Match(title=WM_NAME, wm_class=WM_CLASS, role=WM_WINDOW_ROLE) 59 60 When a new window is opened its ``match`` method is called with each of 61 these rules. If one matches, the window will float. The following 62 will float GIMP and Skype:: 63 64 from libqtile.config import Match 65 float_rules=[Match(wm_class="skype"), Match(wm_class="gimp")] 66 67 Specify these in the ``floating_layout`` in your config. 68 69 Floating layout will try to center most of floating windows by default, 70 but if you don't want this to happen for certain windows that are 71 centered by mistake, you can use ``no_reposition_rules`` option to 72 specify them and layout will rely on windows to position themselves in 73 correct location on the screen. 74 """ 75 Layout.__init__(self, **config) 76 self.clients = [] 77 self.focused = None 78 self.group = None 79 self.float_rules = float_rules or [] 80 81 warned = False 82 for index, rule in enumerate(self.float_rules): 83 if isinstance(rule, Match): 84 continue 85 86 if not warned: 87 message = "Non-config.Match objects in float_rules are " \ 88 "deprecated" 89 warnings.warn(message, DeprecationWarning) 90 logger.warning(message) 91 warned = True 92 93 match = Match( 94 title=rule.get("wname"), wm_class=rule.get("wmclass"), 95 role=rule.get("role"), wm_type=rule.get("wm_type"), 96 wm_instance_class=rule.get("wm_instance_class"), 97 net_wm_pid=rule.get("net_wm_pid")) 98 99 self.float_rules[index] = match 100 101 self.no_reposition_rules = no_reposition_rules or [] 102 self.add_defaults(Floating.defaults) 103 104 def match(self, win): 105 """Used to default float some windows""" 106 return any(win.match(rule) for rule in self.float_rules) 107 108 def find_clients(self, group): 109 """Find all clients belonging to a given group""" 110 return [c for c in self.clients if c.group is group] 111 112 def to_screen(self, group, new_screen): 113 """Adjust offsets of clients within current screen""" 114 for win in self.find_clients(group): 115 if win.maximized: 116 win.maximized = True 117 elif win.fullscreen: 118 win.fullscreen = True 119 else: 120 # catch if the client hasn't been configured 121 try: 122 # By default, place window at same offset from top corner 123 new_x = new_screen.x + win.float_x 124 new_y = new_screen.y + win.float_y 125 except AttributeError: 126 # this will be handled in .configure() 127 pass 128 else: 129 # make sure window isn't off screen left/right... 
130 new_x = min(new_x, new_screen.x + new_screen.width - win.width) 131 new_x = max(new_x, new_screen.x) 132 # and up/down 133 new_y = min(new_y, new_screen.y + new_screen.height - win.height) 134 new_y = max(new_y, new_screen.y) 135 136 win.x = new_x 137 win.y = new_y 138 win.group = new_screen.group 139 140 def focus_first(self, group=None): 141 if group is None: 142 clients = self.clients 143 else: 144 clients = self.find_clients(group) 145 146 if clients: 147 return clients[0] 148 149 def focus_next(self, win): 150 if win not in self.clients or win.group is None: 151 return 152 153 clients = self.find_clients(win.group) 154 idx = clients.index(win) 155 if len(clients) > idx + 1: 156 return clients[idx + 1] 157 158 def focus_last(self, group=None): 159 if group is None: 160 clients = self.clients 161 else: 162 clients = self.find_clients(group) 163 164 if clients: 165 return clients[-1] 166 167 def focus_previous(self, win): 168 if win not in self.clients or win.group is None: 169 return 170 171 clients = self.find_clients(win.group) 172 idx = clients.index(win) 173 if idx > 0: 174 return clients[idx - 1] 175 176 def focus(self, client): 177 self.focused = client 178 179 def blur(self): 180 self.focused = None 181 182 def compute_client_position(self, client, screen_rect): 183 """ recompute client.x and client.y, returning whether or not to place 184 this client above other windows or not """ 185 above = False 186 transient_for = client.window.get_wm_transient_for() 187 win = client.group.qtile.windows_map.get(transient_for) 188 if win is not None: 189 # if transient for a window, place in the center of the window 190 center_x = win.x + win.width / 2 191 center_y = win.y + win.height / 2 192 else: 193 center_x = screen_rect.x + screen_rect.width / 2 194 center_y = screen_rect.y + screen_rect.height / 2 195 above = True 196 197 x = center_x - client.width / 2 198 y = center_y - client.height / 2 199 200 # don't go off the right... 201 x = min(x, screen_rect.x + screen_rect.width) 202 # or left... 203 x = max(x, screen_rect.x) 204 # or bottom... 205 y = min(y, screen_rect.y + screen_rect.height) 206 # or top 207 y = max(y, screen_rect.y) 208 209 client.x = int(round(x)) 210 client.y = int(round(y)) 211 return above 212 213 def configure(self, client, screen_rect): 214 if client.has_focus: 215 bc = self.border_focus 216 else: 217 bc = self.border_normal 218 219 if client.maximized: 220 bw = self.max_border_width 221 elif client.fullscreen: 222 bw = self.fullscreen_border_width 223 else: 224 bw = self.border_width 225 226 # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application, 227 # don't reposition it anywhere, let Java app to control it 228 cls = client.window.get_wm_class() or '' 229 is_java_dropdown = 'sun-awt-X11-XWindowPeer' in cls 230 if is_java_dropdown: 231 client.paint_borders(bc, bw) 232 client.cmd_bring_to_front() 233 234 # similar to above but the X11 version, the client may have already 235 # placed itself. 
let's respect that 236 elif client.has_user_set_position(): 237 client.paint_borders(bc, bw) 238 client.cmd_bring_to_front() 239 240 # ok, it's not java and the window itself didn't position it, but users 241 # may still have asked us not to mess with it 242 elif any(m.compare(client) for m in self.no_reposition_rules): 243 client.paint_borders(bc, bw) 244 client.cmd_bring_to_front() 245 246 else: 247 above = False 248 249 # We definitely have a screen here, so let's be sure we'll float on screen 250 try: 251 client.float_x 252 client.float_y 253 except AttributeError: 254 # this window hasn't been placed before, let's put it in a sensible spot 255 above = self.compute_client_position(client, screen_rect) 256 257 client.place( 258 client.x, 259 client.y, 260 client.width, 261 client.height, 262 bw, 263 bc, 264 above, 265 ) 266 client.unhide() 267 268 def add(self, client): 269 self.clients.append(client) 270 self.focused = client 271 272 def remove(self, client): 273 if client not in self.clients: 274 return 275 276 next_focus = self.focus_next(client) 277 if client is self.focused: 278 self.blur() 279 self.clients.remove(client) 280 return next_focus 281 282 def info(self): 283 d = Layout.info(self) 284 d["clients"] = [c.name for c in self.clients] 285 return d 286 287 def cmd_next(self): 288 # This can't ever be called, but implement the abstract method 289 pass 290 291 def cmd_previous(self): 292 # This can't ever be called, but implement the abstract method 293 pass 294 [end of libqtile/layout/floating.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/layout/floating.py b/libqtile/layout/floating.py --- a/libqtile/layout/floating.py +++ b/libqtile/layout/floating.py @@ -179,35 +179,53 @@ def blur(self): self.focused = None + def on_screen(self, client, screen_rect): + if client.x < screen_rect.x: # client's left edge + return False + if screen_rect.x + screen_rect.width < client.x + client.width: # right + return False + if client.y < screen_rect.y: # top + return False + if screen_rect.y + screen_rect.width < client.y + client.height: # bottom + return False + return True + def compute_client_position(self, client, screen_rect): """ recompute client.x and client.y, returning whether or not to place this client above other windows or not """ - above = False - transient_for = client.window.get_wm_transient_for() - win = client.group.qtile.windows_map.get(transient_for) - if win is not None: - # if transient for a window, place in the center of the window - center_x = win.x + win.width / 2 - center_y = win.y + win.height / 2 - else: - center_x = screen_rect.x + screen_rect.width / 2 - center_y = screen_rect.y + screen_rect.height / 2 - above = True - - x = center_x - client.width / 2 - y = center_y - client.height / 2 - - # don't go off the right... - x = min(x, screen_rect.x + screen_rect.width) - # or left... - x = max(x, screen_rect.x) - # or bottom... - y = min(y, screen_rect.y + screen_rect.height) - # or top - y = max(y, screen_rect.y) - - client.x = int(round(x)) - client.y = int(round(y)) + above = True + + if client.has_user_set_position() and not self.on_screen(client, screen_rect): + # move to screen + client.x = screen_rect.x + client.x + client.y = screen_rect.y + client.y + if not client.has_user_set_position() or not self.on_screen(client, screen_rect): + # client has not been properly placed before or it is off screen + transient_for = client.window.get_wm_transient_for() + win = client.group.qtile.windows_map.get(transient_for) + if win is not None: + # if transient for a window, place in the center of the window + center_x = win.x + win.width / 2 + center_y = win.y + win.height / 2 + above = False + else: + center_x = screen_rect.x + screen_rect.width / 2 + center_y = screen_rect.y + screen_rect.height / 2 + + x = center_x - client.width / 2 + y = center_y - client.height / 2 + + # don't go off the right... + x = min(x, screen_rect.x + screen_rect.width - client.width) + # or left... + x = max(x, screen_rect.x) + # or bottom... + y = min(y, screen_rect.y + screen_rect.height - client.height) + # or top + y = max(y, screen_rect.y) + + client.x = int(round(x)) + client.y = int(round(y)) return above def configure(self, client, screen_rect): @@ -231,14 +249,7 @@ client.paint_borders(bc, bw) client.cmd_bring_to_front() - # similar to above but the X11 version, the client may have already - # placed itself. let's respect that - elif client.has_user_set_position(): - client.paint_borders(bc, bw) - client.cmd_bring_to_front() - - # ok, it's not java and the window itself didn't position it, but users - # may still have asked us not to mess with it + # alternatively, users may have asked us explicitly to leave the client alone elif any(m.compare(client) for m in self.no_reposition_rules): client.paint_borders(bc, bw) client.cmd_bring_to_front()
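The fix above hinges on two small geometric checks: whether a client already lies fully inside its screen, and how to clamp it back on when it does not. A self-contained sketch of that geometry, using a plain dataclass instead of qtile's window and screen objects:

```python
from dataclasses import dataclass

@dataclass
class Rect:
    x: int
    y: int
    width: int
    height: int

def fully_on_screen(win: Rect, screen: Rect) -> bool:
    # True only if every edge of the window lies inside the screen rectangle.
    return (screen.x <= win.x
            and win.x + win.width <= screen.x + screen.width
            and screen.y <= win.y
            and win.y + win.height <= screen.y + screen.height)

def clamp_to_screen(win: Rect, screen: Rect) -> Rect:
    # Keep the client inside the screen, preferring the top/left edges.
    x = max(min(win.x, screen.x + screen.width - win.width), screen.x)
    y = max(min(win.y, screen.y + screen.height - win.height), screen.y)
    return Rect(x, y, win.width, win.height)

screen = Rect(0, 0, 1920, 1080)
popup = Rect(1900, 1060, 400, 300)     # spawned hanging off the bottom-right
print(fully_on_screen(popup, screen))  # False
print(clamp_to_screen(popup, screen))  # Rect(x=1520, y=780, width=400, height=300)
```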
{"golden_diff": "diff --git a/libqtile/layout/floating.py b/libqtile/layout/floating.py\n--- a/libqtile/layout/floating.py\n+++ b/libqtile/layout/floating.py\n@@ -179,35 +179,53 @@\n def blur(self):\n self.focused = None\n \n+ def on_screen(self, client, screen_rect):\n+ if client.x < screen_rect.x: # client's left edge\n+ return False\n+ if screen_rect.x + screen_rect.width < client.x + client.width: # right\n+ return False\n+ if client.y < screen_rect.y: # top\n+ return False\n+ if screen_rect.y + screen_rect.width < client.y + client.height: # bottom\n+ return False\n+ return True\n+\n def compute_client_position(self, client, screen_rect):\n \"\"\" recompute client.x and client.y, returning whether or not to place\n this client above other windows or not \"\"\"\n- above = False\n- transient_for = client.window.get_wm_transient_for()\n- win = client.group.qtile.windows_map.get(transient_for)\n- if win is not None:\n- # if transient for a window, place in the center of the window\n- center_x = win.x + win.width / 2\n- center_y = win.y + win.height / 2\n- else:\n- center_x = screen_rect.x + screen_rect.width / 2\n- center_y = screen_rect.y + screen_rect.height / 2\n- above = True\n-\n- x = center_x - client.width / 2\n- y = center_y - client.height / 2\n-\n- # don't go off the right...\n- x = min(x, screen_rect.x + screen_rect.width)\n- # or left...\n- x = max(x, screen_rect.x)\n- # or bottom...\n- y = min(y, screen_rect.y + screen_rect.height)\n- # or top\n- y = max(y, screen_rect.y)\n-\n- client.x = int(round(x))\n- client.y = int(round(y))\n+ above = True\n+\n+ if client.has_user_set_position() and not self.on_screen(client, screen_rect):\n+ # move to screen\n+ client.x = screen_rect.x + client.x\n+ client.y = screen_rect.y + client.y\n+ if not client.has_user_set_position() or not self.on_screen(client, screen_rect):\n+ # client has not been properly placed before or it is off screen\n+ transient_for = client.window.get_wm_transient_for()\n+ win = client.group.qtile.windows_map.get(transient_for)\n+ if win is not None:\n+ # if transient for a window, place in the center of the window\n+ center_x = win.x + win.width / 2\n+ center_y = win.y + win.height / 2\n+ above = False\n+ else:\n+ center_x = screen_rect.x + screen_rect.width / 2\n+ center_y = screen_rect.y + screen_rect.height / 2\n+\n+ x = center_x - client.width / 2\n+ y = center_y - client.height / 2\n+\n+ # don't go off the right...\n+ x = min(x, screen_rect.x + screen_rect.width - client.width)\n+ # or left...\n+ x = max(x, screen_rect.x)\n+ # or bottom...\n+ y = min(y, screen_rect.y + screen_rect.height - client.height)\n+ # or top\n+ y = max(y, screen_rect.y)\n+\n+ client.x = int(round(x))\n+ client.y = int(round(y))\n return above\n \n def configure(self, client, screen_rect):\n@@ -231,14 +249,7 @@\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n \n- # similar to above but the X11 version, the client may have already\n- # placed itself. 
let's respect that\n- elif client.has_user_set_position():\n- client.paint_borders(bc, bw)\n- client.cmd_bring_to_front()\n-\n- # ok, it's not java and the window itself didn't position it, but users\n- # may still have asked us not to mess with it\n+ # alternatively, users may have asked us explicitly to leave the client alone\n elif any(m.compare(client) for m in self.no_reposition_rules):\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n", "issue": "Popups and child-windows are drawn behind parent windows in certain situations\n# Issue description\r\nIn some situations I noticed that popups or child-windows are drawn (or moved) behind their respective parents; for child-windows this can be quite a problem in case the child is blocking, since then the software might be completely unresponsive.\r\n\r\nPlaces where I have noticed this and that I remember:\r\n1. Virtualbox (fullscreen) (the main window, not a client): Context menus on VM-list open in the background.\r\n2. Veracrypt (floating) \"mount\" child-window: The window either spawns in the back or is moved there once it loses focus.\r\n\r\nThere have been some other cases, but due to testing PR #1870, which I initially expected to be the cause for this, I haven't taken note of all of them.\r\nSome software definitely has working popup-windows, so Virtualbox might be doing something weird.\r\n\r\n# Qtile version\r\n\r\nhttps://github.com/qtile/qtile/commit/c6b80e9444e9b4e5dc2f52ca1954dc502c45f2eb\r\n(newer versions not tested yet)\r\n\r\n# Configuration\r\n\r\nI'll only include one snippet here, since verything else (setting up Keys and Widgets) most likely has nothing to do with the problem.\r\n\r\n```python\r\[email protected]_new\r\ndef modify_window(client):\r\n if (client.window.get_wm_transient_for() or client.window.get_wm_type() in floating_types):\r\n client.floating = True\r\n```\r\n\r\nOn top of this I am currently using fake screens instead of normal screens.\r\n\r\nIt would be nice if somebody could confirm this, since I am not entirely sure this isn't all caused by my own PR.\r\n\r\nRelated to #1870, which could fix at least the described issue with child-windows.\n", "before_files": [{"content": "# Copyright (c) 2010 matt\n# Copyright (c) 2010-2011 Paul Colomiets\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 Craig Barnes\n# Copyright (c) 2012, 2014-2015 Tycho Andersen\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2013 Julien Iguchi-Cartigny\n# Copyright (c) 2014 ramnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 dequis\n# Copyright (c) 2018 Nazar Mokrynskyi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport warnings\n\nfrom libqtile.config import Match\nfrom libqtile.layout.base import Layout\nfrom libqtile.log_utils import logger\n\n\nclass Floating(Layout):\n \"\"\"\n Floating layout, which does nothing with windows but handles focus order\n \"\"\"\n defaults = [\n (\"border_focus\", \"#0000ff\", \"Border colour for the focused window.\"),\n (\"border_normal\", \"#000000\", \"Border colour for un-focused windows.\"),\n (\"border_width\", 1, \"Border width.\"),\n (\"max_border_width\", 0, \"Border width for maximize.\"),\n (\"fullscreen_border_width\", 0, \"Border width for fullscreen.\"),\n (\"name\", \"floating\", \"Name of this layout.\"),\n ]\n\n def __init__(self, float_rules=None, no_reposition_rules=None, **config):\n \"\"\"\n If you have certain apps that you always want to float you can provide\n ``float_rules`` to do so. ``float_rules`` are a list of\n Match objects::\n\n from libqtile.config import Match\n Match(title=WM_NAME, wm_class=WM_CLASS, role=WM_WINDOW_ROLE)\n\n When a new window is opened its ``match`` method is called with each of\n these rules. If one matches, the window will float. The following\n will float GIMP and Skype::\n\n from libqtile.config import Match\n float_rules=[Match(wm_class=\"skype\"), Match(wm_class=\"gimp\")]\n\n Specify these in the ``floating_layout`` in your config.\n\n Floating layout will try to center most of floating windows by default,\n but if you don't want this to happen for certain windows that are\n centered by mistake, you can use ``no_reposition_rules`` option to\n specify them and layout will rely on windows to position themselves in\n correct location on the screen.\n \"\"\"\n Layout.__init__(self, **config)\n self.clients = []\n self.focused = None\n self.group = None\n self.float_rules = float_rules or []\n\n warned = False\n for index, rule in enumerate(self.float_rules):\n if isinstance(rule, Match):\n continue\n\n if not warned:\n message = \"Non-config.Match objects in float_rules are \" \\\n \"deprecated\"\n warnings.warn(message, DeprecationWarning)\n logger.warning(message)\n warned = True\n\n match = Match(\n title=rule.get(\"wname\"), wm_class=rule.get(\"wmclass\"),\n role=rule.get(\"role\"), wm_type=rule.get(\"wm_type\"),\n wm_instance_class=rule.get(\"wm_instance_class\"),\n net_wm_pid=rule.get(\"net_wm_pid\"))\n\n self.float_rules[index] = match\n\n self.no_reposition_rules = no_reposition_rules or []\n self.add_defaults(Floating.defaults)\n\n def match(self, win):\n \"\"\"Used to default float some windows\"\"\"\n return any(win.match(rule) for rule in self.float_rules)\n\n def find_clients(self, group):\n \"\"\"Find all clients belonging to a given group\"\"\"\n return [c for c in self.clients if c.group is group]\n\n def to_screen(self, group, new_screen):\n \"\"\"Adjust offsets of clients within current screen\"\"\"\n for win in self.find_clients(group):\n if win.maximized:\n win.maximized = True\n elif win.fullscreen:\n win.fullscreen = True\n else:\n # catch if the client hasn't been configured\n try:\n # By default, place window at same offset from top corner\n new_x = new_screen.x + win.float_x\n new_y = new_screen.y + win.float_y\n except AttributeError:\n # this will be handled in .configure()\n pass\n else:\n # make sure window 
isn't off screen left/right...\n new_x = min(new_x, new_screen.x + new_screen.width - win.width)\n new_x = max(new_x, new_screen.x)\n # and up/down\n new_y = min(new_y, new_screen.y + new_screen.height - win.height)\n new_y = max(new_y, new_screen.y)\n\n win.x = new_x\n win.y = new_y\n win.group = new_screen.group\n\n def focus_first(self, group=None):\n if group is None:\n clients = self.clients\n else:\n clients = self.find_clients(group)\n\n if clients:\n return clients[0]\n\n def focus_next(self, win):\n if win not in self.clients or win.group is None:\n return\n\n clients = self.find_clients(win.group)\n idx = clients.index(win)\n if len(clients) > idx + 1:\n return clients[idx + 1]\n\n def focus_last(self, group=None):\n if group is None:\n clients = self.clients\n else:\n clients = self.find_clients(group)\n\n if clients:\n return clients[-1]\n\n def focus_previous(self, win):\n if win not in self.clients or win.group is None:\n return\n\n clients = self.find_clients(win.group)\n idx = clients.index(win)\n if idx > 0:\n return clients[idx - 1]\n\n def focus(self, client):\n self.focused = client\n\n def blur(self):\n self.focused = None\n\n def compute_client_position(self, client, screen_rect):\n \"\"\" recompute client.x and client.y, returning whether or not to place\n this client above other windows or not \"\"\"\n above = False\n transient_for = client.window.get_wm_transient_for()\n win = client.group.qtile.windows_map.get(transient_for)\n if win is not None:\n # if transient for a window, place in the center of the window\n center_x = win.x + win.width / 2\n center_y = win.y + win.height / 2\n else:\n center_x = screen_rect.x + screen_rect.width / 2\n center_y = screen_rect.y + screen_rect.height / 2\n above = True\n\n x = center_x - client.width / 2\n y = center_y - client.height / 2\n\n # don't go off the right...\n x = min(x, screen_rect.x + screen_rect.width)\n # or left...\n x = max(x, screen_rect.x)\n # or bottom...\n y = min(y, screen_rect.y + screen_rect.height)\n # or top\n y = max(y, screen_rect.y)\n\n client.x = int(round(x))\n client.y = int(round(y))\n return above\n\n def configure(self, client, screen_rect):\n if client.has_focus:\n bc = self.border_focus\n else:\n bc = self.border_normal\n\n if client.maximized:\n bw = self.max_border_width\n elif client.fullscreen:\n bw = self.fullscreen_border_width\n else:\n bw = self.border_width\n\n # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,\n # don't reposition it anywhere, let Java app to control it\n cls = client.window.get_wm_class() or ''\n is_java_dropdown = 'sun-awt-X11-XWindowPeer' in cls\n if is_java_dropdown:\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n\n # similar to above but the X11 version, the client may have already\n # placed itself. 
let's respect that\n elif client.has_user_set_position():\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n\n # ok, it's not java and the window itself didn't position it, but users\n # may still have asked us not to mess with it\n elif any(m.compare(client) for m in self.no_reposition_rules):\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n\n else:\n above = False\n\n # We definitely have a screen here, so let's be sure we'll float on screen\n try:\n client.float_x\n client.float_y\n except AttributeError:\n # this window hasn't been placed before, let's put it in a sensible spot\n above = self.compute_client_position(client, screen_rect)\n\n client.place(\n client.x,\n client.y,\n client.width,\n client.height,\n bw,\n bc,\n above,\n )\n client.unhide()\n\n def add(self, client):\n self.clients.append(client)\n self.focused = client\n\n def remove(self, client):\n if client not in self.clients:\n return\n\n next_focus = self.focus_next(client)\n if client is self.focused:\n self.blur()\n self.clients.remove(client)\n return next_focus\n\n def info(self):\n d = Layout.info(self)\n d[\"clients\"] = [c.name for c in self.clients]\n return d\n\n def cmd_next(self):\n # This can't ever be called, but implement the abstract method\n pass\n\n def cmd_previous(self):\n # This can't ever be called, but implement the abstract method\n pass\n", "path": "libqtile/layout/floating.py"}]}
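For reference, the placement rule in `compute_client_position` centers a new floating client over its transient-for parent when one exists, and over the screen otherwise. A minimal sketch of that rule, with plain dataclasses standing in for qtile's window and screen objects:

```python
from dataclasses import dataclass
from typing import Optional, Tuple

@dataclass
class Box:
    x: int
    y: int
    width: int
    height: int

def centered_position(client: Box, screen: Box, parent: Optional[Box]) -> Tuple[int, int]:
    # Centre over the transient-for parent if there is one, else over the screen.
    anchor = parent if parent is not None else screen
    cx = anchor.x + anchor.width / 2
    cy = anchor.y + anchor.height / 2
    return int(round(cx - client.width / 2)), int(round(cy - client.height / 2))

screen = Box(0, 0, 1920, 1080)
parent = Box(600, 300, 800, 600)
dialog = Box(0, 0, 400, 200)
print(centered_position(dialog, screen, parent))  # (800, 500)
print(centered_position(dialog, screen, None))    # (760, 440)
```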
4,081
1,013
gh_patches_debug_24628
rasdani/github-patches
git_diff
scverse__scanpy-1554
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> deprecate scvi in external I was wondering if we could deprecate the scvi external wrapper as we now have `scvi-tools`. I could also update the wrapper to have minimal functionality, but I think it would be better for people to use our API now that it's tightly integrated with scanpy anyway. </issue> <code> [start of scanpy/external/pp/_scvi.py] 1 import numpy as np 2 import pandas as pd 3 import scipy as sp 4 5 from typing import Optional, Sequence, Union 6 from anndata import AnnData 7 8 MIN_VERSION = "0.6.5" 9 10 11 def scvi( 12 adata: AnnData, 13 n_hidden: int = 128, 14 n_latent: int = 10, 15 n_layers: int = 1, 16 dispersion: str = "gene", 17 n_epochs: int = 400, 18 lr: int = 1e-3, 19 train_size: int = 1.0, 20 batch_key: Optional[str] = None, 21 use_highly_variable_genes: bool = True, 22 subset_genes: Optional[Sequence[Union[int, str]]] = None, 23 linear_decoder: bool = False, 24 copy: bool = False, 25 use_cuda: bool = True, 26 return_posterior: bool = True, 27 trainer_kwargs: dict = {}, 28 model_kwargs: dict = {}, 29 ) -> Optional[AnnData]: 30 """\ 31 SCVI [Lopez18]_. 32 33 Fits scVI model onto raw count data given an anndata object 34 35 scVI uses stochastic optimization and deep neural networks to aggregate information 36 across similar cells and genes and to approximate the distributions that underlie 37 observed expression values, while accounting for batch effects and limited sensitivity. 38 39 To use a linear-decoded Variational AutoEncoder model (implementation of [Svensson20]_.), 40 set linear_decoded = True. Compared to standard VAE, this model is less powerful, but can 41 be used to inspect which genes contribute to variation in the dataset. It may also be used 42 for all scVI tasks, like differential expression, batch correction, imputation, etc. 43 However, batch correction may be less powerful as it assumes a linear model. 44 45 .. note:: 46 More information and bug reports `here <https://github.com/YosefLab/scVI>`__. 47 48 Parameters 49 ---------- 50 adata 51 An anndata file with `X` attribute of unnormalized count data 52 n_hidden 53 Number of nodes per hidden layer 54 n_latent 55 Dimensionality of the latent space 56 n_layers 57 Number of hidden layers used for encoder and decoder NNs 58 dispersion 59 One of the following 60 * `'gene'` - dispersion parameter of NB is constant per gene across cells 61 * `'gene-batch'` - dispersion can differ between different batches 62 * `'gene-label'` - dispersion can differ between different labels 63 * `'gene-cell'` - dispersion can differ for every gene in every cell 64 n_epochs 65 Number of epochs to train 66 lr 67 Learning rate 68 train_size 69 The train size, either a float between 0 and 1 or an integer for the number of training samples to use 70 batch_key 71 Column name in anndata.obs for batches. 72 If None, no batch correction is performed 73 If not None, batch correction is performed per batch category 74 use_highly_variable_genes 75 If true, uses only the genes in anndata.var["highly_variable"] 76 subset_genes 77 Optional list of indices or gene names to subset anndata. 78 If not None, use_highly_variable_genes is ignored 79 linear_decoder 80 If true, uses LDVAE model, which is an implementation of [Svensson20]_. 
81 copy 82 If true, a copy of anndata is returned 83 return_posterior 84 If true, posterior object is returned 85 use_cuda 86 If true, uses cuda 87 trainer_kwargs 88 Extra arguments for UnsupervisedTrainer 89 model_kwargs 90 Extra arguments for VAE or LDVAE model 91 92 Returns 93 ------- 94 If `copy` is true, anndata is returned. 95 If `return_posterior` is true, the posterior object is returned 96 If both `copy` and `return_posterior` are true, 97 a tuple of anndata and the posterior are returned in that order. 98 99 `adata.obsm['X_scvi']` stores the latent representations 100 `adata.obsm['X_scvi_denoised']` stores the normalized mean of the negative binomial 101 `adata.obsm['X_scvi_sample_rate']` stores the mean of the negative binomial 102 103 If linear_decoder is true: 104 `adata.uns['ldvae_loadings']` stores the per-gene weights in the linear decoder as a 105 genes by n_latent matrix. 106 107 """ 108 109 try: 110 from scvi.models import VAE, LDVAE 111 from scvi.inference import UnsupervisedTrainer 112 from scvi.dataset import AnnDatasetFromAnnData 113 except ImportError: 114 raise ImportError( 115 "Please install scvi package from https://github.com/YosefLab/scVI" 116 ) 117 118 # check if observations are unnormalized using first 10 119 # code from: https://github.com/theislab/dca/blob/89eee4ed01dd969b3d46e0c815382806fbfc2526/dca/io.py#L63-L69 120 if len(adata) > 10: 121 X_subset = adata.X[:10] 122 else: 123 X_subset = adata.X 124 norm_error = ( 125 'Make sure that the dataset (adata.X) contains unnormalized count data.' 126 ) 127 if sp.sparse.issparse(X_subset): 128 assert (X_subset.astype(int) != X_subset).nnz == 0, norm_error 129 else: 130 assert np.all(X_subset.astype(int) == X_subset), norm_error 131 132 if subset_genes is not None: 133 adata_subset = adata[:, subset_genes] 134 elif use_highly_variable_genes and "highly_variable" in adata.var: 135 adata_subset = adata[:, adata.var["highly_variable"]] 136 else: 137 adata_subset = adata 138 139 if batch_key is not None: 140 codes, uniques = pd.factorize(adata_subset.obs[batch_key]) 141 adata_subset.obs['_tmp_scvi_batch'] = codes 142 n_batches = len(uniques) 143 else: 144 n_batches = 0 145 146 dataset = AnnDatasetFromAnnData(adata_subset.copy(), batch_label='_tmp_scvi_batch') 147 148 if linear_decoder: 149 vae = LDVAE( 150 n_input=dataset.nb_genes, 151 n_batch=n_batches, 152 n_labels=dataset.n_labels, 153 n_hidden=n_hidden, 154 n_latent=n_latent, 155 n_layers_encoder=n_layers, 156 dispersion=dispersion, 157 **model_kwargs, 158 ) 159 160 else: 161 vae = VAE( 162 dataset.nb_genes, 163 n_batch=n_batches, 164 n_labels=dataset.n_labels, 165 n_hidden=n_hidden, 166 n_latent=n_latent, 167 n_layers=n_layers, 168 dispersion=dispersion, 169 **model_kwargs, 170 ) 171 172 trainer = UnsupervisedTrainer( 173 model=vae, 174 gene_dataset=dataset, 175 use_cuda=use_cuda, 176 train_size=train_size, 177 **trainer_kwargs, 178 ) 179 180 trainer.train(n_epochs=n_epochs, lr=lr) 181 182 full = trainer.create_posterior( 183 trainer.model, dataset, indices=np.arange(len(dataset)) 184 ) 185 latent, batch_indices, labels = full.sequential().get_latent() 186 187 if copy: 188 adata = adata.copy() 189 190 adata.obsm['X_scvi'] = latent 191 adata.obsm['X_scvi_denoised'] = full.sequential().get_sample_scale() 192 adata.obsm['X_scvi_sample_rate'] = full.sequential().imputation() 193 194 if linear_decoder: 195 loadings = vae.get_loadings() 196 df = pd.DataFrame(loadings, index=adata_subset.var_names) 197 adata.uns['ldvae_loadings'] = df 198 199 if copy and 
return_posterior: 200 return adata, full 201 elif copy: 202 return adata 203 elif return_posterior: 204 return full 205 [end of scanpy/external/pp/_scvi.py] [start of setup.py] 1 import sys 2 3 if sys.version_info < (3, 6): 4 sys.exit('scanpy requires Python >= 3.6') 5 from pathlib import Path 6 7 from setuptools import setup, find_packages 8 9 try: 10 import pytoml 11 except ImportError: 12 sys.exit('Please use `pip install .` or install pytoml first.') 13 14 proj = pytoml.loads(Path('pyproject.toml').read_text()) 15 metadata = proj['tool']['scanpy'] 16 17 setup( 18 name='scanpy', 19 use_scm_version=True, 20 setup_requires=['setuptools_scm'], 21 description='Single-Cell Analysis in Python.', 22 long_description=Path('README.rst').read_text('utf-8'), 23 url='http://github.com/theislab/scanpy', 24 author=metadata['author'], 25 author_email=metadata['author-email'], 26 license='BSD', 27 python_requires='>=3.6', 28 install_requires=[ 29 l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines() 30 ], 31 extras_require=dict( 32 louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'], 33 leiden=['python-igraph', 'leidenalg'], 34 bbknn=['bbknn'], 35 scvi=['scvi>=0.6.5'], 36 rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'], 37 magic=['magic-impute>=2.0'], 38 skmisc=['scikit-misc>=0.1.3'], 39 harmony=['harmonypy'], 40 scrublet=['scrublet'], 41 dev=['setuptools_scm', 'pytoml', 'black>=20.8b1'], 42 doc=[ 43 'sphinx>=3.2', 44 'sphinx_rtd_theme>=0.3.1', 45 'sphinx_autodoc_typehints', 46 'scanpydoc>=0.5', 47 'typing_extensions; python_version < "3.8"', # for `Literal` 48 ], 49 test=[ 50 'pytest>=4.4', 51 'dask[array]!=2.17.0', 52 'fsspec', 53 'zappy', 54 'zarr', 55 'profimp', 56 ], 57 ), 58 packages=find_packages(), 59 include_package_data=True, 60 entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']), 61 zip_safe=False, 62 classifiers=[ 63 'Development Status :: 5 - Production/Stable', 64 'Environment :: Console', 65 'Framework :: Jupyter', 66 'Intended Audience :: Developers', 67 'Intended Audience :: Science/Research', 68 'Natural Language :: English', 69 'Operating System :: MacOS :: MacOS X', 70 'Operating System :: Microsoft :: Windows', 71 'Operating System :: POSIX :: Linux', 72 'Programming Language :: Python :: 3', 73 'Programming Language :: Python :: 3.5', 74 'Programming Language :: Python :: 3.6', 75 'Programming Language :: Python :: 3.7', 76 'Topic :: Scientific/Engineering :: Bio-Informatics', 77 'Topic :: Scientific/Engineering :: Visualization', 78 ], 79 ) 80 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scanpy/external/pp/_scvi.py b/scanpy/external/pp/_scvi.py --- a/scanpy/external/pp/_scvi.py +++ b/scanpy/external/pp/_scvi.py @@ -1,3 +1,4 @@ +import warnings import numpy as np import pandas as pd import scipy as sp @@ -5,7 +6,7 @@ from typing import Optional, Sequence, Union from anndata import AnnData -MIN_VERSION = "0.6.5" +MIN_VERSION = "0.6.7" def scvi( @@ -105,6 +106,11 @@ genes by n_latent matrix. """ + warnings.warn( + "scvi via scanpy external API is no longer supported. " + + "Please use the new scvi-tools package from `scvi-tools.org`", + FutureWarning, + ) try: from scvi.models import VAE, LDVAE diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -32,7 +32,7 @@ louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'], leiden=['python-igraph', 'leidenalg'], bbknn=['bbknn'], - scvi=['scvi>=0.6.5'], + scvi=['scvi==0.6.7'], rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'], magic=['magic-impute>=2.0'], skmisc=['scikit-misc>=0.1.3'],
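The scanpy patch above amounts to emitting a `FutureWarning` at the top of the wrapper and pinning the optional dependency. The same deprecation pattern, written as a reusable decorator sketch; the decorator and its names are illustrative, not part of scanpy's API:

```python
import functools
import warnings

def deprecated_external(replacement_hint):
    """Mark an external wrapper as deprecated in favour of another package."""
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            warnings.warn(
                f"{fn.__name__} via the external API is no longer supported. "
                f"Please use {replacement_hint} instead.",
                FutureWarning,
                stacklevel=2,
            )
            return fn(*args, **kwargs)
        return wrapper
    return decorator

@deprecated_external("the scvi-tools package from scvi-tools.org")
def scvi(adata, **kwargs):
    ...  # original wrapper body would go here unchanged
```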
{"golden_diff": "diff --git a/scanpy/external/pp/_scvi.py b/scanpy/external/pp/_scvi.py\n--- a/scanpy/external/pp/_scvi.py\n+++ b/scanpy/external/pp/_scvi.py\n@@ -1,3 +1,4 @@\n+import warnings\n import numpy as np\n import pandas as pd\n import scipy as sp\n@@ -5,7 +6,7 @@\n from typing import Optional, Sequence, Union\n from anndata import AnnData\n \n-MIN_VERSION = \"0.6.5\"\n+MIN_VERSION = \"0.6.7\"\n \n \n def scvi(\n@@ -105,6 +106,11 @@\n genes by n_latent matrix.\n \n \"\"\"\n+ warnings.warn(\n+ \"scvi via scanpy external API is no longer supported. \"\n+ + \"Please use the new scvi-tools package from `scvi-tools.org`\",\n+ FutureWarning,\n+ )\n \n try:\n from scvi.models import VAE, LDVAE\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,7 +32,7 @@\n louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],\n leiden=['python-igraph', 'leidenalg'],\n bbknn=['bbknn'],\n- scvi=['scvi>=0.6.5'],\n+ scvi=['scvi==0.6.7'],\n rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],\n magic=['magic-impute>=2.0'],\n skmisc=['scikit-misc>=0.1.3'],\n", "issue": "deprecate scvi in external\nI was wondering if we could deprecate the scvi external wrapper as we now have `scvi-tools`. I could also update the wrapper to have minimal functionality, but I think it would be better for people to use our API now that it's tightly integrated with scanpy anyway.\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\nimport scipy as sp\n\nfrom typing import Optional, Sequence, Union\nfrom anndata import AnnData\n\nMIN_VERSION = \"0.6.5\"\n\n\ndef scvi(\n adata: AnnData,\n n_hidden: int = 128,\n n_latent: int = 10,\n n_layers: int = 1,\n dispersion: str = \"gene\",\n n_epochs: int = 400,\n lr: int = 1e-3,\n train_size: int = 1.0,\n batch_key: Optional[str] = None,\n use_highly_variable_genes: bool = True,\n subset_genes: Optional[Sequence[Union[int, str]]] = None,\n linear_decoder: bool = False,\n copy: bool = False,\n use_cuda: bool = True,\n return_posterior: bool = True,\n trainer_kwargs: dict = {},\n model_kwargs: dict = {},\n) -> Optional[AnnData]:\n \"\"\"\\\n SCVI [Lopez18]_.\n\n Fits scVI model onto raw count data given an anndata object\n\n scVI uses stochastic optimization and deep neural networks to aggregate information \n across similar cells and genes and to approximate the distributions that underlie\n observed expression values, while accounting for batch effects and limited sensitivity.\n\n To use a linear-decoded Variational AutoEncoder model (implementation of [Svensson20]_.),\n set linear_decoded = True. Compared to standard VAE, this model is less powerful, but can \n be used to inspect which genes contribute to variation in the dataset. It may also be used\n for all scVI tasks, like differential expression, batch correction, imputation, etc.\n However, batch correction may be less powerful as it assumes a linear model.\n\n .. 
note::\n More information and bug reports `here <https://github.com/YosefLab/scVI>`__.\n\n Parameters\n ----------\n adata\n An anndata file with `X` attribute of unnormalized count data\n n_hidden\n Number of nodes per hidden layer\n n_latent\n Dimensionality of the latent space\n n_layers\n Number of hidden layers used for encoder and decoder NNs\n dispersion\n One of the following\n * `'gene'` - dispersion parameter of NB is constant per gene across cells\n * `'gene-batch'` - dispersion can differ between different batches\n * `'gene-label'` - dispersion can differ between different labels\n * `'gene-cell'` - dispersion can differ for every gene in every cell\n n_epochs\n Number of epochs to train\n lr\n Learning rate\n train_size\n The train size, either a float between 0 and 1 or an integer for the number of training samples to use\n batch_key\n Column name in anndata.obs for batches. \n If None, no batch correction is performed\n If not None, batch correction is performed per batch category\n use_highly_variable_genes\n If true, uses only the genes in anndata.var[\"highly_variable\"]\n subset_genes\n Optional list of indices or gene names to subset anndata. \n If not None, use_highly_variable_genes is ignored\n linear_decoder\n If true, uses LDVAE model, which is an implementation of [Svensson20]_.\n copy\n If true, a copy of anndata is returned\n return_posterior\n If true, posterior object is returned\n use_cuda\n If true, uses cuda\n trainer_kwargs\n Extra arguments for UnsupervisedTrainer\n model_kwargs\n Extra arguments for VAE or LDVAE model\n \n Returns\n -------\n If `copy` is true, anndata is returned.\n If `return_posterior` is true, the posterior object is returned\n If both `copy` and `return_posterior` are true, \n a tuple of anndata and the posterior are returned in that order. 
\n\n `adata.obsm['X_scvi']` stores the latent representations\n `adata.obsm['X_scvi_denoised']` stores the normalized mean of the negative binomial\n `adata.obsm['X_scvi_sample_rate']` stores the mean of the negative binomial\n \n If linear_decoder is true:\n `adata.uns['ldvae_loadings']` stores the per-gene weights in the linear decoder as a\n genes by n_latent matrix.\n\n \"\"\"\n\n try:\n from scvi.models import VAE, LDVAE\n from scvi.inference import UnsupervisedTrainer\n from scvi.dataset import AnnDatasetFromAnnData\n except ImportError:\n raise ImportError(\n \"Please install scvi package from https://github.com/YosefLab/scVI\"\n )\n\n # check if observations are unnormalized using first 10\n # code from: https://github.com/theislab/dca/blob/89eee4ed01dd969b3d46e0c815382806fbfc2526/dca/io.py#L63-L69\n if len(adata) > 10:\n X_subset = adata.X[:10]\n else:\n X_subset = adata.X\n norm_error = (\n 'Make sure that the dataset (adata.X) contains unnormalized count data.'\n )\n if sp.sparse.issparse(X_subset):\n assert (X_subset.astype(int) != X_subset).nnz == 0, norm_error\n else:\n assert np.all(X_subset.astype(int) == X_subset), norm_error\n\n if subset_genes is not None:\n adata_subset = adata[:, subset_genes]\n elif use_highly_variable_genes and \"highly_variable\" in adata.var:\n adata_subset = adata[:, adata.var[\"highly_variable\"]]\n else:\n adata_subset = adata\n\n if batch_key is not None:\n codes, uniques = pd.factorize(adata_subset.obs[batch_key])\n adata_subset.obs['_tmp_scvi_batch'] = codes\n n_batches = len(uniques)\n else:\n n_batches = 0\n\n dataset = AnnDatasetFromAnnData(adata_subset.copy(), batch_label='_tmp_scvi_batch')\n\n if linear_decoder:\n vae = LDVAE(\n n_input=dataset.nb_genes,\n n_batch=n_batches,\n n_labels=dataset.n_labels,\n n_hidden=n_hidden,\n n_latent=n_latent,\n n_layers_encoder=n_layers,\n dispersion=dispersion,\n **model_kwargs,\n )\n\n else:\n vae = VAE(\n dataset.nb_genes,\n n_batch=n_batches,\n n_labels=dataset.n_labels,\n n_hidden=n_hidden,\n n_latent=n_latent,\n n_layers=n_layers,\n dispersion=dispersion,\n **model_kwargs,\n )\n\n trainer = UnsupervisedTrainer(\n model=vae,\n gene_dataset=dataset,\n use_cuda=use_cuda,\n train_size=train_size,\n **trainer_kwargs,\n )\n\n trainer.train(n_epochs=n_epochs, lr=lr)\n\n full = trainer.create_posterior(\n trainer.model, dataset, indices=np.arange(len(dataset))\n )\n latent, batch_indices, labels = full.sequential().get_latent()\n\n if copy:\n adata = adata.copy()\n\n adata.obsm['X_scvi'] = latent\n adata.obsm['X_scvi_denoised'] = full.sequential().get_sample_scale()\n adata.obsm['X_scvi_sample_rate'] = full.sequential().imputation()\n\n if linear_decoder:\n loadings = vae.get_loadings()\n df = pd.DataFrame(loadings, index=adata_subset.var_names)\n adata.uns['ldvae_loadings'] = df\n\n if copy and return_posterior:\n return adata, full\n elif copy:\n return adata\n elif return_posterior:\n return full\n", "path": "scanpy/external/pp/_scvi.py"}, {"content": "import sys\n\nif sys.version_info < (3, 6):\n sys.exit('scanpy requires Python >= 3.6')\nfrom pathlib import Path\n\nfrom setuptools import setup, find_packages\n\ntry:\n import pytoml\nexcept ImportError:\n sys.exit('Please use `pip install .` or install pytoml first.')\n\nproj = pytoml.loads(Path('pyproject.toml').read_text())\nmetadata = proj['tool']['scanpy']\n\nsetup(\n name='scanpy',\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description='Single-Cell Analysis in Python.',\n 
long_description=Path('README.rst').read_text('utf-8'),\n url='http://github.com/theislab/scanpy',\n author=metadata['author'],\n author_email=metadata['author-email'],\n license='BSD',\n python_requires='>=3.6',\n install_requires=[\n l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines()\n ],\n extras_require=dict(\n louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],\n leiden=['python-igraph', 'leidenalg'],\n bbknn=['bbknn'],\n scvi=['scvi>=0.6.5'],\n rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],\n magic=['magic-impute>=2.0'],\n skmisc=['scikit-misc>=0.1.3'],\n harmony=['harmonypy'],\n scrublet=['scrublet'],\n dev=['setuptools_scm', 'pytoml', 'black>=20.8b1'],\n doc=[\n 'sphinx>=3.2',\n 'sphinx_rtd_theme>=0.3.1',\n 'sphinx_autodoc_typehints',\n 'scanpydoc>=0.5',\n 'typing_extensions; python_version < \"3.8\"', # for `Literal`\n ],\n test=[\n 'pytest>=4.4',\n 'dask[array]!=2.17.0',\n 'fsspec',\n 'zappy',\n 'zarr',\n 'profimp',\n ],\n ),\n packages=find_packages(),\n include_package_data=True,\n entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Jupyter',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Visualization',\n ],\n)\n", "path": "setup.py"}]}
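A brief, hedged usage note on the deprecation in the record above: the golden diff makes the scanpy wrapper emit a `FutureWarning` pointing at scvi-tools and pins the optional dependency to `scvi==0.6.7`. The sketch below shows how a caller might keep using the wrapper while migrating; the import alias, example dataset, and parameter values are illustrative assumptions, not part of the record, and it needs `scvi` 0.6.7 installed to actually run.

```python
import warnings

import scanpy as sc
import scanpy.external as sce  # common alias for scanpy.external

# Any AnnData holding raw (unnormalized) counts works; pbmc3k is only an example.
adata = sc.datasets.pbmc3k()

# After the patch the wrapper warns once per call; a caller mid-migration can
# acknowledge the deprecation and silence it explicitly.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", FutureWarning)
    sce.pp.scvi(adata, n_latent=10, n_epochs=400, use_cuda=False)
```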
3,716 | 388 | gh_patches_debug_6234 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3564
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> moderators for b-plans if an initiator starts a b-plan via meinBerlin (as e.g. SenWohn does, they don't have imperia) or an external project he/she is automatically added as moderator and gets mails as the one below. This is confusing because: a) you don't see moderators in dashboard b) you can't follow a b-plan/external project c) the link does not go to the external page (in this case it goes here: https://mein.berlin.de/projects/bebauungsplan-8-66-buckower-felder/) Should we take out this rule for these two templates or stop sending mails? <img width="698" alt="bildschirmfoto 2019-02-04 um 13 32 08" src="https://user-images.githubusercontent.com/35491681/52208589-762c0780-2881-11e9-9781-21826347abe4.png"> </issue> <code> [start of meinberlin/apps/notifications/signals.py] 1 from django.contrib.auth import get_user_model 2 from django.db.models import signals 3 from django.dispatch import receiver 4 5 from adhocracy4.actions.models import Action 6 from adhocracy4.actions.verbs import Verbs 7 from adhocracy4.dashboard import signals as dashboard_signals 8 from adhocracy4.follows.models import Follow 9 from adhocracy4.projects.models import Project 10 11 from . import emails 12 13 User = get_user_model() 14 15 16 @receiver(signals.post_save, sender=Action) 17 def send_notifications(instance, created, **kwargs): 18 action = instance 19 verb = Verbs(action.verb) 20 21 if action.type in ('item', 'comment') \ 22 and verb in (Verbs.CREATE, Verbs.ADD): 23 emails.NotifyCreatorEmail.send(action) 24 25 if action.project: 26 emails.NotifyModeratorsEmail.send(action) 27 28 elif action.type == 'phase': 29 if verb == Verbs.START: 30 emails.NotifyFollowersOnPhaseStartedEmail.send(action) 31 elif verb == Verbs.SCHEDULE: 32 emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action) 33 34 elif action.type == 'offlineevent' and verb == Verbs.START: 35 emails.NotifyFollowersOnUpcommingEventEmail.send(action) 36 37 38 @receiver(dashboard_signals.project_created) 39 def send_project_created_notifications(**kwargs): 40 project = kwargs.get('project') 41 creator = kwargs.get('user') 42 emails.NotifyInitiatorsOnProjectCreatedEmail.send( 43 project, creator_pk=creator.pk) 44 45 46 @receiver(signals.m2m_changed, sender=Project.moderators.through) 47 def autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs): 48 if action == 'post_add': 49 autofollow_project(instance, pk_set, reverse) 50 51 52 def autofollow_project(instance, pk_set, reverse): 53 if not reverse: 54 project = instance 55 users_pks = pk_set 56 57 for user_pk in users_pks: 58 Follow.objects.update_or_create( 59 project=project, 60 creator_id=user_pk, 61 defaults={ 62 'enabled': True 63 } 64 ) 65 else: 66 user = instance 67 project_pks = pk_set 68 69 for project_pk in project_pks: 70 Follow.objects.update_or_create( 71 project_id=project_pk, 72 creator=user, 73 defaults={ 74 'enabled': True 75 } 76 ) 77 [end of meinberlin/apps/notifications/signals.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/notifications/signals.py b/meinberlin/apps/notifications/signals.py --- a/meinberlin/apps/notifications/signals.py +++ b/meinberlin/apps/notifications/signals.py @@ -25,7 +25,8 @@ if action.project: emails.NotifyModeratorsEmail.send(action) - elif action.type == 'phase': + elif (action.type == 'phase' and + action.project.project_type == 'a4projects.Project'): if verb == Verbs.START: emails.NotifyFollowersOnPhaseStartedEmail.send(action) elif verb == Verbs.SCHEDULE:
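A hedged reading of the patch above: phase notifications are now gated on the project's type, so b-plans and external projects (the records the issue complains about) stop triggering follower mails. The helper below merely restates that guard; the claim that those records report a `project_type` other than `'a4projects.Project'` is an assumption drawn from the issue, not something the diff spells out.

```python
def phase_mails_enabled(action):
    # Only plain adhocracy4 projects keep phase-started / phase-ending-soon mails.
    # B-plan and external-project records are assumed to expose a different
    # project_type string, so they fall through without sending anything.
    return (
        action.type == 'phase'
        and action.project.project_type == 'a4projects.Project'
    )
```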
{"golden_diff": "diff --git a/meinberlin/apps/notifications/signals.py b/meinberlin/apps/notifications/signals.py\n--- a/meinberlin/apps/notifications/signals.py\n+++ b/meinberlin/apps/notifications/signals.py\n@@ -25,7 +25,8 @@\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n \n- elif action.type == 'phase':\n+ elif (action.type == 'phase' and\n+ action.project.project_type == 'a4projects.Project'):\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n", "issue": "moderators for b-plans\nif an initiator starts a b-plan via meinBerlin (as e.g. SenWohn does, they don't have imperia) or an external project he/she is automatically added as moderator and gets mails as the one below. This is confusing because:\r\na) you don't see moderators in dashboard\r\nb) you can't follow a b-plan/external project\r\nc) the link does not go to the external page (in this case it goes here: https://mein.berlin.de/projects/bebauungsplan-8-66-buckower-felder/)\r\n\r\nShould we take out this rule for these two templates or stop sending mails?\r\n\r\n\r\n<img width=\"698\" alt=\"bildschirmfoto 2019-02-04 um 13 32 08\" src=\"https://user-images.githubusercontent.com/35491681/52208589-762c0780-2881-11e9-9781-21826347abe4.png\">\r\n\n", "before_files": [{"content": "from django.contrib.auth import get_user_model\nfrom django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom adhocracy4.dashboard import signals as dashboard_signals\nfrom adhocracy4.follows.models import Follow\nfrom adhocracy4.projects.models import Project\n\nfrom . import emails\n\nUser = get_user_model()\n\n\n@receiver(signals.post_save, sender=Action)\ndef send_notifications(instance, created, **kwargs):\n action = instance\n verb = Verbs(action.verb)\n\n if action.type in ('item', 'comment') \\\n and verb in (Verbs.CREATE, Verbs.ADD):\n emails.NotifyCreatorEmail.send(action)\n\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n\n elif action.type == 'phase':\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n\n elif action.type == 'offlineevent' and verb == Verbs.START:\n emails.NotifyFollowersOnUpcommingEventEmail.send(action)\n\n\n@receiver(dashboard_signals.project_created)\ndef send_project_created_notifications(**kwargs):\n project = kwargs.get('project')\n creator = kwargs.get('user')\n emails.NotifyInitiatorsOnProjectCreatedEmail.send(\n project, creator_pk=creator.pk)\n\n\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n if action == 'post_add':\n autofollow_project(instance, pk_set, reverse)\n\n\ndef autofollow_project(instance, pk_set, reverse):\n if not reverse:\n project = instance\n users_pks = pk_set\n\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project,\n creator_id=user_pk,\n defaults={\n 'enabled': True\n }\n )\n else:\n user = instance\n project_pks = pk_set\n\n for project_pk in project_pks:\n Follow.objects.update_or_create(\n project_id=project_pk,\n creator=user,\n defaults={\n 'enabled': True\n }\n )\n", "path": "meinberlin/apps/notifications/signals.py"}]}
1,431 | 142 | gh_patches_debug_16491 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-734
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> E0000 found unknown escape character ‘/’ version:1:1 cfn-lint --template vpc.cf.json E0000 found unknown escape character ‘/’ vpc.cf.json:12:135 this is the string that it says container the escape character error. this however works fine when deployed to the CFN service. "^([0-9]{1,3}\\.){3}[0-9]{1,3}(\\\/([0-9]|[1-2][0-9]|3[0-2]))?$" ![image](https://user-images.githubusercontent.com/42137702/54364854-a11a3000-4665-11e9-8454-9ab8033fc1e7.png) </issue> <code> [start of src/cfnlint/decode/__init__.py] 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 """ 17 import sys 18 import logging 19 import six 20 try: 21 from json.decoder import JSONDecodeError 22 except ImportError: 23 JSONDecodeError = ValueError 24 from yaml.parser import ParserError, ScannerError 25 from yaml import YAMLError 26 import cfnlint.decode.cfn_yaml 27 import cfnlint.decode.cfn_json 28 29 30 LOGGER = logging.getLogger(__name__) 31 32 33 def decode(filename, ignore_bad_template): 34 """ 35 Decode filename into an object 36 """ 37 template = None 38 matches = [] 39 try: 40 template = cfnlint.decode.cfn_yaml.load(filename) 41 except IOError as e: 42 if e.errno == 2: 43 LOGGER.error('Template file not found: %s', filename) 44 matches.append(create_match_file_error(filename, 'Template file not found: %s' % filename)) 45 elif e.errno == 21: 46 LOGGER.error('Template references a directory, not a file: %s', filename) 47 matches.append(create_match_file_error(filename, 'Template references a directory, not a file: %s' % filename)) 48 elif e.errno == 13: 49 LOGGER.error('Permission denied when accessing template file: %s', filename) 50 matches.append(create_match_file_error(filename, 'Permission denied when accessing template file: %s' % filename)) 51 52 if matches: 53 return(None, matches) 54 except UnicodeDecodeError as err: 55 LOGGER.error('Cannot read file contents: %s', filename) 56 matches.append(create_match_file_error(filename, 'Cannot read file contents: %s' % filename)) 57 except cfnlint.decode.cfn_yaml.CfnParseError as err: 58 err.match.Filename = filename 59 matches = [err.match] 60 61 except ParserError as err: 62 matches = [create_match_yaml_parser_error(err, filename)] 63 except ScannerError as err: 64 if err.problem == 'found character \'\\t\' that cannot start any token': 65 try: 66 template = cfnlint.decode.cfn_json.load(filename) 67 except cfnlint.decode.cfn_json.JSONDecodeError as json_err: 68 json_err.match.filename = filename 69 matches = [json_err.match] 70 except JSONDecodeError as json_err: 
71 matches = [create_match_json_parser_error(json_err, filename)] 72 except Exception as json_err: # pylint: disable=W0703 73 if ignore_bad_template: 74 LOGGER.info('Template %s is malformed: %s', filename, err.problem) 75 LOGGER.info('Tried to parse %s as JSON but got error: %s', filename, str(json_err)) 76 else: 77 LOGGER.error('Template %s is malformed: %s', filename, err.problem) 78 LOGGER.error('Tried to parse %s as JSON but got error: %s', filename, str(json_err)) 79 return(None, [create_match_file_error(filename, 'Tried to parse %s as JSON but got error: %s' % (filename, str(json_err)))]) 80 else: 81 matches = [create_match_yaml_parser_error(err, filename)] 82 except YAMLError as err: 83 matches = [create_match_file_error(filename, err)] 84 85 if not isinstance(template, dict) and not matches: 86 # Template isn't a dict which means nearly nothing will work 87 matches = [cfnlint.Match(1, 1, 1, 1, filename, cfnlint.ParseError(), message='Template needs to be an object.')] 88 return (template, matches) 89 90 91 def create_match_yaml_parser_error(parser_error, filename): 92 """Create a Match for a parser error""" 93 lineno = parser_error.problem_mark.line + 1 94 colno = parser_error.problem_mark.column + 1 95 msg = parser_error.problem 96 return cfnlint.Match( 97 lineno, colno, lineno, colno + 1, filename, 98 cfnlint.ParseError(), message=msg) 99 100 101 def create_match_file_error(filename, msg): 102 """Create a Match for a parser error""" 103 return cfnlint.Match( 104 linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2, 105 filename=filename, rule=cfnlint.ParseError(), message=msg) 106 107 108 def create_match_json_parser_error(parser_error, filename): 109 """Create a Match for a parser error""" 110 if sys.version_info[0] == 3: 111 lineno = parser_error.lineno 112 colno = parser_error.colno 113 msg = parser_error.msg 114 elif sys.version_info[0] == 2: 115 lineno = 1 116 colno = 1 117 msg = parser_error.message 118 return cfnlint.Match( 119 lineno, colno, lineno, colno + 1, filename, cfnlint.ParseError(), message=msg) 120 [end of src/cfnlint/decode/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/decode/__init__.py b/src/cfnlint/decode/__init__.py --- a/src/cfnlint/decode/__init__.py +++ b/src/cfnlint/decode/__init__.py @@ -57,11 +57,12 @@ except cfnlint.decode.cfn_yaml.CfnParseError as err: err.match.Filename = filename matches = [err.match] - except ParserError as err: matches = [create_match_yaml_parser_error(err, filename)] except ScannerError as err: - if err.problem == 'found character \'\\t\' that cannot start any token': + if err.problem in [ + 'found character \'\\t\' that cannot start any token', + 'found unknown escape character']: try: template = cfnlint.decode.cfn_json.load(filename) except cfnlint.decode.cfn_json.JSONDecodeError as json_err:
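A hedged reproduction of the failure the patch above targets: JSON allows the escape `\/` (it simply means `/`), but PyYAML's double-quoted scalar scanner rejects it, so a JSON template using that escape died in the YAML pass before cfn-lint could fall back to its JSON parser. The snippet assumes stock PyYAML; the exact wording of `err.problem` may include the offending character.

```python
import json

import yaml
from yaml.scanner import ScannerError

# Minimal stand-in for a template like vpc.cf.json that uses the JSON-legal "\/" escape.
template = '{"Pattern": "a\\/b"}'

try:
    yaml.safe_load(template)
except ScannerError as err:
    # The reported problem reads roughly "found unknown escape character '/'",
    # which is the string the patch adds to the JSON-fallback check.
    print(err.problem)

print(json.loads(template))  # plain JSON parses the same text fine: {'Pattern': 'a/b'}
```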
{"golden_diff": "diff --git a/src/cfnlint/decode/__init__.py b/src/cfnlint/decode/__init__.py\n--- a/src/cfnlint/decode/__init__.py\n+++ b/src/cfnlint/decode/__init__.py\n@@ -57,11 +57,12 @@\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\n err.match.Filename = filename\n matches = [err.match]\n-\n except ParserError as err:\n matches = [create_match_yaml_parser_error(err, filename)]\n except ScannerError as err:\n- if err.problem == 'found character \\'\\\\t\\' that cannot start any token':\n+ if err.problem in [\n+ 'found character \\'\\\\t\\' that cannot start any token',\n+ 'found unknown escape character']:\n try:\n template = cfnlint.decode.cfn_json.load(filename)\n except cfnlint.decode.cfn_json.JSONDecodeError as json_err:\n", "issue": "E0000 found unknown escape character \u2018/\u2019\nversion:1:1\r\n\r\ncfn-lint --template vpc.cf.json\r\nE0000 found unknown escape character \u2018/\u2019\r\nvpc.cf.json:12:135\r\n\r\nthis is the string that it says container the escape character error. this however works fine when deployed to the CFN service. \r\n\r\n\"^([0-9]{1,3}\\\\.){3}[0-9]{1,3}(\\\\\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\r\n\r\n![image](https://user-images.githubusercontent.com/42137702/54364854-a11a3000-4665-11e9-8454-9ab8033fc1e7.png)\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport sys\nimport logging\nimport six\ntry:\n from json.decoder import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\nfrom yaml.parser import ParserError, ScannerError\nfrom yaml import YAMLError\nimport cfnlint.decode.cfn_yaml\nimport cfnlint.decode.cfn_json\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef decode(filename, ignore_bad_template):\n \"\"\"\n Decode filename into an object\n \"\"\"\n template = None\n matches = []\n try:\n template = cfnlint.decode.cfn_yaml.load(filename)\n except IOError as e:\n if e.errno == 2:\n LOGGER.error('Template file not found: %s', filename)\n matches.append(create_match_file_error(filename, 'Template file not found: %s' % filename))\n elif e.errno == 21:\n LOGGER.error('Template references a directory, not a file: %s', filename)\n matches.append(create_match_file_error(filename, 'Template references a directory, not a file: %s' % filename))\n elif e.errno == 13:\n LOGGER.error('Permission denied when accessing template file: %s', filename)\n matches.append(create_match_file_error(filename, 'Permission denied when accessing template file: %s' % filename))\n\n if matches:\n return(None, matches)\n except UnicodeDecodeError as err:\n LOGGER.error('Cannot read file contents: %s', filename)\n matches.append(create_match_file_error(filename, 'Cannot read file contents: %s' % filename))\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\n err.match.Filename = filename\n matches = [err.match]\n\n except ParserError as err:\n matches = [create_match_yaml_parser_error(err, filename)]\n except ScannerError as err:\n if err.problem == 'found character \\'\\\\t\\' that cannot start any token':\n try:\n template = cfnlint.decode.cfn_json.load(filename)\n except cfnlint.decode.cfn_json.JSONDecodeError as json_err:\n json_err.match.filename = filename\n matches = [json_err.match]\n except JSONDecodeError as json_err:\n matches = [create_match_json_parser_error(json_err, filename)]\n except Exception as json_err: # pylint: disable=W0703\n if ignore_bad_template:\n LOGGER.info('Template %s is malformed: %s', filename, err.problem)\n LOGGER.info('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n else:\n LOGGER.error('Template %s is malformed: %s', filename, err.problem)\n LOGGER.error('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n return(None, [create_match_file_error(filename, 'Tried to parse %s as JSON but got error: %s' % (filename, str(json_err)))])\n else:\n matches = [create_match_yaml_parser_error(err, filename)]\n except YAMLError as err:\n matches = [create_match_file_error(filename, err)]\n\n if not isinstance(template, dict) and not matches:\n # Template isn't a dict which means nearly nothing will work\n matches = [cfnlint.Match(1, 1, 1, 1, filename, cfnlint.ParseError(), message='Template needs to be an object.')]\n return (template, matches)\n\n\ndef create_match_yaml_parser_error(parser_error, filename):\n \"\"\"Create a Match for a parser error\"\"\"\n lineno = parser_error.problem_mark.line + 1\n colno = parser_error.problem_mark.column + 1\n msg = parser_error.problem\n return cfnlint.Match(\n lineno, colno, lineno, colno + 1, filename,\n cfnlint.ParseError(), message=msg)\n\n\ndef 
create_match_file_error(filename, msg):\n \"\"\"Create a Match for a parser error\"\"\"\n return cfnlint.Match(\n linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,\n filename=filename, rule=cfnlint.ParseError(), message=msg)\n\n\ndef create_match_json_parser_error(parser_error, filename):\n \"\"\"Create a Match for a parser error\"\"\"\n if sys.version_info[0] == 3:\n lineno = parser_error.lineno\n colno = parser_error.colno\n msg = parser_error.msg\n elif sys.version_info[0] == 2:\n lineno = 1\n colno = 1\n msg = parser_error.message\n return cfnlint.Match(\n lineno, colno, lineno, colno + 1, filename, cfnlint.ParseError(), message=msg)\n", "path": "src/cfnlint/decode/__init__.py"}]}
2,203 | 206 | gh_patches_debug_6009 | rasdani/github-patches | git_diff | google__osv.dev-1021
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Maven 'zero' versions causes errors when comparing Maven versions that are 'zero' (e.g. `0.0.0` and `0.0.0-X.Y.Z`) cause assertion errors when comparing with non-zero versions, because the comparisons assume there's always an initial token without a `.` or `-` 'prefix'. This seems to be causing some of the missing vulnerabilities in #1018 </issue> <code> [start of osv/ecosystems/maven.py] 1 # Copyright 2021 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """Maven ecosystem helper.""" 15 16 import collections 17 import json 18 import re 19 20 import urllib.parse 21 from . import config 22 from .helper_base import DepsDevMixin, EnumerateError 23 from ..cache import cached 24 from ..request_helper import RequestHelper 25 26 27 # pylint: disable=line-too-long 28 # Maven's very complicated spec: 29 # http://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification 30 31 _TO_TRIM = ('0', '', 'final', 'ga') 32 _KEYWORD_ORDER = ('alpha', 'beta', 'milestone', 'rc', 'snapshot', '', 'sp') 33 34 35 def qualifier_order(token): 36 """Returns an integer representing a token's order.""" 37 # ".qualifier" < "-qualifier" < "-number" < ".number" 38 if token.value.isdigit(): 39 if token.prefix == '-': 40 return 2 41 42 assert token.prefix == '.' 43 return 3 44 45 if token.prefix == '-': 46 return 1 47 48 assert token.prefix == '.' 49 return 0 50 51 52 class VersionToken( 53 collections.namedtuple( 54 'VersionToken', 'prefix value is_null', defaults=(False,))): 55 """Version token.""" 56 57 __slots__ = () 58 59 def __eq__(self, other): 60 return self.prefix == other.prefix and self.value == other.value 61 62 def __lt__(self, other): 63 if self.prefix == other.prefix: 64 # if the prefix is the same, then compare the token: 65 if self.value.isdigit() and other.value.isdigit(): 66 # Numeric tokens have the natural order. 67 return int(self.value) < int(other.value) 68 # The spec is unclear, but according to Maven's implementation, numerics 69 # sort after non-numerics, **unless it's a null value**. 70 # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L443 71 if self.value.isdigit() and not self.is_null: 72 return False 73 74 if other.value.isdigit() and not other.is_null: 75 return True 76 77 # Non-numeric tokens ("qualifiers") have the alphabetical order, except 78 # for the following tokens which come first in _KEYWORD_ORDER. 
79 # 80 # The spec is unclear, but according to Maven's implementation, unknown 81 # qualifiers sort after known qualifiers: 82 # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L423 83 try: 84 left_idx = _KEYWORD_ORDER.index(self.value) 85 except ValueError: 86 left_idx = len(_KEYWORD_ORDER) 87 88 try: 89 right_idx = _KEYWORD_ORDER.index(other.value) 90 except ValueError: 91 right_idx = len(_KEYWORD_ORDER) 92 93 if left_idx == len(_KEYWORD_ORDER) and right_idx == len(_KEYWORD_ORDER): 94 # Both are unknown qualifiers. Just do a lexical comparison. 95 return self.value < other.value 96 97 return left_idx < right_idx 98 99 # else ".qualifier" < "-qualifier" < "-number" < ".number" 100 return qualifier_order(self) < qualifier_order(other) 101 102 103 class Version: 104 """Maven version.""" 105 106 def __init__(self): 107 self.tokens = [] 108 109 def __str__(self): 110 result = '' 111 for token in self.tokens: 112 result += token.prefix + token.value 113 114 return result 115 116 def __eq__(self, other): 117 return self.tokens == other.tokens 118 119 def __lt__(self, other): 120 for i in range(max(len(self.tokens), len(other.tokens))): 121 # the shorter one padded with enough "null" values with matching prefix to 122 # have the same length as the longer one. Padded "null" values depend on 123 # the prefix of the other version: 0 for '.', "" for '-' 124 if i >= len(self.tokens): 125 if other.tokens[i].prefix == '.': 126 left = VersionToken('.', '0', is_null=True) 127 else: 128 assert other.tokens[i].prefix == '-' 129 left = VersionToken('-', '', is_null=True) 130 else: 131 left = self.tokens[i] 132 133 if i >= len(other.tokens): 134 if self.tokens[i].prefix == '.': 135 right = VersionToken('.', '0', is_null=True) 136 else: 137 assert self.tokens[i].prefix == '-' 138 right = VersionToken('-', '', is_null=True) 139 else: 140 right = other.tokens[i] 141 142 if left == right: 143 continue 144 145 return left < right 146 147 @classmethod 148 def from_string(cls, str_version): 149 """Parse a version.""" 150 version = Version() 151 152 # The Maven coordinate is split in tokens between dots ('.'), hyphens ('-') 153 # and transitions between digits and characters. The prefix is recorded 154 # and will have effect on the order. 155 156 # Split and keep the delimiter. 157 tokens = re.split(r'([-.])', str_version) 158 for i in range(0, len(tokens), 2): 159 if i == 0: 160 # First token has no preceding prefix. 161 prefix = '' 162 else: 163 # Preceding prefix. 164 prefix = tokens[i - 1] 165 166 # A transition between digits and characters is equivalent to a hyphen. 167 # According to Maven's implementation: any non-digit is a "character": 168 # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L627 169 170 # Find instances of <digit><non-digit> or <non-digit><digit>. 171 # ?= makes the regex non-consuming (needed to catch overlapping 172 # transitions such as <digit><non-digit><digit>). 173 # This gives an array of indices where each index is where a hyphen should be. 174 transitions = [ 175 m.span()[0] + 1 176 for m in re.finditer(r'(?=(\d[^\d]|[^\d]\d))', tokens[i]) 177 ] 178 # Add the last index so that our algorithm to split up the current token works. 
179 transitions.append(len(tokens[i])) 180 181 prev_index = 0 182 for j, transition in enumerate(transitions): 183 if j > 0: 184 prefix = '-' 185 186 # The spec doesn't say this, but all qualifiers are case insensitive. 187 current = tokens[i][prev_index:transition].lower() 188 if not current: 189 # Empty tokens are replaced with "0". 190 current = '0' 191 192 # Normalize "cr" to "rc" for easier comparison since they are equal in 193 # precedence. 194 if current == 'cr': 195 current = 'rc' 196 197 # Also do this for 'ga', 'final' which are equivalent to empty string. 198 # "release" is not part of the spec but is implemented by Maven. 199 if current in ('ga', 'final', 'release'): 200 current = '' 201 202 # the "alpha", "beta" and "milestone" qualifiers can respectively be 203 # shortened to "a", "b" and "m" when directly followed by a number. 204 if transition != len(tokens[i]): 205 if current == 'a': 206 current = 'alpha' 207 208 if current == 'b': 209 current = 'beta' 210 211 if current == 'm': 212 current = 'milestone' 213 214 if current.isdigit(): 215 # Remove any leading zeros. 216 current = str(int(current)) 217 218 version.tokens.append(VersionToken(prefix, current)) 219 prev_index = transition 220 221 # Then, starting from the end of the version, the trailing "null" values 222 # (0, "", "final", "ga") are trimmed. 223 i = len(version.tokens) - 1 224 while i >= 0: 225 if version.tokens[i].value in _TO_TRIM: 226 version.tokens.pop(i) 227 i -= 1 228 continue 229 230 # This process is repeated at each remaining hyphen from end to start. 231 while i >= 0 and version.tokens[i].prefix != '-': 232 i -= 1 233 234 i -= 1 235 236 return version 237 238 239 class Maven(DepsDevMixin): 240 """Maven ecosystem.""" 241 242 _API_PACKAGE_URL = 'https://search.maven.org/solrsearch/select' 243 244 def sort_key(self, version): 245 """Sort key.""" 246 return Version.from_string(version) 247 248 @staticmethod 249 def _get_versions(package): 250 """Get versions.""" 251 versions = [] 252 request_helper = RequestHelper() 253 254 group_id, artifact_id = package.split(':', 2) 255 start = 0 256 257 while True: 258 query = { 259 'q': f'g:"{group_id}" AND a:"{artifact_id}"', 260 'core': 'gav', 261 'rows': '20', 262 'wt': 'json', 263 'start': start 264 } 265 url = Maven._API_PACKAGE_URL + '?' + urllib.parse.urlencode(query) 266 response = request_helper.get(url) 267 response = json.loads(response)['response'] 268 if response['numFound'] == 0: 269 raise EnumerateError(f'Package {package} not found') 270 271 for result in response['docs']: 272 versions.append(result['v']) 273 274 if len(versions) >= response['numFound']: 275 break 276 277 start = len(versions) 278 279 return versions 280 281 def enumerate_versions(self, 282 package, 283 introduced, 284 fixed=None, 285 last_affected=None, 286 limits=None): 287 """Enumerate versions.""" 288 if config.use_deps_dev: 289 return self._deps_dev_enumerate( 290 package, introduced, fixed, last_affected, limits=limits) 291 292 get_versions = self._get_versions 293 if config.shared_cache: 294 get_versions = cached(config.shared_cache)(get_versions) 295 296 versions = get_versions(package) 297 self.sort_versions(versions) 298 return self._get_affected_versions(versions, introduced, fixed, 299 last_affected, limits) 300 [end of osv/ecosystems/maven.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/osv/ecosystems/maven.py b/osv/ecosystems/maven.py --- a/osv/ecosystems/maven.py +++ b/osv/ecosystems/maven.py @@ -221,7 +221,7 @@ # Then, starting from the end of the version, the trailing "null" values # (0, "", "final", "ga") are trimmed. i = len(version.tokens) - 1 - while i >= 0: + while i > 0: # We always want at least one token for comparison if version.tokens[i].value in _TO_TRIM: version.tokens.pop(i) i -= 1
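A hedged illustration of the failure mode behind the one-line fix above, using the `Version` class from the file in this record (the import path is assumed from the repository layout):

```python
from osv.ecosystems.maven import Version  # path assumed from osv/ecosystems/maven.py

zero = Version.from_string("0.0.0")
nonzero = Version.from_string("1.2.3")

# Before the fix, the trailing-null trimming loop ran all the way down to index 0,
# so every token of a "zero" version was removed and zero.tokens was empty.
# Version.__lt__ then padded against nonzero.tokens[0], whose prefix is '' (neither
# '.' nor '-'), and hit the assert. Keeping at least one token avoids that:
print(zero.tokens)     # [VersionToken(prefix='', value='0', is_null=False)] after the fix
print(zero < nonzero)  # True after the fix; AssertionError before it
```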
{"golden_diff": "diff --git a/osv/ecosystems/maven.py b/osv/ecosystems/maven.py\n--- a/osv/ecosystems/maven.py\n+++ b/osv/ecosystems/maven.py\n@@ -221,7 +221,7 @@\n # Then, starting from the end of the version, the trailing \"null\" values\n # (0, \"\", \"final\", \"ga\") are trimmed.\n i = len(version.tokens) - 1\n- while i >= 0:\n+ while i > 0: # We always want at least one token for comparison\n if version.tokens[i].value in _TO_TRIM:\n version.tokens.pop(i)\n i -= 1\n", "issue": "Maven 'zero' versions causes errors when comparing\nMaven versions that are 'zero' (e.g. `0.0.0` and `0.0.0-X.Y.Z`) cause assertion errors when comparing with non-zero versions, because the comparisons assume there's always an initial token without a `.` or `-` 'prefix'.\r\n\r\nThis seems to be causing some of the missing vulnerabilities in #1018\n", "before_files": [{"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Maven ecosystem helper.\"\"\"\n\nimport collections\nimport json\nimport re\n\nimport urllib.parse\nfrom . import config\nfrom .helper_base import DepsDevMixin, EnumerateError\nfrom ..cache import cached\nfrom ..request_helper import RequestHelper\n\n\n# pylint: disable=line-too-long\n# Maven's very complicated spec:\n# http://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification\n\n_TO_TRIM = ('0', '', 'final', 'ga')\n_KEYWORD_ORDER = ('alpha', 'beta', 'milestone', 'rc', 'snapshot', '', 'sp')\n\n\ndef qualifier_order(token):\n \"\"\"Returns an integer representing a token's order.\"\"\"\n # \".qualifier\" < \"-qualifier\" < \"-number\" < \".number\"\n if token.value.isdigit():\n if token.prefix == '-':\n return 2\n\n assert token.prefix == '.'\n return 3\n\n if token.prefix == '-':\n return 1\n\n assert token.prefix == '.'\n return 0\n\n\nclass VersionToken(\n collections.namedtuple(\n 'VersionToken', 'prefix value is_null', defaults=(False,))):\n \"\"\"Version token.\"\"\"\n\n __slots__ = ()\n\n def __eq__(self, other):\n return self.prefix == other.prefix and self.value == other.value\n\n def __lt__(self, other):\n if self.prefix == other.prefix:\n # if the prefix is the same, then compare the token:\n if self.value.isdigit() and other.value.isdigit():\n # Numeric tokens have the natural order.\n return int(self.value) < int(other.value)\n # The spec is unclear, but according to Maven's implementation, numerics\n # sort after non-numerics, **unless it's a null value**.\n # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L443\n if self.value.isdigit() and not self.is_null:\n return False\n\n if other.value.isdigit() and not other.is_null:\n return True\n\n # Non-numeric tokens (\"qualifiers\") have the alphabetical order, except\n # for the following tokens which come first in _KEYWORD_ORDER.\n #\n # The spec is unclear, but according to Maven's implementation, unknown\n # qualifiers sort after known qualifiers:\n # 
https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L423\n try:\n left_idx = _KEYWORD_ORDER.index(self.value)\n except ValueError:\n left_idx = len(_KEYWORD_ORDER)\n\n try:\n right_idx = _KEYWORD_ORDER.index(other.value)\n except ValueError:\n right_idx = len(_KEYWORD_ORDER)\n\n if left_idx == len(_KEYWORD_ORDER) and right_idx == len(_KEYWORD_ORDER):\n # Both are unknown qualifiers. Just do a lexical comparison.\n return self.value < other.value\n\n return left_idx < right_idx\n\n # else \".qualifier\" < \"-qualifier\" < \"-number\" < \".number\"\n return qualifier_order(self) < qualifier_order(other)\n\n\nclass Version:\n \"\"\"Maven version.\"\"\"\n\n def __init__(self):\n self.tokens = []\n\n def __str__(self):\n result = ''\n for token in self.tokens:\n result += token.prefix + token.value\n\n return result\n\n def __eq__(self, other):\n return self.tokens == other.tokens\n\n def __lt__(self, other):\n for i in range(max(len(self.tokens), len(other.tokens))):\n # the shorter one padded with enough \"null\" values with matching prefix to\n # have the same length as the longer one. Padded \"null\" values depend on\n # the prefix of the other version: 0 for '.', \"\" for '-'\n if i >= len(self.tokens):\n if other.tokens[i].prefix == '.':\n left = VersionToken('.', '0', is_null=True)\n else:\n assert other.tokens[i].prefix == '-'\n left = VersionToken('-', '', is_null=True)\n else:\n left = self.tokens[i]\n\n if i >= len(other.tokens):\n if self.tokens[i].prefix == '.':\n right = VersionToken('.', '0', is_null=True)\n else:\n assert self.tokens[i].prefix == '-'\n right = VersionToken('-', '', is_null=True)\n else:\n right = other.tokens[i]\n\n if left == right:\n continue\n\n return left < right\n\n @classmethod\n def from_string(cls, str_version):\n \"\"\"Parse a version.\"\"\"\n version = Version()\n\n # The Maven coordinate is split in tokens between dots ('.'), hyphens ('-')\n # and transitions between digits and characters. 
The prefix is recorded\n # and will have effect on the order.\n\n # Split and keep the delimiter.\n tokens = re.split(r'([-.])', str_version)\n for i in range(0, len(tokens), 2):\n if i == 0:\n # First token has no preceding prefix.\n prefix = ''\n else:\n # Preceding prefix.\n prefix = tokens[i - 1]\n\n # A transition between digits and characters is equivalent to a hyphen.\n # According to Maven's implementation: any non-digit is a \"character\":\n # https://github.com/apache/maven/blob/965aaa53da5c2d814e94a41d37142d0d6830375d/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/ComparableVersion.java#L627\n\n # Find instances of <digit><non-digit> or <non-digit><digit>.\n # ?= makes the regex non-consuming (needed to catch overlapping\n # transitions such as <digit><non-digit><digit>).\n # This gives an array of indices where each index is where a hyphen should be.\n transitions = [\n m.span()[0] + 1\n for m in re.finditer(r'(?=(\\d[^\\d]|[^\\d]\\d))', tokens[i])\n ]\n # Add the last index so that our algorithm to split up the current token works.\n transitions.append(len(tokens[i]))\n\n prev_index = 0\n for j, transition in enumerate(transitions):\n if j > 0:\n prefix = '-'\n\n # The spec doesn't say this, but all qualifiers are case insensitive.\n current = tokens[i][prev_index:transition].lower()\n if not current:\n # Empty tokens are replaced with \"0\".\n current = '0'\n\n # Normalize \"cr\" to \"rc\" for easier comparison since they are equal in\n # precedence.\n if current == 'cr':\n current = 'rc'\n\n # Also do this for 'ga', 'final' which are equivalent to empty string.\n # \"release\" is not part of the spec but is implemented by Maven.\n if current in ('ga', 'final', 'release'):\n current = ''\n\n # the \"alpha\", \"beta\" and \"milestone\" qualifiers can respectively be\n # shortened to \"a\", \"b\" and \"m\" when directly followed by a number.\n if transition != len(tokens[i]):\n if current == 'a':\n current = 'alpha'\n\n if current == 'b':\n current = 'beta'\n\n if current == 'm':\n current = 'milestone'\n\n if current.isdigit():\n # Remove any leading zeros.\n current = str(int(current))\n\n version.tokens.append(VersionToken(prefix, current))\n prev_index = transition\n\n # Then, starting from the end of the version, the trailing \"null\" values\n # (0, \"\", \"final\", \"ga\") are trimmed.\n i = len(version.tokens) - 1\n while i >= 0:\n if version.tokens[i].value in _TO_TRIM:\n version.tokens.pop(i)\n i -= 1\n continue\n\n # This process is repeated at each remaining hyphen from end to start.\n while i >= 0 and version.tokens[i].prefix != '-':\n i -= 1\n\n i -= 1\n\n return version\n\n\nclass Maven(DepsDevMixin):\n \"\"\"Maven ecosystem.\"\"\"\n\n _API_PACKAGE_URL = 'https://search.maven.org/solrsearch/select'\n\n def sort_key(self, version):\n \"\"\"Sort key.\"\"\"\n return Version.from_string(version)\n\n @staticmethod\n def _get_versions(package):\n \"\"\"Get versions.\"\"\"\n versions = []\n request_helper = RequestHelper()\n\n group_id, artifact_id = package.split(':', 2)\n start = 0\n\n while True:\n query = {\n 'q': f'g:\"{group_id}\" AND a:\"{artifact_id}\"',\n 'core': 'gav',\n 'rows': '20',\n 'wt': 'json',\n 'start': start\n }\n url = Maven._API_PACKAGE_URL + '?' 
+ urllib.parse.urlencode(query)\n response = request_helper.get(url)\n response = json.loads(response)['response']\n if response['numFound'] == 0:\n raise EnumerateError(f'Package {package} not found')\n\n for result in response['docs']:\n versions.append(result['v'])\n\n if len(versions) >= response['numFound']:\n break\n\n start = len(versions)\n\n return versions\n\n def enumerate_versions(self,\n package,\n introduced,\n fixed=None,\n last_affected=None,\n limits=None):\n \"\"\"Enumerate versions.\"\"\"\n if config.use_deps_dev:\n return self._deps_dev_enumerate(\n package, introduced, fixed, last_affected, limits=limits)\n\n get_versions = self._get_versions\n if config.shared_cache:\n get_versions = cached(config.shared_cache)(get_versions)\n\n versions = get_versions(package)\n self.sort_versions(versions)\n return self._get_affected_versions(versions, introduced, fixed,\n last_affected, limits)\n", "path": "osv/ecosystems/maven.py"}]}
3,834
156
gh_patches_debug_2520
rasdani/github-patches
git_diff
cal-itp__benefits-1215
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Refactor Agency dynamic headline into model prop Right now we are hardcoding the [Agency index headline PO key](https://github.com/cal-itp/benefits/blob/dev/benefits/core/views.py#L62): ```python page = viewmodels.Page( title=_("core.pages.agency_index.title"), headline=_("core.pages.agency_index.mst_cc.headline"), button=button, classes="home", ) ``` This is fine for MST. We need to make this a dynamic key coming from an `agency` prop for the future. ## Acceptance Criteria <!-- Remember to consider edge cases --> - [ ] `agency_index` gets its headline from the selected `agency` </issue> <code> [start of benefits/core/views.py] 1 """ 2 The core application: view definition for the root of the webapp. 3 """ 4 from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError 5 from django.template import loader 6 from django.template.response import TemplateResponse 7 from django.urls import reverse 8 from django.utils.translation import pgettext, gettext as _ 9 10 from . import models, session, viewmodels 11 from .middleware import pageview_decorator 12 13 ROUTE_INDEX = "core:index" 14 ROUTE_ELIGIBILITY = "eligibility:index" 15 ROUTE_HELP = "core:help" 16 ROUTE_LOGGED_OUT = "core:logged_out" 17 18 TEMPLATE_INDEX = "core/index.html" 19 TEMPLATE_AGENCY = "core/agency_index.html" 20 TEMPLATE_HELP = "core/help.html" 21 TEMPLATE_LOGGED_OUT = "core/logged_out.html" 22 23 24 @pageview_decorator 25 def index(request): 26 """View handler for the main entry page.""" 27 session.reset(request) 28 29 page = viewmodels.Page( 30 title=_("core.pages.index.title"), 31 headline=_("core.pages.index.headline"), 32 modal=viewmodels.AgencySelector( 33 id="agency-selector", 34 aria_labelledby_id="agency-selector-modal-label", 35 button_text=_("core.pages.index.button"), 36 ), 37 ) 38 39 return TemplateResponse(request, TEMPLATE_INDEX, page.context_dict()) 40 41 42 @pageview_decorator 43 def agency_index(request, agency): 44 """View handler for an agency entry page.""" 45 session.reset(request) 46 session.update(request, agency=agency, origin=agency.index_url) 47 48 button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse(ROUTE_ELIGIBILITY)) 49 50 page = viewmodels.Page( 51 title=_("core.pages.agency_index.title"), 52 headline=_("core.pages.agency_index.mst_cc.headline"), 53 button=button, 54 ) 55 56 return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict()) 57 58 59 @pageview_decorator 60 def agency_public_key(request, agency): 61 """View handler returns an agency's public key as plain text.""" 62 return HttpResponse(agency.public_key_data, content_type="text/plain") 63 64 65 @pageview_decorator 66 def help(request): 67 """View handler for the help page.""" 68 if session.active_agency(request): 69 agency = session.agency(request) 70 buttons = viewmodels.Button.agency_contact_links(agency) 71 else: 72 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)] 73 74 buttons.append(viewmodels.Button.home(request, _("core.buttons.back"))) 75 76 page = viewmodels.Page( 77 title=_("core.buttons.help"), 78 headline=_("core.buttons.help"), 79 buttons=buttons, 80 ) 81 82 return TemplateResponse(request, TEMPLATE_HELP, page.context_dict()) 83 84 85 @pageview_decorator 86 def bad_request(request, exception, template_name="400.html"): 87 """View handler for HTTP 400 Bad Request 
responses.""" 88 if session.active_agency(request): 89 session.update(request, origin=session.agency(request).index_url) 90 else: 91 session.update(request, origin=reverse(ROUTE_INDEX)) 92 93 home = viewmodels.Button.home(request) 94 page = viewmodels.ErrorPage.server_error(button=home) 95 t = loader.get_template(template_name) 96 97 return HttpResponseBadRequest(t.render(page.context_dict())) 98 99 100 @pageview_decorator 101 def csrf_failure(request, reason): 102 """ 103 View handler for CSRF_FAILURE_VIEW with custom data. 104 """ 105 if session.active_agency(request): 106 session.update(request, origin=session.agency(request).index_url) 107 else: 108 session.update(request, origin=reverse(ROUTE_INDEX)) 109 110 home = viewmodels.Button.home(request) 111 page = viewmodels.ErrorPage.not_found(button=home, path=request.path) 112 t = loader.get_template("400.html") 113 114 return HttpResponseNotFound(t.render(page.context_dict())) 115 116 117 @pageview_decorator 118 def page_not_found(request, exception, template_name="404.html"): 119 """View handler for HTTP 404 Not Found responses.""" 120 if session.active_agency(request): 121 session.update(request, origin=session.agency(request).index_url) 122 else: 123 session.update(request, origin=reverse(ROUTE_INDEX)) 124 125 home = viewmodels.Button.home(request) 126 # show a more user-friendly message instead of not_found 127 page = viewmodels.ErrorPage.user_error(button=home, path=request.path) 128 t = loader.get_template(template_name) 129 130 return HttpResponseNotFound(t.render(page.context_dict())) 131 132 133 @pageview_decorator 134 def server_error(request, template_name="500.html"): 135 """View handler for HTTP 500 Server Error responses.""" 136 if session.active_agency(request): 137 session.update(request, origin=session.agency(request).index_url) 138 else: 139 session.update(request, origin=reverse(ROUTE_INDEX)) 140 141 home = viewmodels.Button.home(request) 142 page = viewmodels.ErrorPage.server_error(button=home) 143 t = loader.get_template(template_name) 144 145 return HttpResponseServerError(t.render(page.context_dict())) 146 147 148 def logged_out(request): 149 """View handler for the final log out confirmation message.""" 150 page = viewmodels.Page( 151 title=_("core.pages.logged_out.title"), 152 icon=viewmodels.Icon("happybus", pgettext("image alt text", "core.icons.happybus")), 153 ) 154 155 return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict()) 156 [end of benefits/core/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benefits/core/views.py b/benefits/core/views.py --- a/benefits/core/views.py +++ b/benefits/core/views.py @@ -49,7 +49,8 @@ page = viewmodels.Page( title=_("core.pages.agency_index.title"), - headline=_("core.pages.agency_index.mst_cc.headline"), + headline=_("core.pages.agency_index.headline%(transit_agency_short_name)s") + % {"transit_agency_short_name": agency.short_name}, button=button, )
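A hedged note on the interpolation the patch above introduces: `gettext` looks up the literal message id, placeholder and all, and the agency's `short_name` is substituted into the translated string only afterwards, so the translation is expected to carry `%(transit_agency_short_name)s` wherever the agency name should appear. The short name below is a stand-in; inside the running Django app it comes from the selected `TransitAgency`.

```python
from django.utils.translation import gettext as _

# Requires Django settings/translations to be loaded, as in the benefits app itself.
headline = _("core.pages.agency_index.headline%(transit_agency_short_name)s") % {
    "transit_agency_short_name": "MST",  # hypothetical agency.short_name
}
```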
{"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -49,7 +49,8 @@\n \n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n- headline=_(\"core.pages.agency_index.mst_cc.headline\"),\n+ headline=_(\"core.pages.agency_index.headline%(transit_agency_short_name)s\")\n+ % {\"transit_agency_short_name\": agency.short_name},\n button=button,\n )\n", "issue": "Refactor Agency dynamic headline into model prop\nRight now we are hardcoding the [Agency index headline PO key](https://github.com/cal-itp/benefits/blob/dev/benefits/core/views.py#L62):\r\n\r\n```python\r\npage = viewmodels.Page(\r\n title=_(\"core.pages.agency_index.title\"),\r\n headline=_(\"core.pages.agency_index.mst_cc.headline\"),\r\n button=button,\r\n classes=\"home\",\r\n )\r\n```\r\n\r\nThis is fine for MST. We need to make this a dynamic key coming from an `agency` prop for the future.\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [ ] `agency_index` gets its headline from the selected `agency`\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom . import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\nROUTE_LOGGED_OUT = \"core:logged_out\"\n\nTEMPLATE_INDEX = \"core/index.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\nTEMPLATE_LOGGED_OUT = \"core/logged_out.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n headline=_(\"core.pages.index.headline\"),\n modal=viewmodels.AgencySelector(\n id=\"agency-selector\",\n aria_labelledby_id=\"agency-selector-modal-label\",\n button_text=_(\"core.pages.index.button\"),\n ),\n )\n\n return TemplateResponse(request, TEMPLATE_INDEX, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n headline=_(\"core.pages.agency_index.mst_cc.headline\"),\n button=button,\n )\n\n return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n 
buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n headline=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n\n\ndef logged_out(request):\n \"\"\"View handler for the final log out confirmation message.\"\"\"\n page = viewmodels.Page(\n title=_(\"core.pages.logged_out.title\"),\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n )\n\n return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())\n", "path": "benefits/core/views.py"}]}
2,167
123
gh_patches_debug_13285
rasdani/github-patches
git_diff
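The golden diff in this row swaps the hard-coded `core.pages.agency_index.mst_cc.headline` key for a generic key whose translated string carries a named placeholder, then interpolates the selected agency's `short_name`. A minimal sketch of that gettext-plus-interpolation pattern, assuming a configured Django translation setup and an `agency` object exposing `short_name` (the function name here is illustrative, not from the repository):

```python
from django.utils.translation import gettext as _

def agency_headline(agency):
    # Translation happens first; the PO entry for this key is assumed to keep
    # the %(transit_agency_short_name)s placeholder in its msgstr.
    return _("core.pages.agency_index.headline%(transit_agency_short_name)s") % {
        "transit_agency_short_name": agency.short_name
    }
```

Keeping the placeholder inside the translated string, rather than concatenating afterwards, lets translators position the agency name wherever their language needs it.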
alltheplaces__alltheplaces-341
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> La Veneciana (Argentina) Ice cream shop. HTML webpage to scrape: http://www.laveneciana.com.ar/sucursales.html </issue> <code> [start of locations/spiders/laveneciana.py] 1 import scrapy 2 import re 3 from locations.items import GeojsonPointItem 4 class LavenecianaSpider(scrapy.Spider): 5 name = "laveneciana" 6 allowed_domains = ["www.laveneciana.com.ar"] 7 download_delay = 0.5 8 start_urls = ( 9 'http://www.laveneciana.com.ar/sucursales.html', 10 ) 11 def parse(self, response): 12 stores = response.xpath('//div[@class="navigation-container"]/div[@id="thumbs"]/ul[@class="thumbs noscript"]/li') 13 for store in stores: 14 addr_full_tel = store.xpath('normalize-space(./div[@class="caption"]/div[@class="image-desc"]/text())').extract_first() 15 location = store.xpath('normalize-space(./div[@class="caption"]/div[@class="ubicacion"]/iframe/@src)').extract_first() 16 position = re.findall(r"ll=[0-9-.,]+" ,location) 17 id = re.findall(r"cid=[0-9]+" ,location) 18 if(len(position)>0): 19 lat =float( position[0][3:].split(',')[0]) 20 lon = float(position[0][3:].split(',')[1]) 21 id = id[0][4:] 22 else: 23 lat='' 24 lon='' 25 id='' 26 addr_full = re.findall(r"^[^()]{4}[^(.)]+" , addr_full_tel)[0] 27 phone_number = re.findall(r"[0-9]{4}-[0-9]{4}",addr_full_tel) 28 if(len(phone_number)>0): 29 phone_number = phone_number[0] 30 else: 31 phone_number ='' 32 if(addr_full!="Direccion"): 33 properties = { 34 'addr_full': addr_full, 35 'phone':phone_number, 36 'city': '', 37 'state': '', 38 'postcode':'', 39 'ref': id, 40 'website': response.url, 41 'lat': lat, 42 'lon': lon, 43 } 44 yield GeojsonPointItem(**properties) 45 [end of locations/spiders/laveneciana.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/laveneciana.py b/locations/spiders/laveneciana.py --- a/locations/spiders/laveneciana.py +++ b/locations/spiders/laveneciana.py @@ -23,13 +23,9 @@ lat='' lon='' id='' - addr_full = re.findall(r"^[^()]{4}[^(.)]+" , addr_full_tel)[0] - phone_number = re.findall(r"[0-9]{4}-[0-9]{4}",addr_full_tel) - if(len(phone_number)>0): - phone_number = phone_number[0] - else: - phone_number ='' - if(addr_full!="Direccion"): + addr_full = addr_full_tel.split('Tel.: ')[0] + phone_number = addr_full_tel.split('Tel.: ')[1] + if(addr_full!="Direccion... "): properties = { 'addr_full': addr_full, 'phone':phone_number,
{"golden_diff": "diff --git a/locations/spiders/laveneciana.py b/locations/spiders/laveneciana.py\n--- a/locations/spiders/laveneciana.py\n+++ b/locations/spiders/laveneciana.py\n@@ -23,13 +23,9 @@\n lat=''\n lon=''\n id=''\n- addr_full = re.findall(r\"^[^()]{4}[^(.)]+\" , addr_full_tel)[0]\n- phone_number = re.findall(r\"[0-9]{4}-[0-9]{4}\",addr_full_tel)\n- if(len(phone_number)>0):\n- phone_number = phone_number[0]\n- else:\n- phone_number =''\n- if(addr_full!=\"Direccion\"):\n+ addr_full = addr_full_tel.split('Tel.: ')[0]\n+ phone_number = addr_full_tel.split('Tel.: ')[1]\n+ if(addr_full!=\"Direccion... \"):\n properties = {\n 'addr_full': addr_full,\n 'phone':phone_number,\n", "issue": "La Veneciana (Argentina)\nIce cream shop.\r\n\r\nHTML webpage to scrape: http://www.laveneciana.com.ar/sucursales.html\n", "before_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\nclass LavenecianaSpider(scrapy.Spider):\n name = \"laveneciana\"\n allowed_domains = [\"www.laveneciana.com.ar\"]\n download_delay = 0.5\n start_urls = (\n 'http://www.laveneciana.com.ar/sucursales.html',\n )\n def parse(self, response):\n stores = response.xpath('//div[@class=\"navigation-container\"]/div[@id=\"thumbs\"]/ul[@class=\"thumbs noscript\"]/li')\n for store in stores:\n addr_full_tel = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"image-desc\"]/text())').extract_first()\n location = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"ubicacion\"]/iframe/@src)').extract_first()\n position = re.findall(r\"ll=[0-9-.,]+\" ,location)\n id = re.findall(r\"cid=[0-9]+\" ,location)\n if(len(position)>0):\n lat =float( position[0][3:].split(',')[0])\n lon = float(position[0][3:].split(',')[1])\n id = id[0][4:]\n else:\n lat=''\n lon=''\n id=''\n addr_full = re.findall(r\"^[^()]{4}[^(.)]+\" , addr_full_tel)[0]\n phone_number = re.findall(r\"[0-9]{4}-[0-9]{4}\",addr_full_tel)\n if(len(phone_number)>0):\n phone_number = phone_number[0]\n else:\n phone_number =''\n if(addr_full!=\"Direccion\"):\n properties = {\n 'addr_full': addr_full,\n 'phone':phone_number,\n 'city': '',\n 'state': '',\n 'postcode':'',\n 'ref': id,\n 'website': response.url,\n 'lat': lat,\n 'lon': lon,\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/laveneciana.py"}]}
1,080
219
gh_patches_debug_60373
rasdani/github-patches
git_diff
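The accepted patch for the La Veneciana spider drops the regex extraction and simply splits the caption on the literal `'Tel.: '` separator, taking the text before it as the address and the text after it as the phone number, and it tightens the placeholder check to `"Direccion... "`. A rough illustration with a made-up caption string (not copied from the live site):

```python
# Hypothetical caption in the shape the spider expects.
addr_full_tel = "Av. Corrientes 1234, CABA. Tel.: 4567-8901"

addr_full = addr_full_tel.split('Tel.: ')[0]     # "Av. Corrientes 1234, CABA. "
phone_number = addr_full_tel.split('Tel.: ')[1]  # "4567-8901"; raises IndexError if
                                                 # the separator is missing, as in the patch

if addr_full != "Direccion... ":                 # skip placeholder rows
    print(addr_full, phone_number)
```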
UTNkar__moore-151
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Paragraph block alignment <!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] --> See image: ![image](https://user-images.githubusercontent.com/29704138/27686692-2cadaf20-5cd4-11e7-92bf-caa849baa276.png) [Description of the issue] ### Steps to Reproduce 1. [First Step] 2. [Second Step] 3. [and so on...] <!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label --> </issue> <code> [start of website/blocks/models.py] 1 from wagtail.wagtailcore import blocks 2 from wagtail.wagtailimages.blocks import ImageChooserBlock 3 4 from django.utils.translation import ugettext_lazy as _ 5 6 7 class CountersBlock(blocks.StructBlock): 8 title = blocks.CharBlock() 9 counters = blocks.ListBlock(blocks.StructBlock([ 10 ('icon', blocks.CharBlock( 11 help_text=_('Material icon font icon text, as found on: ' 12 'https://material.io/icons'), 13 )), 14 ('value', blocks.CharBlock()), 15 ('description', blocks.CharBlock(required=False)) 16 ])) 17 style = blocks.ChoiceBlock(choices=[ 18 ('light', _('Light')), 19 ('dark', _('Dark')), 20 ]) 21 22 class Meta: 23 label = _('Counters') 24 icon = 'fa-balance-scale' 25 template = 'blocks/counter.html' 26 27 28 class HeadingBlock(blocks.StructBlock): 29 title = blocks.CharBlock(required=True) 30 subtitle = blocks.CharBlock(required=False) 31 32 class Meta: 33 label = _('Heading') 34 icon = 'fa-header' 35 template = 'blocks/title.html' 36 37 38 class ImageDescriptionBlock(blocks.StructBlock): 39 description = blocks.RichTextBlock() 40 image = ImageChooserBlock() 41 image_alignment = blocks.ChoiceBlock(choices=[ 42 ('left', _('Left')), 43 ('right', _('Right')), 44 ]) 45 hide_on_med = blocks.BooleanBlock(required=False) 46 47 class Meta: 48 label = _('Image + Description') 49 icon = 'fa-file-image-o ' 50 template = 'blocks/image_description.html' 51 52 53 class ImageIconsBlock(blocks.StructBlock): 54 title = blocks.CharBlock() 55 image = ImageChooserBlock() 56 image_alignment = blocks.ChoiceBlock(choices=[ 57 ('left', _('Left')), 58 ('right', _('Right')), 59 ]) 60 icons = blocks.ListBlock(blocks.StructBlock([ 61 ('icon', blocks.CharBlock( 62 help_text=_('Material icon font icon text, as found on: ' 63 'https://material.io/icons'), 64 )), 65 ('title', blocks.CharBlock()), 66 ('description', blocks.CharBlock()) 67 ])) 68 hide_on_med = blocks.BooleanBlock(required=False) 69 70 class Meta: 71 label = _('Image + Icons') 72 icon = 'fa-file-excel-o' 73 template = 'blocks/image_icons.html' 74 75 76 class OverlayBlock(blocks.StructBlock): 77 image = ImageChooserBlock() 78 title = blocks.CharBlock(required=False) 79 description = blocks.CharBlock(required=False) 80 81 link = blocks.URLBlock(required=False) 82 button = blocks.CharBlock(required=False) 83 84 class Meta: 85 label = _('Image overlay') 86 icon = 'fa-clone' 87 template = 'blocks/overlay.html' 88 89 90 WAGTAIL_STATIC_BLOCKTYPES = [ 91 ('heading', HeadingBlock()), 92 ('paragraph', blocks.RichTextBlock()), 93 ('image_description', ImageIconsBlock()), 94 ('image_icons', ImageDescriptionBlock()), 95 ('overlay', OverlayBlock()), 96 ('logos', blocks.ListBlock( 97 ImageChooserBlock(), 98 icon='fa-pied-piper', 99 template='blocks/logos.html', 100 label=_('Logos'), 101 )), 102 ('counters', CountersBlock()), 103 ('image', ImageChooserBlock(template='blocks/image.html')), 104 ] 105 [end of website/blocks/models.py] </code> I need 
you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/blocks/models.py b/website/blocks/models.py --- a/website/blocks/models.py +++ b/website/blocks/models.py @@ -89,7 +89,7 @@ WAGTAIL_STATIC_BLOCKTYPES = [ ('heading', HeadingBlock()), - ('paragraph', blocks.RichTextBlock()), + ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')), ('image_description', ImageIconsBlock()), ('image_icons', ImageDescriptionBlock()), ('overlay', OverlayBlock()),
{"golden_diff": "diff --git a/website/blocks/models.py b/website/blocks/models.py\n--- a/website/blocks/models.py\n+++ b/website/blocks/models.py\n@@ -89,7 +89,7 @@\n \n WAGTAIL_STATIC_BLOCKTYPES = [\n ('heading', HeadingBlock()),\n- ('paragraph', blocks.RichTextBlock()),\n+ ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')),\n ('image_description', ImageIconsBlock()),\n ('image_icons', ImageDescriptionBlock()),\n ('overlay', OverlayBlock()),\n", "issue": "Paragraph block alignment\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\nSee image:\r\n\r\n![image](https://user-images.githubusercontent.com/29704138/27686692-2cadaf20-5cd4-11e7-92bf-caa849baa276.png)\r\n\r\n[Description of the issue]\r\n\r\n### Steps to Reproduce\r\n\r\n1. [First Step]\r\n2. [Second Step]\r\n3. [and so on...]\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\r\n\n", "before_files": [{"content": "from wagtail.wagtailcore import blocks\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CountersBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n counters = blocks.ListBlock(blocks.StructBlock([\n ('icon', blocks.CharBlock(\n help_text=_('Material icon font icon text, as found on: '\n 'https://material.io/icons'),\n )),\n ('value', blocks.CharBlock()),\n ('description', blocks.CharBlock(required=False))\n ]))\n style = blocks.ChoiceBlock(choices=[\n ('light', _('Light')),\n ('dark', _('Dark')),\n ])\n\n class Meta:\n label = _('Counters')\n icon = 'fa-balance-scale'\n template = 'blocks/counter.html'\n\n\nclass HeadingBlock(blocks.StructBlock):\n title = blocks.CharBlock(required=True)\n subtitle = blocks.CharBlock(required=False)\n\n class Meta:\n label = _('Heading')\n icon = 'fa-header'\n template = 'blocks/title.html'\n\n\nclass ImageDescriptionBlock(blocks.StructBlock):\n description = blocks.RichTextBlock()\n image = ImageChooserBlock()\n image_alignment = blocks.ChoiceBlock(choices=[\n ('left', _('Left')),\n ('right', _('Right')),\n ])\n hide_on_med = blocks.BooleanBlock(required=False)\n\n class Meta:\n label = _('Image + Description')\n icon = 'fa-file-image-o '\n template = 'blocks/image_description.html'\n\n\nclass ImageIconsBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n image = ImageChooserBlock()\n image_alignment = blocks.ChoiceBlock(choices=[\n ('left', _('Left')),\n ('right', _('Right')),\n ])\n icons = blocks.ListBlock(blocks.StructBlock([\n ('icon', blocks.CharBlock(\n help_text=_('Material icon font icon text, as found on: '\n 'https://material.io/icons'),\n )),\n ('title', blocks.CharBlock()),\n ('description', blocks.CharBlock())\n ]))\n hide_on_med = blocks.BooleanBlock(required=False)\n\n class Meta:\n label = _('Image + Icons')\n icon = 'fa-file-excel-o'\n template = 'blocks/image_icons.html'\n\n\nclass OverlayBlock(blocks.StructBlock):\n image = ImageChooserBlock()\n title = blocks.CharBlock(required=False)\n description = blocks.CharBlock(required=False)\n\n link = blocks.URLBlock(required=False)\n button = blocks.CharBlock(required=False)\n\n class Meta:\n label = _('Image overlay')\n icon = 'fa-clone'\n template = 'blocks/overlay.html'\n\n\nWAGTAIL_STATIC_BLOCKTYPES = [\n ('heading', HeadingBlock()),\n ('paragraph', blocks.RichTextBlock()),\n ('image_description', ImageIconsBlock()),\n ('image_icons', ImageDescriptionBlock()),\n ('overlay', 
OverlayBlock()),\n ('logos', blocks.ListBlock(\n ImageChooserBlock(),\n icon='fa-pied-piper',\n template='blocks/logos.html',\n label=_('Logos'),\n )),\n ('counters', CountersBlock()),\n ('image', ImageChooserBlock(template='blocks/image.html')),\n]\n", "path": "website/blocks/models.py"}]}
1,555
117
gh_patches_debug_16424
rasdani/github-patches
git_diff
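For this row the whole fix is one keyword argument: rendering the paragraph block through a project template (`blocks/paragraph.html`) instead of Wagtail's bare rich-text default, so the text can share the wrapping and alignment markup the other blocks already get. A sketch of the Python side only; the template file itself is not shown in the row and is assumed to exist in the project:

```python
from wagtail.wagtailcore import blocks  # Wagtail 1.x import path, as used by this repo

# The `template` option makes Wagtail render the block through the project's markup.
paragraph_block = blocks.RichTextBlock(template='blocks/paragraph.html')

WAGTAIL_STATIC_BLOCKTYPES = [
    ('paragraph', paragraph_block),
]
```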
pyinstaller__pyinstaller-3520
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> libeay32.dll and ssleay32.dll needs to be manually bundled to use PyQt5.QNetwork with SSL If you are having errors like: ``` qt.network.ssl: QSslSocket: cannot call unresolved function SSLv23_client_method qt.network.ssl: QSslSocket: cannot call unresolved function SSL_CTX_new qt.network.ssl: QSslSocket: cannot call unresolved function SSL_library_init qt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error qt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error ``` with PyInstaller and PyQt5 on Windows, you need to manually add libeay32.dll and ssleay32.dll from your PyQt5 site-packages (probably located somewhere in `PyQt5\Qt\bin\`) to your output dir or your frozen binary in a similar path. In my final specfile, it looks like this: ```python # -*- mode: python -*- block_cipher = None a = Analysis(['cddagl\\launcher.py'], pathex=['C:\\Program Files (x86)\\Windows Kits\\10\\Redist\\ucrt\\DLLs\\x86\\', 'C:\\Users\\remy\\Projects\\CDDA-Game-Launcher'], binaries=[('C:\\Users\\remy\\VirtualEnvs\\CDDA-Game-Launcher\\lib\\site-packages\\PyQt5\\Qt\\bin\\libeay32.dll', 'PyQt5\\Qt\\bin'), ('C:\\Users\\remy\\VirtualEnvs\\CDDA-Game-Launcher\\lib\\site-packages\\PyQt5\\Qt\\bin\\ssleay32.dll', 'PyQt5\\Qt\\bin')], datas=[('alembic', 'alembic'), ('bin/updated.bat', '.'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('C:\\Users\\remy\\AppData\\Local\\Programs\\Python\\Python36-32\\unrar.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')], hiddenimports=['lxml.cssselect', 'babel.numbers'], hookspath=[], runtime_hooks=[], excludes=[], win_no_prefer_redirects=False, win_private_assemblies=False, cipher=block_cipher) pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) exe = EXE(pyz, a.scripts, a.binaries, a.zipfiles, a.datas, name='launcher', debug=True, strip=False, upx=False, runtime_tmpdir=None, console=True , icon='cddagl\\resources\\launcher.ico') ``` I'm not sure how this can be improved, but this solution should work if you are having a similar issue. This was tested with **PyInstaller==3.4.dev0+1033a8770** </issue> <code> [start of PyInstaller/hooks/hook-PyQt5.QtNetwork.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2013-2018, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 from PyInstaller.utils.hooks import add_qt5_dependencies 10 11 hiddenimports, binaries, datas = add_qt5_dependencies(__file__) 12 [end of PyInstaller/hooks/hook-PyQt5.QtNetwork.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py --- a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py +++ b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py @@ -6,6 +6,21 @@ # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- -from PyInstaller.utils.hooks import add_qt5_dependencies +import os.path + +from PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies +from PyInstaller.compat import is_win hiddenimports, binaries, datas = add_qt5_dependencies(__file__) + +# Add libraries needed for SSL. See issue #3520. +if is_win: + rel_data_path = ['PyQt5', 'Qt', 'bin'] + binaries += [ + (os.path.join(pyqt5_library_info.location['BinariesPath'], + 'libeay32.dll'), + os.path.join(*rel_data_path)), + (os.path.join(pyqt5_library_info.location['BinariesPath'], + 'ssleay32.dll'), + os.path.join(*rel_data_path)) + ]
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n--- a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n+++ b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n@@ -6,6 +6,21 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n-from PyInstaller.utils.hooks import add_qt5_dependencies\n+import os.path\n+\n+from PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies\n+from PyInstaller.compat import is_win\n \n hiddenimports, binaries, datas = add_qt5_dependencies(__file__)\n+\n+# Add libraries needed for SSL. See issue #3520.\n+if is_win:\n+ rel_data_path = ['PyQt5', 'Qt', 'bin']\n+ binaries += [\n+ (os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ 'libeay32.dll'),\n+ os.path.join(*rel_data_path)),\n+ (os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ 'ssleay32.dll'),\n+ os.path.join(*rel_data_path))\n+ ]\n", "issue": "libeay32.dll and ssleay32.dll needs to be manually bundled to use PyQt5.QNetwork with SSL\nIf you are having errors like:\r\n\r\n```\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSLv23_client_method\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSL_CTX_new\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSL_library_init\r\nqt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error\r\nqt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error\r\n```\r\n\r\nwith PyInstaller and PyQt5 on Windows, you need to manually add libeay32.dll and ssleay32.dll from your PyQt5 site-packages (probably located somewhere in `PyQt5\\Qt\\bin\\`) to your output dir or your frozen binary in a similar path.\r\n\r\nIn my final specfile, it looks like this:\r\n\r\n```python\r\n# -*- mode: python -*-\r\n\r\nblock_cipher = None\r\n\r\n\r\na = Analysis(['cddagl\\\\launcher.py'],\r\n pathex=['C:\\\\Program Files (x86)\\\\Windows Kits\\\\10\\\\Redist\\\\ucrt\\\\DLLs\\\\x86\\\\', 'C:\\\\Users\\\\remy\\\\Projects\\\\CDDA-Game-Launcher'],\r\n binaries=[('C:\\\\Users\\\\remy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\lib\\\\site-packages\\\\PyQt5\\\\Qt\\\\bin\\\\libeay32.dll', 'PyQt5\\\\Qt\\\\bin'), ('C:\\\\Users\\\\remy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\lib\\\\site-packages\\\\PyQt5\\\\Qt\\\\bin\\\\ssleay32.dll', 'PyQt5\\\\Qt\\\\bin')],\r\n datas=[('alembic', 'alembic'), ('bin/updated.bat', '.'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('C:\\\\Users\\\\remy\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python36-32\\\\unrar.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],\r\n hiddenimports=['lxml.cssselect', 'babel.numbers'],\r\n hookspath=[],\r\n runtime_hooks=[],\r\n excludes=[],\r\n win_no_prefer_redirects=False,\r\n win_private_assemblies=False,\r\n cipher=block_cipher)\r\npyz = PYZ(a.pure, a.zipped_data,\r\n cipher=block_cipher)\r\nexe = EXE(pyz,\r\n a.scripts,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas,\r\n name='launcher',\r\n debug=True,\r\n strip=False,\r\n upx=False,\r\n runtime_tmpdir=None,\r\n console=True , icon='cddagl\\\\resources\\\\launcher.ico')\r\n```\r\n\r\nI'm not sure how this can be improved, but this solution 
should work if you are having a similar issue. This was tested with **PyInstaller==3.4.dev0+1033a8770**\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import add_qt5_dependencies\n\nhiddenimports, binaries, datas = add_qt5_dependencies(__file__)\n", "path": "PyInstaller/hooks/hook-PyQt5.QtNetwork.py"}]}
1,404
278
gh_patches_debug_9014
rasdani/github-patches
git_diff
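The merged hook change resolves PyQt5's Qt `bin` directory through `pyqt5_library_info` and bundles the two OpenSSL DLLs automatically on Windows, so the manual spec-file entries shown in the issue are no longer required. A condensed sketch of how those entries are built, assuming a PyInstaller version that already exposes `pyqt5_library_info`:

```python
import os.path
from PyInstaller.utils.hooks import pyqt5_library_info

qt_bin = pyqt5_library_info.location['BinariesPath']
ssl_binaries = [
    (os.path.join(qt_bin, dll), os.path.join('PyQt5', 'Qt', 'bin'))
    for dll in ('libeay32.dll', 'ssleay32.dll')
]
```

On older PyInstaller releases the workaround stays what the reporter did: list the DLLs explicitly under `binaries=` in the spec file.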
stephenmcd__mezzanine-1517
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Duplicate form fields in admin for user profiles As discussed here: https://groups.google.com/forum/#!topic/mezzanine-users/3QmiqfNZjUM </issue> <code> [start of mezzanine/accounts/admin.py] 1 from __future__ import unicode_literals 2 3 from django.contrib import admin 4 from django.contrib.auth import get_user_model 5 from mezzanine.accounts import get_profile_model, ProfileNotConfigured 6 7 from mezzanine.core.admin import SitePermissionUserAdmin 8 from mezzanine.conf import settings 9 from mezzanine.utils.email import send_approved_mail, send_verification_mail 10 11 12 User = get_user_model() 13 14 user_list_display = SitePermissionUserAdmin.list_display 15 user_list_display += ("is_active", "date_joined", "last_login") 16 17 18 class UserProfileAdmin(SitePermissionUserAdmin): 19 20 list_display = user_list_display 21 22 def save_model(self, request, obj, form, change): 23 """ 24 If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``, 25 send a notification email to the user being saved if their 26 ``active`` status has changed to ``True``. 27 If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``, 28 send a verification email instead. 29 """ 30 must_send_verification_mail_after_save = False 31 if change and settings.ACCOUNTS_APPROVAL_REQUIRED: 32 if obj.is_active and not User.objects.get(id=obj.id).is_active: 33 if settings.ACCOUNTS_VERIFICATION_REQUIRED: 34 # Accounts verification requires an inactive account 35 obj.is_active = False 36 # The token generated by send_verification_mail() 37 # must match the _saved_ User object, 38 # so postpone send_verification_mail() until later 39 must_send_verification_mail_after_save = True 40 else: 41 send_approved_mail(request, obj) 42 super(UserProfileAdmin, self).save_model(request, obj, form, change) 43 if must_send_verification_mail_after_save: 44 user = User.objects.get(id=obj.id) 45 send_verification_mail(request, user, "signup_verify") 46 47 48 try: 49 class ProfileInline(admin.StackedInline): 50 model = get_profile_model() 51 can_delete = False 52 template = "admin/profile_inline.html" 53 extra = 0 54 UserProfileAdmin.inlines += (ProfileInline,) 55 except ProfileNotConfigured: 56 pass 57 58 59 if User in admin.site._registry: 60 admin.site.unregister(User) 61 admin.site.register(User, UserProfileAdmin) 62 [end of mezzanine/accounts/admin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mezzanine/accounts/admin.py b/mezzanine/accounts/admin.py --- a/mezzanine/accounts/admin.py +++ b/mezzanine/accounts/admin.py @@ -51,6 +51,13 @@ can_delete = False template = "admin/profile_inline.html" extra = 0 + + def get_min_num(self, request, obj=None, **kwargs): + """This causes profile forms to be shown when editing but hidden + when creating. If min_num is fixed at 1, Django's initial user + creation form fails if the profile model has a required field.""" + return 0 if obj is None else 1 + UserProfileAdmin.inlines += (ProfileInline,) except ProfileNotConfigured: pass
{"golden_diff": "diff --git a/mezzanine/accounts/admin.py b/mezzanine/accounts/admin.py\n--- a/mezzanine/accounts/admin.py\n+++ b/mezzanine/accounts/admin.py\n@@ -51,6 +51,13 @@\n can_delete = False\n template = \"admin/profile_inline.html\"\n extra = 0\n+\n+ def get_min_num(self, request, obj=None, **kwargs):\n+ \"\"\"This causes profile forms to be shown when editing but hidden\n+ when creating. If min_num is fixed at 1, Django's initial user\n+ creation form fails if the profile model has a required field.\"\"\"\n+ return 0 if obj is None else 1\n+\n UserProfileAdmin.inlines += (ProfileInline,)\n except ProfileNotConfigured:\n pass\n", "issue": "Duplicate form fields in admin for user profiles\nAs discussed here:\n\nhttps://groups.google.com/forum/#!topic/mezzanine-users/3QmiqfNZjUM\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom django.contrib.auth import get_user_model\nfrom mezzanine.accounts import get_profile_model, ProfileNotConfigured\n\nfrom mezzanine.core.admin import SitePermissionUserAdmin\nfrom mezzanine.conf import settings\nfrom mezzanine.utils.email import send_approved_mail, send_verification_mail\n\n\nUser = get_user_model()\n\nuser_list_display = SitePermissionUserAdmin.list_display\nuser_list_display += (\"is_active\", \"date_joined\", \"last_login\")\n\n\nclass UserProfileAdmin(SitePermissionUserAdmin):\n\n list_display = user_list_display\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``,\n send a notification email to the user being saved if their\n ``active`` status has changed to ``True``.\n If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``,\n send a verification email instead.\n \"\"\"\n must_send_verification_mail_after_save = False\n if change and settings.ACCOUNTS_APPROVAL_REQUIRED:\n if obj.is_active and not User.objects.get(id=obj.id).is_active:\n if settings.ACCOUNTS_VERIFICATION_REQUIRED:\n # Accounts verification requires an inactive account\n obj.is_active = False\n # The token generated by send_verification_mail()\n # must match the _saved_ User object,\n # so postpone send_verification_mail() until later\n must_send_verification_mail_after_save = True\n else:\n send_approved_mail(request, obj)\n super(UserProfileAdmin, self).save_model(request, obj, form, change)\n if must_send_verification_mail_after_save:\n user = User.objects.get(id=obj.id)\n send_verification_mail(request, user, \"signup_verify\")\n\n\ntry:\n class ProfileInline(admin.StackedInline):\n model = get_profile_model()\n can_delete = False\n template = \"admin/profile_inline.html\"\n extra = 0\n UserProfileAdmin.inlines += (ProfileInline,)\nexcept ProfileNotConfigured:\n pass\n\n\nif User in admin.site._registry:\n admin.site.unregister(User)\nadmin.site.register(User, UserProfileAdmin)\n", "path": "mezzanine/accounts/admin.py"}]}
1,156
168
gh_patches_debug_2536
rasdani/github-patches
git_diff
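The mezzanine fix makes the profile inline conditional: `get_min_num` returns 0 on the add-user form, where `obj` is `None`, and 1 when editing an existing user, which removes the duplicated profile fields and keeps Django's two-step user creation from failing on required profile fields. Condensed sketch; the real inline also sets `model = get_profile_model()` and a custom template:

```python
from django.contrib import admin

class ProfileInline(admin.StackedInline):
    # model = <project-specific profile model>
    can_delete = False
    extra = 0

    def get_min_num(self, request, obj=None, **kwargs):
        # Hidden while creating a user, exactly one profile form when editing.
        return 0 if obj is None else 1
```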
optuna__optuna-122
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `TPESampler._sample_categorical` fails with PostgreSQL backend `TPESampler._sample_categorical` fails with PostgreSQL backend. This happens because: - `TPESampler._sample_categorical` returns an integer as `numpy.int32`. - The integer value is input to storage class without any cast. - SQLAlchemy with psycopg2 backend does not support `numpy.int32` input but does `int` one. **Repro Steps** With any objective function using categorical sampling (e.g., example one in `chainer_mnist.py`), invoke `minimize` as: ``` study = pfnopt.create_study(storage=SOME_POSTGRES_URL) pfnopt.minimize(objective, n_trials=100, study=study) ``` It fails after running trials `n_startup_trails` times. </issue> <code> [start of pfnopt/samplers/tpe.py] 1 import math 2 import numpy 3 from typing import List # NOQA 4 from typing import Optional # NOQA 5 6 from pfnopt import distributions # NOQA 7 from pfnopt.samplers import _hyperopt 8 from pfnopt.samplers import base 9 from pfnopt.samplers import random 10 from pfnopt.storages.base import BaseStorage # NOQA 11 12 13 class TPESampler(base.BaseSampler): 14 15 def __init__(self, 16 prior_weight=_hyperopt.default_prior_weight, 17 n_startup_trials=_hyperopt.default_n_startup_trials, 18 n_ei_candidates=_hyperopt.default_n_ei_candidates, 19 gamma=_hyperopt.default_gamma, 20 seed=None): 21 # type: (float, int, int, float, Optional[int]) -> None 22 self.prior_weight = prior_weight 23 self.n_startup_trials = n_startup_trials 24 self.n_ei_candidates = n_ei_candidates 25 self.gamma = gamma 26 self.seed = seed 27 28 self.rng = numpy.random.RandomState(seed) 29 self.random_sampler = random.RandomSampler(seed=seed) 30 31 def sample(self, storage, study_id, param_name, param_distribution): 32 # type: (BaseStorage, int, str, distributions.BaseDistribution) -> float 33 observation_pairs = storage.get_trial_param_result_pairs( 34 study_id, param_name) 35 n = len(observation_pairs) 36 37 # TODO(Akiba): this behavior is slightly different from hyperopt 38 if n < self.n_startup_trials: 39 return self.random_sampler.sample(storage, study_id, param_name, param_distribution) 40 41 below_param_values, above_param_values = _hyperopt.ap_filter_trials( 42 range(n), [p[0] for p in observation_pairs], 43 range(n), [p[1] for p in observation_pairs], 44 self.gamma) 45 46 if isinstance(param_distribution, distributions.UniformDistribution): 47 return self._sample_uniform( 48 param_distribution, below_param_values, above_param_values) 49 elif isinstance(param_distribution, distributions.LogUniformDistribution): 50 return self._sample_loguniform( 51 param_distribution, below_param_values, above_param_values) 52 elif isinstance(param_distribution, distributions.CategoricalDistribution): 53 return self._sample_categorical( 54 param_distribution, below_param_values, above_param_values) 55 else: 56 raise NotImplementedError 57 58 def _sample_uniform(self, distribution, below, above): 59 # type: (distributions.UniformDistribution, List[float], List[float]) -> float 60 return _hyperopt.sample_uniform( 61 obs_below=below, obs_above=above, prior_weight=self.prior_weight, 62 low=distribution.low, high=distribution.high, 63 size=(self.n_ei_candidates,), rng=self.rng) 64 65 def _sample_loguniform(self, distribution, below, above): 66 # type: (distributions.LogUniformDistribution, List[float], List[float]) -> float 67 68 return _hyperopt.sample_loguniform( 69 obs_below=below, obs_above=above, 
prior_weight=self.prior_weight, 70 # `sample_loguniform` generates values in [exp(low), exp(high)] 71 low=math.log(distribution.low), 72 high=math.log(distribution.high), 73 size=(self.n_ei_candidates,), rng=self.rng) 74 75 def _sample_categorical(self, distribution, below, above): 76 # type: (distributions.CategoricalDistribution, List[float], List[float]) -> float 77 choices = distribution.choices 78 below = list(map(int, below)) 79 above = list(map(int, above)) 80 idx = _hyperopt.sample_categorical( 81 obs_below=below, obs_above=above, prior_weight=self.prior_weight, 82 upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng) 83 return idx 84 [end of pfnopt/samplers/tpe.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pfnopt/samplers/tpe.py b/pfnopt/samplers/tpe.py --- a/pfnopt/samplers/tpe.py +++ b/pfnopt/samplers/tpe.py @@ -80,4 +80,4 @@ idx = _hyperopt.sample_categorical( obs_below=below, obs_above=above, prior_weight=self.prior_weight, upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng) - return idx + return int(idx)
{"golden_diff": "diff --git a/pfnopt/samplers/tpe.py b/pfnopt/samplers/tpe.py\n--- a/pfnopt/samplers/tpe.py\n+++ b/pfnopt/samplers/tpe.py\n@@ -80,4 +80,4 @@\n idx = _hyperopt.sample_categorical(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\n- return idx\n+ return int(idx)\n", "issue": "`TPESampler._sample_categorical` fails with PostgreSQL backend\n`TPESampler._sample_categorical` fails with PostgreSQL backend. This happens because:\r\n- `TPESampler._sample_categorical` returns an integer as `numpy.int32`.\r\n- The integer value is input to storage class without any cast.\r\n- SQLAlchemy with psycopg2 backend does not support `numpy.int32` input but does `int` one.\r\n\r\n**Repro Steps**\r\nWith any objective function using categorical sampling (e.g., example one in `chainer_mnist.py`), invoke `minimize` as:\r\n```\r\nstudy = pfnopt.create_study(storage=SOME_POSTGRES_URL)\r\npfnopt.minimize(objective, n_trials=100, study=study)\r\n```\r\n\r\nIt fails after running trials `n_startup_trails` times.\n", "before_files": [{"content": "import math\nimport numpy\nfrom typing import List # NOQA\nfrom typing import Optional # NOQA\n\nfrom pfnopt import distributions # NOQA\nfrom pfnopt.samplers import _hyperopt\nfrom pfnopt.samplers import base\nfrom pfnopt.samplers import random\nfrom pfnopt.storages.base import BaseStorage # NOQA\n\n\nclass TPESampler(base.BaseSampler):\n\n def __init__(self,\n prior_weight=_hyperopt.default_prior_weight,\n n_startup_trials=_hyperopt.default_n_startup_trials,\n n_ei_candidates=_hyperopt.default_n_ei_candidates,\n gamma=_hyperopt.default_gamma,\n seed=None):\n # type: (float, int, int, float, Optional[int]) -> None\n self.prior_weight = prior_weight\n self.n_startup_trials = n_startup_trials\n self.n_ei_candidates = n_ei_candidates\n self.gamma = gamma\n self.seed = seed\n\n self.rng = numpy.random.RandomState(seed)\n self.random_sampler = random.RandomSampler(seed=seed)\n\n def sample(self, storage, study_id, param_name, param_distribution):\n # type: (BaseStorage, int, str, distributions.BaseDistribution) -> float\n observation_pairs = storage.get_trial_param_result_pairs(\n study_id, param_name)\n n = len(observation_pairs)\n\n # TODO(Akiba): this behavior is slightly different from hyperopt\n if n < self.n_startup_trials:\n return self.random_sampler.sample(storage, study_id, param_name, param_distribution)\n\n below_param_values, above_param_values = _hyperopt.ap_filter_trials(\n range(n), [p[0] for p in observation_pairs],\n range(n), [p[1] for p in observation_pairs],\n self.gamma)\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._sample_uniform(\n param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n return self._sample_loguniform(\n param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n return self._sample_categorical(\n param_distribution, below_param_values, above_param_values)\n else:\n raise NotImplementedError\n\n def _sample_uniform(self, distribution, below, above):\n # type: (distributions.UniformDistribution, List[float], List[float]) -> float\n return _hyperopt.sample_uniform(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n low=distribution.low, high=distribution.high,\n size=(self.n_ei_candidates,), rng=self.rng)\n\n def 
_sample_loguniform(self, distribution, below, above):\n # type: (distributions.LogUniformDistribution, List[float], List[float]) -> float\n\n return _hyperopt.sample_loguniform(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n # `sample_loguniform` generates values in [exp(low), exp(high)]\n low=math.log(distribution.low),\n high=math.log(distribution.high),\n size=(self.n_ei_candidates,), rng=self.rng)\n\n def _sample_categorical(self, distribution, below, above):\n # type: (distributions.CategoricalDistribution, List[float], List[float]) -> float\n choices = distribution.choices\n below = list(map(int, below))\n above = list(map(int, above))\n idx = _hyperopt.sample_categorical(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\n return idx\n", "path": "pfnopt/samplers/tpe.py"}]}
1,665
121
gh_patches_debug_34056
rasdani/github-patches
git_diff
modin-project__modin-2806
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [ASV] use `wait` function to get the right performance times </issue> <code> [start of asv_bench/benchmarks/scalability/scalability_benchmarks.py] 1 # Licensed to Modin Development Team under one or more contributor license agreements. 2 # See the NOTICE file distributed with this work for additional information regarding 3 # copyright ownership. The Modin Development Team licenses this file to you under the 4 # Apache License, Version 2.0 (the "License"); you may not use this file except in 5 # compliance with the License. You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software distributed under 10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific language 12 # governing permissions and limitations under the License. 13 14 import modin.pandas as pd 15 from modin.pandas.utils import from_pandas 16 from modin.utils import to_pandas 17 import pandas 18 19 from ..utils import ( 20 gen_data, 21 generate_dataframe, 22 RAND_LOW, 23 RAND_HIGH, 24 ASV_DATASET_SIZE, 25 UNARY_OP_DATA_SIZE, 26 execute, 27 ) 28 29 30 class TimeFromPandas: 31 param_names = ["shape", "cpus"] 32 params = [ 33 UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE], 34 [4, 16, 32], 35 ] 36 37 def setup(self, shape, cpus): 38 self.data = pandas.DataFrame(gen_data("int", *shape, RAND_LOW, RAND_HIGH)) 39 from modin.config import NPartitions 40 41 NPartitions.get = lambda: cpus 42 # trigger ray init 43 pd.DataFrame([]) 44 45 def time_from_pandas(self, shape, cpus): 46 execute(from_pandas(self.data)) 47 48 49 class TimeToPandas: 50 param_names = ["shape", "cpus"] 51 params = [ 52 UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE], 53 [4, 16, 32], 54 ] 55 56 def setup(self, shape, cpus): 57 from modin.config import NPartitions 58 59 NPartitions.get = lambda: cpus 60 self.data = generate_dataframe("modin", "int", *shape, RAND_LOW, RAND_HIGH) 61 62 def time_to_pandas(self, shape, cpus): 63 execute(to_pandas(self.data)) 64 [end of asv_bench/benchmarks/scalability/scalability_benchmarks.py] [start of asv_bench/benchmarks/utils.py] 1 # Licensed to Modin Development Team under one or more contributor license agreements. 2 # See the NOTICE file distributed with this work for additional information regarding 3 # copyright ownership. The Modin Development Team licenses this file to you under the 4 # Apache License, Version 2.0 (the "License"); you may not use this file except in 5 # compliance with the License. You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software distributed under 10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific language 12 # governing permissions and limitations under the License. 
13 14 import os 15 import logging 16 import modin.pandas as pd 17 import pandas 18 import numpy as np 19 import uuid 20 21 RAND_LOW = 0 22 RAND_HIGH = 100 23 random_state = np.random.RandomState(seed=42) 24 25 26 try: 27 from modin.config import NPartitions 28 29 NPARTITIONS = NPartitions.get() 30 except ImportError: 31 NPARTITIONS = pd.DEFAULT_NPARTITIONS 32 33 try: 34 from modin.config import TestDatasetSize, AsvImplementation 35 36 ASV_USE_IMPL = AsvImplementation.get() 37 ASV_DATASET_SIZE = TestDatasetSize.get() or "Small" 38 except ImportError: 39 # The same benchmarking code can be run for different versions of Modin, so in 40 # case of an error importing important variables, we'll just use predefined values 41 ASV_USE_IMPL = os.environ.get("MODIN_ASV_USE_IMPL", "modin") 42 ASV_DATASET_SIZE = os.environ.get("MODIN_TEST_DATASET_SIZE", "Small") 43 44 assert ASV_USE_IMPL in ("modin", "pandas") 45 46 BINARY_OP_DATA_SIZE = { 47 "Big": [ 48 ((5000, 5000), (5000, 5000)), 49 # the case extremely inefficient 50 # ((20, 500_000), (10, 1_000_000)), 51 ((500_000, 20), (1_000_000, 10)), 52 ], 53 "Small": [ 54 ((250, 250), (250, 250)), 55 ((20, 10_000), (10, 25_000)), 56 ((10_000, 20), (25_000, 10)), 57 ], 58 } 59 60 UNARY_OP_DATA_SIZE = { 61 "Big": [ 62 (5000, 5000), 63 # the case extremely inefficient 64 # (10, 1_000_000), 65 (1_000_000, 10), 66 ], 67 "Small": [ 68 (250, 250), 69 (10, 10_000), 70 (10_000, 10), 71 ], 72 } 73 74 GROUPBY_NGROUPS = { 75 "Big": [100, "huge_amount_groups"], 76 "Small": [5], 77 } 78 79 IMPL = { 80 "modin": pd, 81 "pandas": pandas, 82 } 83 84 85 def translator_groupby_ngroups(groupby_ngroups, shape): 86 if ASV_DATASET_SIZE == "Big": 87 if groupby_ngroups == "huge_amount_groups": 88 return min(shape[0] // 2, 5000) 89 return groupby_ngroups 90 else: 91 return groupby_ngroups 92 93 94 class weakdict(dict): 95 __slots__ = ("__weakref__",) 96 97 98 data_cache = dict() 99 dataframes_cache = dict() 100 101 102 def gen_int_data(nrows, ncols, rand_low, rand_high): 103 cache_key = ("int", nrows, ncols, rand_low, rand_high) 104 if cache_key in data_cache: 105 return data_cache[cache_key] 106 107 logging.info( 108 "Generating int data {} rows and {} columns [{}-{}]".format( 109 nrows, ncols, rand_low, rand_high 110 ) 111 ) 112 data = { 113 "col{}".format(i): random_state.randint(rand_low, rand_high, size=(nrows)) 114 for i in range(ncols) 115 } 116 data_cache[cache_key] = weakdict(data) 117 return data 118 119 120 def gen_str_int_data(nrows, ncols, rand_low, rand_high): 121 cache_key = ("str_int", nrows, ncols, rand_low, rand_high) 122 if cache_key in data_cache: 123 return data_cache[cache_key] 124 125 logging.info( 126 "Generating str_int data {} rows and {} columns [{}-{}]".format( 127 nrows, ncols, rand_low, rand_high 128 ) 129 ) 130 data = gen_int_data(nrows, ncols, rand_low, rand_high).copy() 131 data["gb_col"] = [ 132 "str_{}".format(random_state.randint(rand_low, rand_high)) for i in range(nrows) 133 ] 134 data_cache[cache_key] = weakdict(data) 135 return data 136 137 138 def gen_data(data_type, nrows, ncols, rand_low, rand_high): 139 if data_type == "int": 140 return gen_int_data(nrows, ncols, rand_low, rand_high) 141 elif data_type == "str_int": 142 return gen_str_int_data(nrows, ncols, rand_low, rand_high) 143 else: 144 assert False 145 146 147 def generate_dataframe( 148 impl, 149 data_type, 150 nrows, 151 ncols, 152 rand_low, 153 rand_high, 154 groupby_ncols=None, 155 count_groups=None, 156 ): 157 assert not ( 158 (groupby_ncols is None) ^ (count_groups is None) 159 ), 
"You must either specify both parameters 'groupby_ncols' and 'count_groups' or none of them." 160 161 if groupby_ncols and count_groups: 162 ncols -= groupby_ncols 163 cache_key = ( 164 impl, 165 data_type, 166 nrows, 167 ncols, 168 rand_low, 169 rand_high, 170 groupby_ncols, 171 count_groups, 172 ) 173 else: 174 cache_key = (impl, data_type, nrows, ncols, rand_low, rand_high) 175 176 if cache_key in dataframes_cache: 177 return dataframes_cache[cache_key] 178 179 logging.info( 180 "Allocating {} DataFrame {}: {} rows and {} columns [{}-{}]".format( 181 impl, data_type, nrows, ncols, rand_low, rand_high 182 ) 183 ) 184 data = gen_data(data_type, nrows, ncols, rand_low, rand_high) 185 186 if groupby_ncols and count_groups: 187 groupby_columns = [f"groupby_col{x}" for x in range(groupby_ncols)] 188 for groupby_col in groupby_columns: 189 data[groupby_col] = np.tile(np.arange(count_groups), nrows // count_groups) 190 191 if impl == "modin": 192 df = pd.DataFrame(data) 193 elif impl == "pandas": 194 df = pandas.DataFrame(data) 195 else: 196 assert False 197 198 if groupby_ncols and count_groups: 199 dataframes_cache[cache_key] = df, groupby_columns 200 return df, groupby_columns 201 202 dataframes_cache[cache_key] = df 203 return df 204 205 206 def random_string(): 207 return str(uuid.uuid1()) 208 209 210 def random_columns(df_columns, columns_number): 211 return list(random_state.choice(df_columns, size=columns_number)) 212 213 214 def random_booleans(number): 215 return list(random_state.choice([True, False], size=number)) 216 217 218 def execute(df): 219 "Make sure the calculations are done." 220 return df.shape, df.dtypes 221 222 223 def get_shape_id(array): 224 return "_".join([str(element) for element in array]) 225 [end of asv_bench/benchmarks/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/asv_bench/benchmarks/scalability/scalability_benchmarks.py b/asv_bench/benchmarks/scalability/scalability_benchmarks.py --- a/asv_bench/benchmarks/scalability/scalability_benchmarks.py +++ b/asv_bench/benchmarks/scalability/scalability_benchmarks.py @@ -60,4 +60,5 @@ self.data = generate_dataframe("modin", "int", *shape, RAND_LOW, RAND_HIGH) def time_to_pandas(self, shape, cpus): - execute(to_pandas(self.data)) + # to_pandas is already synchronous + to_pandas(self.data) diff --git a/asv_bench/benchmarks/utils.py b/asv_bench/benchmarks/utils.py --- a/asv_bench/benchmarks/utils.py +++ b/asv_bench/benchmarks/utils.py @@ -31,15 +31,17 @@ NPARTITIONS = pd.DEFAULT_NPARTITIONS try: - from modin.config import TestDatasetSize, AsvImplementation + from modin.config import TestDatasetSize, AsvImplementation, Engine ASV_USE_IMPL = AsvImplementation.get() ASV_DATASET_SIZE = TestDatasetSize.get() or "Small" + ASV_USE_ENGINE = Engine.get() except ImportError: # The same benchmarking code can be run for different versions of Modin, so in # case of an error importing important variables, we'll just use predefined values ASV_USE_IMPL = os.environ.get("MODIN_ASV_USE_IMPL", "modin") ASV_DATASET_SIZE = os.environ.get("MODIN_TEST_DATASET_SIZE", "Small") + ASV_USE_ENGINE = os.environ.get("MODIN_ENGINE", "Ray") assert ASV_USE_IMPL in ("modin", "pandas") @@ -217,7 +219,24 @@ def execute(df): "Make sure the calculations are done." - return df.shape, df.dtypes + if ASV_USE_IMPL == "modin": + partitions = df._query_compiler._modin_frame._partitions + map(lambda partition: partition.drain_call_queue(), partitions) + if ASV_USE_ENGINE == "Ray": + from ray import wait + + map(lambda partition: wait(partition.oid), partitions) + elif ASV_USE_ENGINE == "Dask": + from dask.distributed import wait + + map(lambda partition: wait(partition.future), partitions) + elif ASV_USE_ENGINE == "Python": + pass + + elif ASV_USE_IMPL == "pandas": + pass + else: + raise ValueError(f"wrong value of {ASV_USE_IMPL}") def get_shape_id(array):
{"golden_diff": "diff --git a/asv_bench/benchmarks/scalability/scalability_benchmarks.py b/asv_bench/benchmarks/scalability/scalability_benchmarks.py\n--- a/asv_bench/benchmarks/scalability/scalability_benchmarks.py\n+++ b/asv_bench/benchmarks/scalability/scalability_benchmarks.py\n@@ -60,4 +60,5 @@\n self.data = generate_dataframe(\"modin\", \"int\", *shape, RAND_LOW, RAND_HIGH)\n \n def time_to_pandas(self, shape, cpus):\n- execute(to_pandas(self.data))\n+ # to_pandas is already synchronous\n+ to_pandas(self.data)\ndiff --git a/asv_bench/benchmarks/utils.py b/asv_bench/benchmarks/utils.py\n--- a/asv_bench/benchmarks/utils.py\n+++ b/asv_bench/benchmarks/utils.py\n@@ -31,15 +31,17 @@\n NPARTITIONS = pd.DEFAULT_NPARTITIONS\n \n try:\n- from modin.config import TestDatasetSize, AsvImplementation\n+ from modin.config import TestDatasetSize, AsvImplementation, Engine\n \n ASV_USE_IMPL = AsvImplementation.get()\n ASV_DATASET_SIZE = TestDatasetSize.get() or \"Small\"\n+ ASV_USE_ENGINE = Engine.get()\n except ImportError:\n # The same benchmarking code can be run for different versions of Modin, so in\n # case of an error importing important variables, we'll just use predefined values\n ASV_USE_IMPL = os.environ.get(\"MODIN_ASV_USE_IMPL\", \"modin\")\n ASV_DATASET_SIZE = os.environ.get(\"MODIN_TEST_DATASET_SIZE\", \"Small\")\n+ ASV_USE_ENGINE = os.environ.get(\"MODIN_ENGINE\", \"Ray\")\n \n assert ASV_USE_IMPL in (\"modin\", \"pandas\")\n \n@@ -217,7 +219,24 @@\n \n def execute(df):\n \"Make sure the calculations are done.\"\n- return df.shape, df.dtypes\n+ if ASV_USE_IMPL == \"modin\":\n+ partitions = df._query_compiler._modin_frame._partitions\n+ map(lambda partition: partition.drain_call_queue(), partitions)\n+ if ASV_USE_ENGINE == \"Ray\":\n+ from ray import wait\n+\n+ map(lambda partition: wait(partition.oid), partitions)\n+ elif ASV_USE_ENGINE == \"Dask\":\n+ from dask.distributed import wait\n+\n+ map(lambda partition: wait(partition.future), partitions)\n+ elif ASV_USE_ENGINE == \"Python\":\n+ pass\n+\n+ elif ASV_USE_IMPL == \"pandas\":\n+ pass\n+ else:\n+ raise ValueError(f\"wrong value of {ASV_USE_IMPL}\")\n \n \n def get_shape_id(array):\n", "issue": "[ASV] use `wait` function to get the right performance times\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport modin.pandas as pd\nfrom modin.pandas.utils import from_pandas\nfrom modin.utils import to_pandas\nimport pandas\n\nfrom ..utils import (\n gen_data,\n generate_dataframe,\n RAND_LOW,\n RAND_HIGH,\n ASV_DATASET_SIZE,\n UNARY_OP_DATA_SIZE,\n execute,\n)\n\n\nclass TimeFromPandas:\n param_names = [\"shape\", \"cpus\"]\n params = [\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n [4, 16, 32],\n ]\n\n def setup(self, shape, cpus):\n self.data = pandas.DataFrame(gen_data(\"int\", *shape, RAND_LOW, RAND_HIGH))\n from modin.config import NPartitions\n\n NPartitions.get = lambda: cpus\n # trigger ray init\n pd.DataFrame([])\n\n def time_from_pandas(self, shape, cpus):\n execute(from_pandas(self.data))\n\n\nclass TimeToPandas:\n param_names = [\"shape\", \"cpus\"]\n params = [\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n [4, 16, 32],\n ]\n\n def setup(self, shape, cpus):\n from modin.config import NPartitions\n\n NPartitions.get = lambda: cpus\n self.data = generate_dataframe(\"modin\", \"int\", *shape, RAND_LOW, RAND_HIGH)\n\n def time_to_pandas(self, shape, cpus):\n execute(to_pandas(self.data))\n", "path": "asv_bench/benchmarks/scalability/scalability_benchmarks.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport os\nimport logging\nimport modin.pandas as pd\nimport pandas\nimport numpy as np\nimport uuid\n\nRAND_LOW = 0\nRAND_HIGH = 100\nrandom_state = np.random.RandomState(seed=42)\n\n\ntry:\n from modin.config import NPartitions\n\n NPARTITIONS = NPartitions.get()\nexcept ImportError:\n NPARTITIONS = pd.DEFAULT_NPARTITIONS\n\ntry:\n from modin.config import TestDatasetSize, AsvImplementation\n\n ASV_USE_IMPL = AsvImplementation.get()\n ASV_DATASET_SIZE = TestDatasetSize.get() or \"Small\"\nexcept ImportError:\n # The same benchmarking code can be run for different versions of Modin, so in\n # case of an error importing important variables, we'll just use predefined values\n ASV_USE_IMPL = os.environ.get(\"MODIN_ASV_USE_IMPL\", \"modin\")\n ASV_DATASET_SIZE = os.environ.get(\"MODIN_TEST_DATASET_SIZE\", \"Small\")\n\nassert ASV_USE_IMPL in (\"modin\", \"pandas\")\n\nBINARY_OP_DATA_SIZE = {\n \"Big\": [\n ((5000, 5000), (5000, 5000)),\n # the case extremely inefficient\n # ((20, 500_000), (10, 1_000_000)),\n ((500_000, 20), (1_000_000, 10)),\n ],\n \"Small\": [\n ((250, 250), (250, 250)),\n ((20, 10_000), (10, 25_000)),\n ((10_000, 20), (25_000, 10)),\n ],\n}\n\nUNARY_OP_DATA_SIZE = {\n \"Big\": [\n (5000, 5000),\n # the case extremely inefficient\n # (10, 1_000_000),\n (1_000_000, 10),\n ],\n \"Small\": [\n (250, 250),\n (10, 10_000),\n (10_000, 10),\n ],\n}\n\nGROUPBY_NGROUPS = {\n \"Big\": [100, \"huge_amount_groups\"],\n \"Small\": [5],\n}\n\nIMPL = {\n \"modin\": pd,\n \"pandas\": pandas,\n}\n\n\ndef translator_groupby_ngroups(groupby_ngroups, shape):\n if ASV_DATASET_SIZE == \"Big\":\n if groupby_ngroups == \"huge_amount_groups\":\n return min(shape[0] // 2, 5000)\n return groupby_ngroups\n else:\n return groupby_ngroups\n\n\nclass weakdict(dict):\n __slots__ = (\"__weakref__\",)\n\n\ndata_cache = dict()\ndataframes_cache = dict()\n\n\ndef gen_int_data(nrows, ncols, rand_low, rand_high):\n cache_key = (\"int\", nrows, ncols, rand_low, rand_high)\n if cache_key in data_cache:\n return data_cache[cache_key]\n\n logging.info(\n \"Generating int data {} rows and {} columns [{}-{}]\".format(\n nrows, ncols, rand_low, rand_high\n )\n )\n data = {\n \"col{}\".format(i): random_state.randint(rand_low, rand_high, size=(nrows))\n for i in range(ncols)\n }\n data_cache[cache_key] = weakdict(data)\n return data\n\n\ndef gen_str_int_data(nrows, ncols, rand_low, rand_high):\n cache_key = (\"str_int\", nrows, ncols, rand_low, rand_high)\n if cache_key in data_cache:\n return data_cache[cache_key]\n\n logging.info(\n \"Generating str_int data {} rows and {} columns [{}-{}]\".format(\n nrows, ncols, rand_low, rand_high\n )\n )\n data = gen_int_data(nrows, ncols, rand_low, rand_high).copy()\n data[\"gb_col\"] = [\n \"str_{}\".format(random_state.randint(rand_low, rand_high)) for i in range(nrows)\n ]\n data_cache[cache_key] = weakdict(data)\n return data\n\n\ndef gen_data(data_type, nrows, ncols, rand_low, rand_high):\n if data_type == \"int\":\n return gen_int_data(nrows, ncols, rand_low, rand_high)\n elif data_type == \"str_int\":\n return gen_str_int_data(nrows, ncols, rand_low, rand_high)\n else:\n assert False\n\n\ndef generate_dataframe(\n impl,\n data_type,\n nrows,\n ncols,\n rand_low,\n rand_high,\n groupby_ncols=None,\n count_groups=None,\n):\n assert not (\n (groupby_ncols is None) ^ (count_groups is None)\n ), \"You must either specify both parameters 'groupby_ncols' and 'count_groups' or 
none of them.\"\n\n if groupby_ncols and count_groups:\n ncols -= groupby_ncols\n cache_key = (\n impl,\n data_type,\n nrows,\n ncols,\n rand_low,\n rand_high,\n groupby_ncols,\n count_groups,\n )\n else:\n cache_key = (impl, data_type, nrows, ncols, rand_low, rand_high)\n\n if cache_key in dataframes_cache:\n return dataframes_cache[cache_key]\n\n logging.info(\n \"Allocating {} DataFrame {}: {} rows and {} columns [{}-{}]\".format(\n impl, data_type, nrows, ncols, rand_low, rand_high\n )\n )\n data = gen_data(data_type, nrows, ncols, rand_low, rand_high)\n\n if groupby_ncols and count_groups:\n groupby_columns = [f\"groupby_col{x}\" for x in range(groupby_ncols)]\n for groupby_col in groupby_columns:\n data[groupby_col] = np.tile(np.arange(count_groups), nrows // count_groups)\n\n if impl == \"modin\":\n df = pd.DataFrame(data)\n elif impl == \"pandas\":\n df = pandas.DataFrame(data)\n else:\n assert False\n\n if groupby_ncols and count_groups:\n dataframes_cache[cache_key] = df, groupby_columns\n return df, groupby_columns\n\n dataframes_cache[cache_key] = df\n return df\n\n\ndef random_string():\n return str(uuid.uuid1())\n\n\ndef random_columns(df_columns, columns_number):\n return list(random_state.choice(df_columns, size=columns_number))\n\n\ndef random_booleans(number):\n return list(random_state.choice([True, False], size=number))\n\n\ndef execute(df):\n \"Make sure the calculations are done.\"\n return df.shape, df.dtypes\n\n\ndef get_shape_id(array):\n return \"_\".join([str(element) for element in array])\n", "path": "asv_bench/benchmarks/utils.py"}]}
3,506 | 623 | gh_patches_debug_13862 | rasdani/github-patches | git_diff
pulp__pulpcore-2665
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve proxy connection failure error message Raise a more informative error message when the proxy rejects requests from Pulp BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485 </issue> <code> [start of pulpcore/download/http.py] 1 import logging 2 3 import aiohttp 4 import asyncio 5 import backoff 6 7 from .base import BaseDownloader, DownloadResult 8 from pulpcore.exceptions import ( 9 DigestValidationError, 10 SizeValidationError, 11 TimeoutException, 12 ) 13 14 15 log = logging.getLogger(__name__) 16 17 18 logging.getLogger("backoff").addHandler(logging.StreamHandler()) 19 20 21 def http_giveup_handler(exc): 22 """ 23 Inspect a raised exception and determine if we should give up. 24 25 Do not give up when the error is one of the following: 26 27 HTTP 429 - Too Many Requests 28 HTTP 5xx - Server errors 29 Socket timeout 30 TCP disconnect 31 Client SSL Error 32 33 Based on the AWS and Google Cloud guidelines: 34 https://docs.aws.amazon.com/general/latest/gr/api-retries.html 35 https://cloud.google.com/storage/docs/retry-strategy 36 37 Args: 38 exc (Exception): The exception to inspect 39 40 Returns: 41 True if the download should give up, False otherwise 42 """ 43 if isinstance(exc, aiohttp.ClientResponseError): 44 server_error = 500 <= exc.code < 600 45 too_many_requests = exc.code == 429 46 return not server_error and not too_many_requests 47 48 # any other type of error (pre-filtered by the backoff decorator) shouldn't be fatal 49 return False 50 51 52 class HttpDownloader(BaseDownloader): 53 """ 54 An HTTP/HTTPS Downloader built on `aiohttp`. 55 56 This downloader downloads data from one `url` and is not reused. 57 58 The downloader optionally takes a session argument, which is an `aiohttp.ClientSession`. This 59 allows many downloaders to share one `aiohttp.ClientSession` which provides a connection pool, 60 connection reuse, and keep-alives across multiple downloaders. When creating many downloaders, 61 have one session shared by all of your `HttpDownloader` objects. 62 63 A session is optional; if omitted, one session will be created, used for this downloader, and 64 then closed when the download is complete. A session that is passed in will not be closed when 65 the download is complete. 66 67 If a session is not provided, the one created by HttpDownloader uses non-default timing values. 68 Specifically, the "total" timeout is set to None and the "sock_connect" and "sock_read" are both 69 5 minutes. For more info on these settings, see the aiohttp docs: 70 http://aiohttp.readthedocs.io/en/stable/client_quickstart.html#timeouts Behaviorally, it should 71 allow for an active download to be arbitrarily long, while still detecting dead or closed 72 sessions even when TCPKeepAlive is disabled. 73 74 If a session is not provided, the one created will force TCP connection closure after each 75 request. This is done for compatibility reasons due to various issues related to session 76 continuation implementation in various servers. 77 78 `aiohttp.ClientSession` objects allows you to configure options that will apply to all 79 downloaders using that session such as auth, timeouts, headers, etc. 
For more info on these 80 options see the `aiohttp.ClientSession` docs for more information: 81 http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientSession 82 83 The `aiohttp.ClientSession` can additionally be configured for SSL configuration by passing in a 84 `aiohttp.TCPConnector`. For information on configuring either server or client certificate based 85 identity verification, see the aiohttp documentation: 86 http://aiohttp.readthedocs.io/en/stable/client.html#ssl-control-for-tcp-sockets 87 88 For more information on `aiohttp.BasicAuth` objects, see their docs: 89 http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.BasicAuth 90 91 Synchronous Download:: 92 93 downloader = HttpDownloader('http://example.com/') 94 result = downloader.fetch() 95 96 Parallel Download:: 97 98 download_coroutines = [ 99 HttpDownloader('http://example.com/').run(), 100 HttpDownloader('http://pulpproject.org/').run(), 101 ] 102 103 loop = asyncio.get_event_loop() 104 done, not_done = loop.run_until_complete(asyncio.wait(download_coroutines)) 105 106 for task in done: 107 try: 108 task.result() # This is a DownloadResult 109 except Exception as error: 110 pass # fatal exceptions are raised by result() 111 112 The HTTPDownloaders contain automatic retry logic if the server responds with HTTP 429 response. 113 The coroutine will automatically retry 10 times with exponential backoff before allowing a 114 final exception to be raised. 115 116 Attributes: 117 session (aiohttp.ClientSession): The session to be used by the downloader. 118 auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization or None 119 proxy (str): An optional proxy URL or None 120 proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic 121 Authorization or None 122 headers_ready_callback (callable): An optional callback that accepts a single dictionary 123 as its argument. The callback will be called when the response headers are 124 available. The dictionary passed has the header names as the keys and header values 125 as its values. e.g. `{'Transfer-Encoding': 'chunked'}`. This can also be None. 126 127 This downloader also has all of the attributes of 128 :class:`~pulpcore.plugin.download.BaseDownloader` 129 """ 130 131 def __init__( 132 self, 133 url, 134 session=None, 135 auth=None, 136 proxy=None, 137 proxy_auth=None, 138 headers_ready_callback=None, 139 headers=None, 140 throttler=None, 141 max_retries=0, 142 **kwargs, 143 ): 144 """ 145 Args: 146 url (str): The url to download. 147 session (aiohttp.ClientSession): The session to be used by the downloader. (optional) If 148 not specified it will open the session and close it 149 auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization (optional) 150 proxy (str): An optional proxy URL. 151 proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic 152 Authorization. 153 headers_ready_callback (callable): An optional callback that accepts a single dictionary 154 as its argument. The callback will be called when the response headers are 155 available. The dictionary passed has the header names as the keys and header values 156 as its values. e.g. `{'Transfer-Encoding': 'chunked'}` 157 headers (dict): Headers to be submitted with the request. 158 throttler (asyncio_throttle.Throttler): Throttler for asyncio. 159 max_retries (int): The maximum number of times to retry a download upon failure. 
160 kwargs (dict): This accepts the parameters of 161 :class:`~pulpcore.plugin.download.BaseDownloader`. 162 """ 163 if session: 164 self.session = session 165 self._close_session_on_finalize = False 166 else: 167 timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600) 168 conn = aiohttp.TCPConnector({"force_close": True}) 169 self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers) 170 self._close_session_on_finalize = True 171 self.auth = auth 172 self.proxy = proxy 173 self.proxy_auth = proxy_auth 174 self.headers_ready_callback = headers_ready_callback 175 self.download_throttler = throttler 176 self.max_retries = max_retries 177 super().__init__(url, **kwargs) 178 179 def raise_for_status(self, response): 180 """ 181 Raise error if aiohttp response status is >= 400 and not silenced. 182 183 Args: 184 response (aiohttp.ClientResponse): The response to handle. 185 186 Raises: 187 aiohttp.ClientResponseError: When the response status is >= 400. 188 """ 189 response.raise_for_status() 190 191 async def _handle_response(self, response): 192 """ 193 Handle the aiohttp response by writing it to disk and calculating digests 194 195 Args: 196 response (aiohttp.ClientResponse): The response to handle. 197 198 Returns: 199 DownloadResult: Contains information about the result. See the DownloadResult docs for 200 more information. 201 """ 202 if self.headers_ready_callback: 203 await self.headers_ready_callback(response.headers) 204 while True: 205 chunk = await response.content.read(1048576) # 1 megabyte 206 if not chunk: 207 await self.finalize() 208 break # the download is done 209 await self.handle_data(chunk) 210 return DownloadResult( 211 path=self.path, 212 artifact_attributes=self.artifact_attributes, 213 url=self.url, 214 headers=response.headers, 215 ) 216 217 async def run(self, extra_data=None): 218 """ 219 Run the downloader with concurrency restriction and retry logic. 220 221 This method acquires `self.semaphore` before calling the actual download implementation 222 contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff` 223 wrapper around `_run()`, handles backoff-and-retry logic. 224 225 Args: 226 extra_data (dict): Extra data passed to the downloader. 227 228 Returns: 229 :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`. 230 231 """ 232 retryable_errors = ( 233 aiohttp.ClientConnectorSSLError, 234 aiohttp.ClientConnectorError, 235 aiohttp.ClientOSError, 236 aiohttp.ClientPayloadError, 237 aiohttp.ClientResponseError, 238 aiohttp.ServerDisconnectedError, 239 TimeoutError, 240 TimeoutException, 241 DigestValidationError, 242 SizeValidationError, 243 ) 244 245 async with self.semaphore: 246 247 @backoff.on_exception( 248 backoff.expo, 249 retryable_errors, 250 max_tries=self.max_retries + 1, 251 giveup=http_giveup_handler, 252 ) 253 async def download_wrapper(): 254 self._ensure_no_broken_file() 255 try: 256 return await self._run(extra_data=extra_data) 257 except asyncio.TimeoutError: 258 raise TimeoutException(self.url) 259 260 return await download_wrapper() 261 262 async def _run(self, extra_data=None): 263 """ 264 Download, validate, and compute digests on the `url`. This is a coroutine. 265 266 This method is externally wrapped with backoff-and-retry behavior for some errors. 267 It retries with exponential backoff some number of times before allowing a final 268 exception to be raised. 
269 270 This method provides the same return object type and documented in 271 :meth:`~pulpcore.plugin.download.BaseDownloader._run`. 272 273 Args: 274 extra_data (dict): Extra data passed by the downloader. 275 """ 276 if self.download_throttler: 277 await self.download_throttler.acquire() 278 async with self.session.get( 279 self.url, proxy=self.proxy, proxy_auth=self.proxy_auth, auth=self.auth 280 ) as response: 281 self.raise_for_status(response) 282 to_return = await self._handle_response(response) 283 await response.release() 284 if self._close_session_on_finalize: 285 await self.session.close() 286 return to_return 287 288 def _ensure_no_broken_file(self): 289 """Upon retry reset writer back to None to get a fresh file.""" 290 if self._writer is not None: 291 self._writer.delete = True 292 self._writer.close() 293 self._writer = None 294 [end of pulpcore/download/http.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -256,6 +256,17 @@
                     return await self._run(extra_data=extra_data)
                 except asyncio.TimeoutError:
                     raise TimeoutException(self.url)
+                except aiohttp.ClientHttpProxyError as e:
+                    log.error(
+                        "Proxy {!r} rejected connection request during a request to "
+                        "{!r}, status={}, message={!r}".format(
+                            e.request_info.real_url,
+                            e.request_info.url,
+                            e.status,
+                            e.message,
+                        )
+                    )
+                    raise e
 
             return await download_wrapper()
{"golden_diff": "diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py\n--- a/pulpcore/download/http.py\n+++ b/pulpcore/download/http.py\n@@ -256,6 +256,17 @@\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n+ except aiohttp.ClientHttpProxyError as e:\n+ log.error(\n+ \"Proxy {!r} rejected connection request during a request to \"\n+ \"{!r}, status={}, message={!r}\".format(\n+ e.request_info.real_url,\n+ e.request_info.url,\n+ e.status,\n+ e.message,\n+ )\n+ )\n+ raise e\n \n return await download_wrapper()\n", "issue": "Improve proxy connection failure error message\nRaise a more informative error message when the proxy rejects requests from Pulp\r\n\r\nBZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485\n", "before_files": [{"content": "import logging\n\nimport aiohttp\nimport asyncio\nimport backoff\n\nfrom .base import BaseDownloader, DownloadResult\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nlogging.getLogger(\"backoff\").addHandler(logging.StreamHandler())\n\n\ndef http_giveup_handler(exc):\n \"\"\"\n Inspect a raised exception and determine if we should give up.\n\n Do not give up when the error is one of the following:\n\n HTTP 429 - Too Many Requests\n HTTP 5xx - Server errors\n Socket timeout\n TCP disconnect\n Client SSL Error\n\n Based on the AWS and Google Cloud guidelines:\n https://docs.aws.amazon.com/general/latest/gr/api-retries.html\n https://cloud.google.com/storage/docs/retry-strategy\n\n Args:\n exc (Exception): The exception to inspect\n\n Returns:\n True if the download should give up, False otherwise\n \"\"\"\n if isinstance(exc, aiohttp.ClientResponseError):\n server_error = 500 <= exc.code < 600\n too_many_requests = exc.code == 429\n return not server_error and not too_many_requests\n\n # any other type of error (pre-filtered by the backoff decorator) shouldn't be fatal\n return False\n\n\nclass HttpDownloader(BaseDownloader):\n \"\"\"\n An HTTP/HTTPS Downloader built on `aiohttp`.\n\n This downloader downloads data from one `url` and is not reused.\n\n The downloader optionally takes a session argument, which is an `aiohttp.ClientSession`. This\n allows many downloaders to share one `aiohttp.ClientSession` which provides a connection pool,\n connection reuse, and keep-alives across multiple downloaders. When creating many downloaders,\n have one session shared by all of your `HttpDownloader` objects.\n\n A session is optional; if omitted, one session will be created, used for this downloader, and\n then closed when the download is complete. A session that is passed in will not be closed when\n the download is complete.\n\n If a session is not provided, the one created by HttpDownloader uses non-default timing values.\n Specifically, the \"total\" timeout is set to None and the \"sock_connect\" and \"sock_read\" are both\n 5 minutes. For more info on these settings, see the aiohttp docs:\n http://aiohttp.readthedocs.io/en/stable/client_quickstart.html#timeouts Behaviorally, it should\n allow for an active download to be arbitrarily long, while still detecting dead or closed\n sessions even when TCPKeepAlive is disabled.\n\n If a session is not provided, the one created will force TCP connection closure after each\n request. 
This is done for compatibility reasons due to various issues related to session\n continuation implementation in various servers.\n\n `aiohttp.ClientSession` objects allows you to configure options that will apply to all\n downloaders using that session such as auth, timeouts, headers, etc. For more info on these\n options see the `aiohttp.ClientSession` docs for more information:\n http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientSession\n\n The `aiohttp.ClientSession` can additionally be configured for SSL configuration by passing in a\n `aiohttp.TCPConnector`. For information on configuring either server or client certificate based\n identity verification, see the aiohttp documentation:\n http://aiohttp.readthedocs.io/en/stable/client.html#ssl-control-for-tcp-sockets\n\n For more information on `aiohttp.BasicAuth` objects, see their docs:\n http://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.BasicAuth\n\n Synchronous Download::\n\n downloader = HttpDownloader('http://example.com/')\n result = downloader.fetch()\n\n Parallel Download::\n\n download_coroutines = [\n HttpDownloader('http://example.com/').run(),\n HttpDownloader('http://pulpproject.org/').run(),\n ]\n\n loop = asyncio.get_event_loop()\n done, not_done = loop.run_until_complete(asyncio.wait(download_coroutines))\n\n for task in done:\n try:\n task.result() # This is a DownloadResult\n except Exception as error:\n pass # fatal exceptions are raised by result()\n\n The HTTPDownloaders contain automatic retry logic if the server responds with HTTP 429 response.\n The coroutine will automatically retry 10 times with exponential backoff before allowing a\n final exception to be raised.\n\n Attributes:\n session (aiohttp.ClientSession): The session to be used by the downloader.\n auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization or None\n proxy (str): An optional proxy URL or None\n proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic\n Authorization or None\n headers_ready_callback (callable): An optional callback that accepts a single dictionary\n as its argument. The callback will be called when the response headers are\n available. The dictionary passed has the header names as the keys and header values\n as its values. e.g. `{'Transfer-Encoding': 'chunked'}`. This can also be None.\n\n This downloader also has all of the attributes of\n :class:`~pulpcore.plugin.download.BaseDownloader`\n \"\"\"\n\n def __init__(\n self,\n url,\n session=None,\n auth=None,\n proxy=None,\n proxy_auth=None,\n headers_ready_callback=None,\n headers=None,\n throttler=None,\n max_retries=0,\n **kwargs,\n ):\n \"\"\"\n Args:\n url (str): The url to download.\n session (aiohttp.ClientSession): The session to be used by the downloader. (optional) If\n not specified it will open the session and close it\n auth (aiohttp.BasicAuth): An object that represents HTTP Basic Authorization (optional)\n proxy (str): An optional proxy URL.\n proxy_auth (aiohttp.BasicAuth): An optional object that represents proxy HTTP Basic\n Authorization.\n headers_ready_callback (callable): An optional callback that accepts a single dictionary\n as its argument. The callback will be called when the response headers are\n available. The dictionary passed has the header names as the keys and header values\n as its values. e.g. 
`{'Transfer-Encoding': 'chunked'}`\n headers (dict): Headers to be submitted with the request.\n throttler (asyncio_throttle.Throttler): Throttler for asyncio.\n max_retries (int): The maximum number of times to retry a download upon failure.\n kwargs (dict): This accepts the parameters of\n :class:`~pulpcore.plugin.download.BaseDownloader`.\n \"\"\"\n if session:\n self.session = session\n self._close_session_on_finalize = False\n else:\n timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)\n conn = aiohttp.TCPConnector({\"force_close\": True})\n self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)\n self._close_session_on_finalize = True\n self.auth = auth\n self.proxy = proxy\n self.proxy_auth = proxy_auth\n self.headers_ready_callback = headers_ready_callback\n self.download_throttler = throttler\n self.max_retries = max_retries\n super().__init__(url, **kwargs)\n\n def raise_for_status(self, response):\n \"\"\"\n Raise error if aiohttp response status is >= 400 and not silenced.\n\n Args:\n response (aiohttp.ClientResponse): The response to handle.\n\n Raises:\n aiohttp.ClientResponseError: When the response status is >= 400.\n \"\"\"\n response.raise_for_status()\n\n async def _handle_response(self, response):\n \"\"\"\n Handle the aiohttp response by writing it to disk and calculating digests\n\n Args:\n response (aiohttp.ClientResponse): The response to handle.\n\n Returns:\n DownloadResult: Contains information about the result. See the DownloadResult docs for\n more information.\n \"\"\"\n if self.headers_ready_callback:\n await self.headers_ready_callback(response.headers)\n while True:\n chunk = await response.content.read(1048576) # 1 megabyte\n if not chunk:\n await self.finalize()\n break # the download is done\n await self.handle_data(chunk)\n return DownloadResult(\n path=self.path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=response.headers,\n )\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction and retry logic.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\n wrapper around `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n retryable_errors = (\n aiohttp.ClientConnectorSSLError,\n aiohttp.ClientConnectorError,\n aiohttp.ClientOSError,\n aiohttp.ClientPayloadError,\n aiohttp.ClientResponseError,\n aiohttp.ServerDisconnectedError,\n TimeoutError,\n TimeoutException,\n DigestValidationError,\n SizeValidationError,\n )\n\n async with self.semaphore:\n\n @backoff.on_exception(\n backoff.expo,\n retryable_errors,\n max_tries=self.max_retries + 1,\n giveup=http_giveup_handler,\n )\n async def download_wrapper():\n self._ensure_no_broken_file()\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n\n return await download_wrapper()\n\n async def _run(self, extra_data=None):\n \"\"\"\n Download, validate, and compute digests on the `url`. 
This is a coroutine.\n\n This method is externally wrapped with backoff-and-retry behavior for some errors.\n It retries with exponential backoff some number of times before allowing a final\n exception to be raised.\n\n This method provides the same return object type and documented in\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n\n Args:\n extra_data (dict): Extra data passed by the downloader.\n \"\"\"\n if self.download_throttler:\n await self.download_throttler.acquire()\n async with self.session.get(\n self.url, proxy=self.proxy, proxy_auth=self.proxy_auth, auth=self.auth\n ) as response:\n self.raise_for_status(response)\n to_return = await self._handle_response(response)\n await response.release()\n if self._close_session_on_finalize:\n await self.session.close()\n return to_return\n\n def _ensure_no_broken_file(self):\n \"\"\"Upon retry reset writer back to None to get a fresh file.\"\"\"\n if self._writer is not None:\n self._writer.delete = True\n self._writer.close()\n self._writer = None\n", "path": "pulpcore/download/http.py"}]}
3,786 | 164 | gh_patches_debug_599 | rasdani/github-patches | git_diff
pex-tool__pex-1834
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.95 On the docket: + [x] Lock creation should skip Windows-only requirements and / or allow selecting target platforms (OS classes). #1821 + [x] Feature request: "universal" lock mode can reject unsupported platforms #1595 + [x] Avoid ENOEXEC for --venv shebangs. #1828 + [x] pex3 lock export does't seem to respect the platform flag. #1826 + [x] Clarify pex3 lock export command. #1645 + [x] Support exporting PYTHONPATH before running user code #1825 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.94" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-__version__ = "2.1.94"
+__version__ = "2.1.95"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.94\"\n+__version__ = \"2.1.95\"\n", "issue": "Release 2.1.95\nOn the docket:\r\n+ [x] Lock creation should skip Windows-only requirements and / or allow selecting target platforms (OS classes). #1821\r\n+ [x] Feature request: \"universal\" lock mode can reject unsupported platforms #1595\r\n+ [x] Avoid ENOEXEC for --venv shebangs. #1828 \r\n+ [x] pex3 lock export does't seem to respect the platform flag. #1826\r\n+ [x] Clarify pex3 lock export command. #1645\r\n+ [x] Support exporting PYTHONPATH before running user code #1825\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.94\"\n", "path": "pex/version.py"}]}
729 | 96 | gh_patches_debug_17669 | rasdani/github-patches | git_diff
gammapy__gammapy-1690
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> IndexError from SpectrumEnergyGroupMaker In this example CTA DC1 analysis the SpectrumEnergyGroupMaker give an IndexError if e.g. `emax=100 TeV` is chosen (see comment in cell 23): https://github.com/gammasky/cta-analyses/blob/bf571038b389b3eb13ce8ba81b35384ebd4b6750/dc-1-checks/hess_j1702/spectrum.ipynb </issue> <code> [start of gammapy/spectrum/energy_group.py] 1 # Licensed under a 3-clause BSD style license - see LICENSE.rst 2 """Spectrum energy bin grouping. 3 4 There are three classes: 5 6 * SpectrumEnergyGroup - one group 7 * SpectrumEnergyGroups - one grouping, i.e. collection of groups 8 * SpectrumEnergyGroupMaker - algorithms to compute groupings. 9 10 Algorithms to compute groupings are both on SpectrumEnergyGroups and SpectrumEnergyGroupMaker. 11 The difference is that SpectrumEnergyGroups contains the algorithms and book-keeping that 12 just have to do with the groups, whereas SpectrumEnergyGroupMaker also accesses 13 information from SpectrumObservation (e.g. safe energy range or counts data) and 14 implements higher-level algorithms. 15 """ 16 from __future__ import absolute_import, division, print_function, unicode_literals 17 from collections import OrderedDict 18 from copy import deepcopy 19 import numpy as np 20 import logging 21 from ..extern.six.moves import UserList 22 from astropy.units import Quantity 23 from astropy.table import Table 24 from astropy.table import vstack as table_vstack 25 from ..utils.table import table_from_row_data, table_row_to_dict 26 27 __all__ = [ 28 'SpectrumEnergyGroup', 29 'SpectrumEnergyGroups', 30 'SpectrumEnergyGroupMaker', 31 ] 32 33 log = logging.getLogger(__name__) 34 35 36 class SpectrumEnergyGroup(object): 37 """Spectrum energy group. 38 39 Represents a consecutive range of bin indices (both ends inclusive). 40 """ 41 fields = [ 42 'energy_group_idx', 'bin_idx_min', 'bin_idx_max', 43 'bin_type', 'energy_min', 'energy_max', 44 ] 45 """List of data members of this class.""" 46 47 valid_bin_types = ['normal', 'underflow', 'overflow'] 48 """Valid values for ``bin_types`` attribute.""" 49 50 def __init__(self, energy_group_idx, bin_idx_min, bin_idx_max, bin_type, 51 energy_min, energy_max): 52 self.energy_group_idx = energy_group_idx 53 self.bin_idx_min = bin_idx_min 54 self.bin_idx_max = bin_idx_max 55 if bin_type not in self.valid_bin_types: 56 raise ValueError('Invalid bin type: {}'.format(bin_type)) 57 self.bin_type = bin_type 58 self.energy_min = Quantity(energy_min) 59 self.energy_max = Quantity(energy_max) 60 61 @classmethod 62 def from_dict(cls, data): 63 data = dict((_, data[_]) for _ in cls.fields) 64 return cls(**data) 65 66 @property 67 def _data(self): 68 return [(_, getattr(self, _)) for _ in self.fields] 69 70 def __repr__(self): 71 txt = ['{}={!r}'.format(k, v) for k, v in self._data] 72 return '{}({})'.format(self.__class__.__name__, ', '.join(txt)) 73 74 def __eq__(self, other): 75 return self.to_dict() == other.to_dict() 76 77 def to_dict(self): 78 return OrderedDict(self._data) 79 80 @property 81 def bin_idx_array(self): 82 """Numpy array of bin indices in the group.""" 83 return np.arange(self.bin_idx_min, self.bin_idx_max + 1) 84 85 @property 86 def bin_table(self): 87 """Create `~astropy.table.Table` with bins in the group. 
88 89 Columns are: ``energy_group_idx``, ``bin_idx``, ``bin_type`` 90 """ 91 table = Table() 92 table['bin_idx'] = self.bin_idx_array 93 table['energy_group_idx'] = self.energy_group_idx 94 table['bin_type'] = self.bin_type 95 table['energy_min'] = self.energy_min 96 table['energy_max'] = self.energy_max 97 return table 98 99 100 class SpectrumEnergyGroups(UserList): 101 """List of `~gammapy.spectrum.SpectrumEnergyGroup` objects. 102 103 A helper class used by the `gammapy.spectrum.SpectrumEnergyGroupsMaker`. 104 """ 105 106 def __repr__(self): 107 return '{}(len={})'.format(self.__class__.__name__, len(self)) 108 109 def __str__(self): 110 ss = '{}:\n'.format(self.__class__.__name__) 111 lines = self.to_group_table().pformat(max_width=-1, max_lines=-1) 112 ss += '\n'.join(lines) 113 return ss + '\n' 114 115 def copy(self): 116 """Deep copy""" 117 return deepcopy(self) 118 119 @classmethod 120 def from_total_table(cls, table): 121 """Create list of SpectrumEnergyGroup objects from table.""" 122 groups = cls() 123 124 for energy_group_idx in np.unique(table['energy_group_idx']): 125 mask = table['energy_group_idx'] == energy_group_idx 126 group_table = table[mask] 127 bin_idx_min = group_table['bin_idx'][0] 128 bin_idx_max = group_table['bin_idx'][-1] 129 if len(set(group_table['bin_type'])) > 1: 130 raise ValueError('Inconsistent bin_type within group.') 131 bin_type = group_table['bin_type'][0] 132 energy_min = group_table['energy_min'].quantity[0] 133 energy_max = group_table['energy_max'].quantity[-1] 134 135 group = SpectrumEnergyGroup( 136 energy_group_idx=energy_group_idx, 137 bin_idx_min=bin_idx_min, 138 bin_idx_max=bin_idx_max, 139 bin_type=bin_type, 140 energy_min=energy_min, 141 energy_max=energy_max, 142 ) 143 groups.append(group) 144 145 return groups 146 147 @classmethod 148 def from_group_table(cls, table): 149 """Create from energy groups in `~astropy.table.Table` format.""" 150 return cls([ 151 SpectrumEnergyGroup.from_dict(table_row_to_dict(row)) 152 for row in table 153 ]) 154 155 def to_total_table(self): 156 """Table with one energy bin per row (`~astropy.table.Table`). 157 158 Columns: 159 160 * ``energy_group_idx`` - Energy group index (int) 161 * ``bin_idx`` - Energy bin index (int) 162 * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str) 163 164 There are no energy columns, because the per-bin energy info 165 was lost during grouping. 166 """ 167 tables = [group.bin_table for group in self] 168 return table_vstack(tables) 169 170 def to_group_table(self): 171 """Table with one energy group per row (`~astropy.table.Table`). 
172 173 Columns: 174 175 * ``energy_group_idx`` - Energy group index (int) 176 * ``energy_group_n_bins`` - Number of bins in the energy group (int) 177 * ``bin_idx_min`` - First bin index in the energy group (int) 178 * ``bin_idx_max`` - Last bin index in the energy group (int) 179 * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str) 180 * ``energy_min`` - Energy group start energy (Quantity) 181 * ``energy_max`` - Energy group end energy (Quantity) 182 """ 183 rows = [group.to_dict() for group in self] 184 table = table_from_row_data(rows) 185 return table 186 187 @property 188 def energy_range(self): 189 """Total energy range (`~astropy.units.Quantity` of length 2).""" 190 return Quantity([self[0].energy_min, self[-1].energy_max]) 191 192 @property 193 def energy_bounds(self): 194 """Energy group bounds (`~astropy.units.Quantity`).""" 195 energy = [_.energy_min for _ in self] 196 energy.append(self[-1].energy_max) 197 return Quantity(energy) 198 199 200 class SpectrumEnergyGroupMaker(object): 201 """Energy bin groups for spectral analysis. 202 203 This class contains both methods that run algorithms 204 that compute groupings as well as the results as data members 205 and methods to debug and assess the results. 206 207 The input ``obs`` is used read-only, to access the counts energy 208 binning, as well as some other info that is used for energy bin grouping. 209 210 Parameters 211 ---------- 212 obs : `~gammapy.spectrum.SpectrumObservation` 213 Spectrum observation 214 215 Attributes 216 ---------- 217 obs : `~gammapy.spectrum.SpectrumObservation` 218 Spectrum observation data 219 groups : `~gammapy.spectrum.SpectrumEnergyGroups` 220 List of energy groups 221 222 See also 223 -------- 224 SpectrumEnergyGroups, SpectrumEnergyGroup, FluxPointEstimator 225 """ 226 227 def __init__(self, obs): 228 self.obs = obs 229 self.groups = None 230 231 def groups_from_obs(self): 232 """Compute energy groups with one group per energy bin.""" 233 ebounds_obs = self.obs.e_reco 234 size = ebounds_obs.nbins 235 table = Table() 236 table['bin_idx'] = np.arange(size) 237 table['energy_group_idx'] = np.arange(size) 238 table['bin_type'] = ['normal'] * size 239 table['energy_min'] = ebounds_obs.lower_bounds 240 table['energy_max'] = ebounds_obs.upper_bounds 241 self.groups = SpectrumEnergyGroups.from_total_table(table) 242 243 def compute_groups_fixed(self, ebounds): 244 """Apply grouping for a given fixed energy binning. 245 246 This groups the observation ``obs.e_reco`` binning and 247 ``ebounds`` using a nearest neighbor match on the bin edges. 
248 249 Parameters 250 ---------- 251 ebounds : `~astropy.units.Quantity` 252 Energy bounds array 253 """ 254 ebounds_src = self.obs.e_reco 255 bin_edges_src = np.arange(len(ebounds_src)) 256 257 temp = np.interp(ebounds, ebounds_src, bin_edges_src) 258 bin_edges = np.round(temp, decimals=0).astype(np.int) 259 260 # Check for duplicates 261 duplicates_removed = set(bin_edges) 262 if len(duplicates_removed) != len(bin_edges): 263 warn_str = "Input binning\n{}\n contains bins that are finer than the" 264 warn_str += " target binning\n{}\n or outside the valid range" 265 log.warning(warn_str.format(ebounds, ebounds_src)) 266 bin_edges = sorted(duplicates_removed) 267 268 # Create normal bins 269 groups = [] 270 for idx in np.arange(len(bin_edges) - 1): 271 group = SpectrumEnergyGroup( 272 energy_group_idx=-1, 273 bin_idx_min=bin_edges[idx], 274 bin_idx_max=bin_edges[idx + 1] - 1, 275 bin_type='normal', 276 energy_min=ebounds_src[bin_edges[idx]], 277 energy_max=ebounds_src[bin_edges[idx + 1]], 278 ) 279 groups.append(group) 280 281 # Add underflow bin 282 start_edge = groups[0].bin_idx_min 283 if start_edge != 0: 284 underflow = SpectrumEnergyGroup( 285 energy_group_idx=-1, 286 bin_idx_min=0, 287 bin_idx_max=start_edge - 1, 288 bin_type='underflow', 289 energy_min=ebounds_src[0], 290 energy_max=ebounds_src[start_edge], 291 ) 292 groups.insert(0, underflow) 293 294 # Add overflow bin 295 end_edge = groups[-1].bin_idx_max 296 if end_edge != ebounds_src.nbins - 1: 297 overflow = SpectrumEnergyGroup( 298 energy_group_idx=-1, 299 bin_idx_min=end_edge + 1, 300 bin_idx_max=ebounds_src.nbins - 1, 301 bin_type='overflow', 302 energy_min=ebounds_src[end_edge + 1], 303 energy_max=ebounds_src[-1], 304 ) 305 groups.append(overflow) 306 307 # Set energy_group_idx 308 for group_idx, group in enumerate(groups): 309 group.energy_group_idx = group_idx 310 311 self.groups = SpectrumEnergyGroups(groups) 312 [end of gammapy/spectrum/energy_group.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gammapy/spectrum/energy_group.py b/gammapy/spectrum/energy_group.py
--- a/gammapy/spectrum/energy_group.py
+++ b/gammapy/spectrum/energy_group.py
@@ -251,7 +251,7 @@
         ebounds : `~astropy.units.Quantity`
            Energy bounds array
         """
-        ebounds_src = self.obs.e_reco
+        ebounds_src = self.obs.e_reco.to(ebounds.unit)
         bin_edges_src = np.arange(len(ebounds_src))
 
         temp = np.interp(ebounds, ebounds_src, bin_edges_src)
@@ -278,6 +278,11 @@
             )
             groups.append(group)
 
+        if groups == []:
+            err_str = "Input binning\n{}\n has no overlap with"
+            err_str += " target binning\n{}"
+            raise ValueError(err_str.format(ebounds, ebounds_src))
+
         # Add underflow bin
         start_edge = groups[0].bin_idx_min
         if start_edge != 0:
{"golden_diff": "diff --git a/gammapy/spectrum/energy_group.py b/gammapy/spectrum/energy_group.py\n--- a/gammapy/spectrum/energy_group.py\n+++ b/gammapy/spectrum/energy_group.py\n@@ -251,7 +251,7 @@\n ebounds : `~astropy.units.Quantity`\n Energy bounds array\n \"\"\"\n- ebounds_src = self.obs.e_reco\n+ ebounds_src = self.obs.e_reco.to(ebounds.unit)\n bin_edges_src = np.arange(len(ebounds_src))\n \n temp = np.interp(ebounds, ebounds_src, bin_edges_src)\n@@ -278,6 +278,11 @@\n )\n groups.append(group)\n \n+ if groups == []:\n+ err_str = \"Input binning\\n{}\\n has no overlap with\"\n+ err_str += \" target binning\\n{}\"\n+ raise ValueError(err_str.format(ebounds, ebounds_src))\n+\n # Add underflow bin\n start_edge = groups[0].bin_idx_min\n if start_edge != 0:\n", "issue": "IndexError from SpectrumEnergyGroupMaker\nIn this example CTA DC1 analysis the SpectrumEnergyGroupMaker give an IndexError if e.g. `emax=100 TeV` is chosen (see comment in cell 23):\r\n\r\nhttps://github.com/gammasky/cta-analyses/blob/bf571038b389b3eb13ce8ba81b35384ebd4b6750/dc-1-checks/hess_j1702/spectrum.ipynb\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Spectrum energy bin grouping.\n\nThere are three classes:\n\n* SpectrumEnergyGroup - one group\n* SpectrumEnergyGroups - one grouping, i.e. collection of groups\n* SpectrumEnergyGroupMaker - algorithms to compute groupings.\n\nAlgorithms to compute groupings are both on SpectrumEnergyGroups and SpectrumEnergyGroupMaker.\nThe difference is that SpectrumEnergyGroups contains the algorithms and book-keeping that\njust have to do with the groups, whereas SpectrumEnergyGroupMaker also accesses\ninformation from SpectrumObservation (e.g. safe energy range or counts data) and\nimplements higher-level algorithms.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom collections import OrderedDict\nfrom copy import deepcopy\nimport numpy as np\nimport logging\nfrom ..extern.six.moves import UserList\nfrom astropy.units import Quantity\nfrom astropy.table import Table\nfrom astropy.table import vstack as table_vstack\nfrom ..utils.table import table_from_row_data, table_row_to_dict\n\n__all__ = [\n 'SpectrumEnergyGroup',\n 'SpectrumEnergyGroups',\n 'SpectrumEnergyGroupMaker',\n]\n\nlog = logging.getLogger(__name__)\n\n\nclass SpectrumEnergyGroup(object):\n \"\"\"Spectrum energy group.\n\n Represents a consecutive range of bin indices (both ends inclusive).\n \"\"\"\n fields = [\n 'energy_group_idx', 'bin_idx_min', 'bin_idx_max',\n 'bin_type', 'energy_min', 'energy_max',\n ]\n \"\"\"List of data members of this class.\"\"\"\n\n valid_bin_types = ['normal', 'underflow', 'overflow']\n \"\"\"Valid values for ``bin_types`` attribute.\"\"\"\n\n def __init__(self, energy_group_idx, bin_idx_min, bin_idx_max, bin_type,\n energy_min, energy_max):\n self.energy_group_idx = energy_group_idx\n self.bin_idx_min = bin_idx_min\n self.bin_idx_max = bin_idx_max\n if bin_type not in self.valid_bin_types:\n raise ValueError('Invalid bin type: {}'.format(bin_type))\n self.bin_type = bin_type\n self.energy_min = Quantity(energy_min)\n self.energy_max = Quantity(energy_max)\n\n @classmethod\n def from_dict(cls, data):\n data = dict((_, data[_]) for _ in cls.fields)\n return cls(**data)\n\n @property\n def _data(self):\n return [(_, getattr(self, _)) for _ in self.fields]\n\n def __repr__(self):\n txt = ['{}={!r}'.format(k, v) for k, v in self._data]\n return '{}({})'.format(self.__class__.__name__, ', 
'.join(txt))\n\n def __eq__(self, other):\n return self.to_dict() == other.to_dict()\n\n def to_dict(self):\n return OrderedDict(self._data)\n\n @property\n def bin_idx_array(self):\n \"\"\"Numpy array of bin indices in the group.\"\"\"\n return np.arange(self.bin_idx_min, self.bin_idx_max + 1)\n\n @property\n def bin_table(self):\n \"\"\"Create `~astropy.table.Table` with bins in the group.\n\n Columns are: ``energy_group_idx``, ``bin_idx``, ``bin_type``\n \"\"\"\n table = Table()\n table['bin_idx'] = self.bin_idx_array\n table['energy_group_idx'] = self.energy_group_idx\n table['bin_type'] = self.bin_type\n table['energy_min'] = self.energy_min\n table['energy_max'] = self.energy_max\n return table\n\n\nclass SpectrumEnergyGroups(UserList):\n \"\"\"List of `~gammapy.spectrum.SpectrumEnergyGroup` objects.\n\n A helper class used by the `gammapy.spectrum.SpectrumEnergyGroupsMaker`.\n \"\"\"\n\n def __repr__(self):\n return '{}(len={})'.format(self.__class__.__name__, len(self))\n\n def __str__(self):\n ss = '{}:\\n'.format(self.__class__.__name__)\n lines = self.to_group_table().pformat(max_width=-1, max_lines=-1)\n ss += '\\n'.join(lines)\n return ss + '\\n'\n\n def copy(self):\n \"\"\"Deep copy\"\"\"\n return deepcopy(self)\n\n @classmethod\n def from_total_table(cls, table):\n \"\"\"Create list of SpectrumEnergyGroup objects from table.\"\"\"\n groups = cls()\n\n for energy_group_idx in np.unique(table['energy_group_idx']):\n mask = table['energy_group_idx'] == energy_group_idx\n group_table = table[mask]\n bin_idx_min = group_table['bin_idx'][0]\n bin_idx_max = group_table['bin_idx'][-1]\n if len(set(group_table['bin_type'])) > 1:\n raise ValueError('Inconsistent bin_type within group.')\n bin_type = group_table['bin_type'][0]\n energy_min = group_table['energy_min'].quantity[0]\n energy_max = group_table['energy_max'].quantity[-1]\n\n group = SpectrumEnergyGroup(\n energy_group_idx=energy_group_idx,\n bin_idx_min=bin_idx_min,\n bin_idx_max=bin_idx_max,\n bin_type=bin_type,\n energy_min=energy_min,\n energy_max=energy_max,\n )\n groups.append(group)\n\n return groups\n\n @classmethod\n def from_group_table(cls, table):\n \"\"\"Create from energy groups in `~astropy.table.Table` format.\"\"\"\n return cls([\n SpectrumEnergyGroup.from_dict(table_row_to_dict(row))\n for row in table\n ])\n\n def to_total_table(self):\n \"\"\"Table with one energy bin per row (`~astropy.table.Table`).\n\n Columns:\n\n * ``energy_group_idx`` - Energy group index (int)\n * ``bin_idx`` - Energy bin index (int)\n * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)\n\n There are no energy columns, because the per-bin energy info\n was lost during grouping.\n \"\"\"\n tables = [group.bin_table for group in self]\n return table_vstack(tables)\n\n def to_group_table(self):\n \"\"\"Table with one energy group per row (`~astropy.table.Table`).\n\n Columns:\n\n * ``energy_group_idx`` - Energy group index (int)\n * ``energy_group_n_bins`` - Number of bins in the energy group (int)\n * ``bin_idx_min`` - First bin index in the energy group (int)\n * ``bin_idx_max`` - Last bin index in the energy group (int)\n * ``bin_type`` - Bin type {'normal', 'underflow', 'overflow'} (str)\n * ``energy_min`` - Energy group start energy (Quantity)\n * ``energy_max`` - Energy group end energy (Quantity)\n \"\"\"\n rows = [group.to_dict() for group in self]\n table = table_from_row_data(rows)\n return table\n\n @property\n def energy_range(self):\n \"\"\"Total energy range (`~astropy.units.Quantity` of length 2).\"\"\"\n 
return Quantity([self[0].energy_min, self[-1].energy_max])\n\n @property\n def energy_bounds(self):\n \"\"\"Energy group bounds (`~astropy.units.Quantity`).\"\"\"\n energy = [_.energy_min for _ in self]\n energy.append(self[-1].energy_max)\n return Quantity(energy)\n\n\nclass SpectrumEnergyGroupMaker(object):\n \"\"\"Energy bin groups for spectral analysis.\n\n This class contains both methods that run algorithms\n that compute groupings as well as the results as data members\n and methods to debug and assess the results.\n\n The input ``obs`` is used read-only, to access the counts energy\n binning, as well as some other info that is used for energy bin grouping.\n\n Parameters\n ----------\n obs : `~gammapy.spectrum.SpectrumObservation`\n Spectrum observation\n\n Attributes\n ----------\n obs : `~gammapy.spectrum.SpectrumObservation`\n Spectrum observation data\n groups : `~gammapy.spectrum.SpectrumEnergyGroups`\n List of energy groups\n\n See also\n --------\n SpectrumEnergyGroups, SpectrumEnergyGroup, FluxPointEstimator\n \"\"\"\n\n def __init__(self, obs):\n self.obs = obs\n self.groups = None\n\n def groups_from_obs(self):\n \"\"\"Compute energy groups with one group per energy bin.\"\"\"\n ebounds_obs = self.obs.e_reco\n size = ebounds_obs.nbins\n table = Table()\n table['bin_idx'] = np.arange(size)\n table['energy_group_idx'] = np.arange(size)\n table['bin_type'] = ['normal'] * size\n table['energy_min'] = ebounds_obs.lower_bounds\n table['energy_max'] = ebounds_obs.upper_bounds\n self.groups = SpectrumEnergyGroups.from_total_table(table)\n\n def compute_groups_fixed(self, ebounds):\n \"\"\"Apply grouping for a given fixed energy binning.\n\n This groups the observation ``obs.e_reco`` binning and\n ``ebounds`` using a nearest neighbor match on the bin edges.\n\n Parameters\n ----------\n ebounds : `~astropy.units.Quantity`\n Energy bounds array\n \"\"\"\n ebounds_src = self.obs.e_reco\n bin_edges_src = np.arange(len(ebounds_src))\n\n temp = np.interp(ebounds, ebounds_src, bin_edges_src)\n bin_edges = np.round(temp, decimals=0).astype(np.int)\n\n # Check for duplicates\n duplicates_removed = set(bin_edges)\n if len(duplicates_removed) != len(bin_edges):\n warn_str = \"Input binning\\n{}\\n contains bins that are finer than the\"\n warn_str += \" target binning\\n{}\\n or outside the valid range\"\n log.warning(warn_str.format(ebounds, ebounds_src))\n bin_edges = sorted(duplicates_removed)\n\n # Create normal bins\n groups = []\n for idx in np.arange(len(bin_edges) - 1):\n group = SpectrumEnergyGroup(\n energy_group_idx=-1,\n bin_idx_min=bin_edges[idx],\n bin_idx_max=bin_edges[idx + 1] - 1,\n bin_type='normal',\n energy_min=ebounds_src[bin_edges[idx]],\n energy_max=ebounds_src[bin_edges[idx + 1]],\n )\n groups.append(group)\n\n # Add underflow bin\n start_edge = groups[0].bin_idx_min\n if start_edge != 0:\n underflow = SpectrumEnergyGroup(\n energy_group_idx=-1,\n bin_idx_min=0,\n bin_idx_max=start_edge - 1,\n bin_type='underflow',\n energy_min=ebounds_src[0],\n energy_max=ebounds_src[start_edge],\n )\n groups.insert(0, underflow)\n\n # Add overflow bin\n end_edge = groups[-1].bin_idx_max\n if end_edge != ebounds_src.nbins - 1:\n overflow = SpectrumEnergyGroup(\n energy_group_idx=-1,\n bin_idx_min=end_edge + 1,\n bin_idx_max=ebounds_src.nbins - 1,\n bin_type='overflow',\n energy_min=ebounds_src[end_edge + 1],\n energy_max=ebounds_src[-1],\n )\n groups.append(overflow)\n\n # Set energy_group_idx\n for group_idx, group in enumerate(groups):\n group.energy_group_idx = group_idx\n\n 
self.groups = SpectrumEnergyGroups(groups)\n", "path": "gammapy/spectrum/energy_group.py"}]}
3,983
242
gh_patches_debug_38843
rasdani/github-patches
git_diff
lutris__lutris-1049
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Restore monitor refresh rate My monitor's default refresh rate is 60Hz, but I set it to 72Hz. When game exits, lutris does not restore monitor refresh rate to 72Hz. Is this feature available? If not can it be added? </issue> <code> [start of lutris/util/display.py] 1 import re 2 import time 3 import subprocess 4 5 from lutris.util import system 6 from lutris.util.log import logger 7 8 XRANDR_CACHE = None 9 XRANDR_CACHE_SET_AT = None 10 XGAMMA_FOUND = None 11 12 13 def cached(function): 14 def wrapper(): 15 global XRANDR_CACHE 16 global XRANDR_CACHE_SET_AT 17 18 if XRANDR_CACHE and time.time() - XRANDR_CACHE_SET_AT < 60: 19 return XRANDR_CACHE 20 XRANDR_CACHE = function() 21 XRANDR_CACHE_SET_AT = time.time() 22 return XRANDR_CACHE 23 return wrapper 24 25 26 @cached 27 def get_vidmodes(): 28 xrandr_output = subprocess.Popen(["xrandr"], 29 stdout=subprocess.PIPE).communicate()[0] 30 return list([line for line in xrandr_output.decode().split("\n")]) 31 32 33 def get_outputs(): 34 """Return list of tuples containing output name and geometry.""" 35 outputs = [] 36 vid_modes = get_vidmodes() 37 if not vid_modes: 38 logger.error("xrandr didn't return anything") 39 return [] 40 for line in vid_modes: 41 parts = line.split() 42 if len(parts) < 2: 43 continue 44 if parts[1] == 'connected': 45 if len(parts) == 2: 46 continue 47 if parts[2] != 'primary': 48 geom = parts[2] 49 rotate = parts[3] 50 else: 51 geom = parts[3] 52 rotate = parts[4] 53 if geom.startswith('('): # Screen turned off, no geometry 54 continue 55 if rotate.startswith('('): # Screen not rotated, no need to include 56 outputs.append((parts[0], geom, "normal")) 57 else: 58 if rotate in ("left", "right"): 59 geom_parts = geom.split('+') 60 x_y = geom_parts[0].split('x') 61 geom = "{}x{}+{}+{}".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2]) 62 outputs.append((parts[0], geom, rotate)) 63 return outputs 64 65 66 def get_output_names(): 67 return [output[0] for output in get_outputs()] 68 69 70 def turn_off_except(display): 71 for output in get_outputs(): 72 if output[0] != display: 73 subprocess.Popen(["xrandr", "--output", output[0], "--off"]) 74 75 76 def get_resolutions(): 77 """Return the list of supported screen resolutions.""" 78 resolution_list = [] 79 for line in get_vidmodes(): 80 if line.startswith(" "): 81 resolution_match = re.match('.*?(\d+x\d+).*', line) 82 if resolution_match: 83 resolution_list.append(resolution_match.groups()[0]) 84 return resolution_list 85 86 87 def get_unique_resolutions(): 88 """Return available resolutions, without duplicates and ordered with highest resolution first""" 89 return sorted(set(get_resolutions()), key=lambda x: int(x.split('x')[0]), reverse=True) 90 91 92 def get_current_resolution(monitor=0): 93 """Return the current resolution for the desktop.""" 94 resolution = list() 95 for line in get_vidmodes(): 96 if line.startswith(" ") and "*" in line: 97 resolution_match = re.match('.*?(\d+x\d+).*', line) 98 if resolution_match: 99 resolution.append(resolution_match.groups()[0]) 100 if monitor == 'all': 101 return resolution 102 else: 103 return resolution[monitor] 104 105 106 def change_resolution(resolution): 107 """Change display resolution. 108 109 Takes a string for single monitors or a list of displays as returned 110 by get_outputs(). 
111 """ 112 if not resolution: 113 logger.warning("No resolution provided") 114 return 115 if isinstance(resolution, str): 116 logger.debug("Switching resolution to %s", resolution) 117 118 if resolution not in get_resolutions(): 119 logger.warning("Resolution %s doesn't exist." % resolution) 120 else: 121 subprocess.Popen(["xrandr", "-s", resolution]) 122 else: 123 for display in resolution: 124 display_name = display[0] 125 logger.debug("Switching to %s on %s", display[1], display[0]) 126 display_geom = display[1].split('+') 127 display_resolution = display_geom[0] 128 position = (display_geom[1], display_geom[2]) 129 130 if ( 131 len(display) > 2 and 132 display[2] in ('normal', 'left', 'right', 'inverted') 133 ): 134 rotation = display[2] 135 else: 136 rotation = "normal" 137 138 subprocess.Popen([ 139 "xrandr", 140 "--output", display_name, 141 "--mode", display_resolution, 142 "--pos", "{}x{}".format(position[0], position[1]), 143 "--rotate", rotation 144 ]).communicate() 145 146 147 def restore_gamma(): 148 """Restores gamma to a normal level.""" 149 global XGAMMA_FOUND 150 if XGAMMA_FOUND is None: 151 XGAMMA_FOUND = bool(system.find_executable('xgamma')) 152 if XGAMMA_FOUND is True: 153 subprocess.Popen(["xgamma", "-gamma", "1.0"]) 154 else: 155 logger.warning('xgamma is not available on your system') 156 157 158 def get_xrandr_version(): 159 """Return the major and minor version of XRandR utility""" 160 pattern = "version" 161 xrandr_output = subprocess.Popen(["xrandr", "--version"], 162 stdout=subprocess.PIPE).communicate()[0].decode() 163 position = xrandr_output.find(pattern) + len(pattern) 164 version_str = xrandr_output[position:].strip().split(".") 165 try: 166 return {"major": int(version_str[0]), "minor": int(version_str[1])} 167 except ValueError: 168 logger.error("Can't find version in: %s", xrandr_output) 169 return {"major": 0, "minor": 0} 170 171 172 def get_providers(): 173 """Return the list of available graphic cards""" 174 pattern = "name:" 175 providers = list() 176 version = get_xrandr_version() 177 178 if version["major"] == 1 and version["minor"] >= 4: 179 xrandr_output = subprocess.Popen(["xrandr", "--listproviders"], 180 stdout=subprocess.PIPE).communicate()[0].decode() 181 for line in xrandr_output.split("\n"): 182 if line.find("Provider ") != 0: 183 continue 184 position = line.find(pattern) + len(pattern) 185 providers.append(line[position:].strip()) 186 187 return providers 188 [end of lutris/util/display.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
 points = []
 dx = abs(x1 - x0)
 dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
 
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
 return points
</patch>
diff --git a/lutris/util/display.py b/lutris/util/display.py
--- a/lutris/util/display.py
+++ b/lutris/util/display.py
@@ -34,6 +34,11 @@
 """Return list of tuples containing output name and geometry."""
 outputs = []
 vid_modes = get_vidmodes()
+ display=None
+ mode=None
+ position=None
+ rotate=None
+ refresh_rate=None
 if not vid_modes:
 logger.error("xrandr didn't return anything")
 return []
@@ -53,16 +58,18 @@
 if geom.startswith('('): # Screen turned off, no geometry
 continue
 if rotate.startswith('('): # Screen not rotated, no need to include
- outputs.append((parts[0], geom, "normal"))
- else:
- if rotate in ("left", "right"):
- geom_parts = geom.split('+')
- x_y = geom_parts[0].split('x')
- geom = "{}x{}+{}+{}".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])
- outputs.append((parts[0], geom, rotate))
+ rotate="normal"
+ geo_split=geom.split('+')
+ position=geo_split[1] + "x" + geo_split[2]
+ display=parts[0]
+ elif '*' in line:
+ mode=parts[0]
+ for number in parts:
+ if '*' in number:
+ refresh_rate=number[:5]
+ outputs.append((display, mode, position, rotate, refresh_rate))
 return outputs
 
-
 def get_output_names():
 return [output[0] for output in get_outputs()]
 
@@ -123,24 +130,25 @@
 for display in resolution:
 display_name = display[0]
 logger.debug("Switching to %s on %s", display[1], display[0])
- display_geom = display[1].split('+')
- display_resolution = display_geom[0]
- position = (display_geom[1], display_geom[2])
+ display_mode=display[1]
+ position=display[2]
+ refresh_rate=display[4]
 
 if (
 len(display) > 2 and
- display[2] in ('normal', 'left', 'right', 'inverted')
+ display[3] in ('normal', 'left', 'right', 'inverted')
 ):
- rotation = display[2]
+ rotation = display[3]
 else:
 rotation = "normal"
 
 subprocess.Popen([
 "xrandr",
 "--output", display_name,
- "--mode", display_resolution,
- "--pos", "{}x{}".format(position[0], position[1]),
- "--rotate", rotation
+ "--mode", display_mode,
+ "--pos", position,
+ "--rotate", rotation,
+ "--rate", refresh_rate
 ]).communicate()
{"golden_diff": "diff --git a/lutris/util/display.py b/lutris/util/display.py\n--- a/lutris/util/display.py\n+++ b/lutris/util/display.py\n@@ -34,6 +34,11 @@\n \"\"\"Return list of tuples containing output name and geometry.\"\"\"\n outputs = []\n vid_modes = get_vidmodes()\n+ display=None\n+ mode=None\n+ position=None\n+ rotate=None\n+ refresh_rate=None\n if not vid_modes:\n logger.error(\"xrandr didn't return anything\")\n return []\n@@ -53,16 +58,18 @@\n if geom.startswith('('): # Screen turned off, no geometry\n continue\n if rotate.startswith('('): # Screen not rotated, no need to include\n- outputs.append((parts[0], geom, \"normal\"))\n- else:\n- if rotate in (\"left\", \"right\"):\n- geom_parts = geom.split('+')\n- x_y = geom_parts[0].split('x')\n- geom = \"{}x{}+{}+{}\".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])\n- outputs.append((parts[0], geom, rotate))\n+ rotate=\"normal\"\n+ geo_split=geom.split('+')\n+ position=geo_split[1] + \"x\" + geo_split[2]\n+ display=parts[0]\n+ elif '*' in line:\n+ mode=parts[0]\n+ for number in parts:\n+ if '*' in number:\n+ refresh_rate=number[:5]\n+ outputs.append((display, mode, position, rotate, refresh_rate))\n return outputs\n \n-\n def get_output_names():\n return [output[0] for output in get_outputs()]\n \n@@ -123,24 +130,25 @@\n for display in resolution:\n display_name = display[0]\n logger.debug(\"Switching to %s on %s\", display[1], display[0])\n- display_geom = display[1].split('+')\n- display_resolution = display_geom[0]\n- position = (display_geom[1], display_geom[2])\n+ display_mode=display[1]\n+ position=display[2]\n+ refresh_rate=display[4]\n \n if (\n len(display) > 2 and\n- display[2] in ('normal', 'left', 'right', 'inverted')\n+ display[3] in ('normal', 'left', 'right', 'inverted')\n ):\n- rotation = display[2]\n+ rotation = display[3]\n else:\n rotation = \"normal\"\n \n subprocess.Popen([\n \"xrandr\",\n \"--output\", display_name,\n- \"--mode\", display_resolution,\n- \"--pos\", \"{}x{}\".format(position[0], position[1]),\n- \"--rotate\", rotation\n+ \"--mode\", display_mode,\n+ \"--pos\", position,\n+ \"--rotate\", rotation,\n+ \"--rate\", refresh_rate\n ]).communicate()\n", "issue": "Restore monitor refresh rate\nMy monitor's default refresh rate is 60Hz, but I set it to 72Hz. When game exits, lutris does not restore monitor refresh rate to 72Hz. Is this feature available? 
If not can it be added?\n", "before_files": [{"content": "import re\nimport time\nimport subprocess\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\n\nXRANDR_CACHE = None\nXRANDR_CACHE_SET_AT = None\nXGAMMA_FOUND = None\n\n\ndef cached(function):\n def wrapper():\n global XRANDR_CACHE\n global XRANDR_CACHE_SET_AT\n\n if XRANDR_CACHE and time.time() - XRANDR_CACHE_SET_AT < 60:\n return XRANDR_CACHE\n XRANDR_CACHE = function()\n XRANDR_CACHE_SET_AT = time.time()\n return XRANDR_CACHE\n return wrapper\n\n\n@cached\ndef get_vidmodes():\n xrandr_output = subprocess.Popen([\"xrandr\"],\n stdout=subprocess.PIPE).communicate()[0]\n return list([line for line in xrandr_output.decode().split(\"\\n\")])\n\n\ndef get_outputs():\n \"\"\"Return list of tuples containing output name and geometry.\"\"\"\n outputs = []\n vid_modes = get_vidmodes()\n if not vid_modes:\n logger.error(\"xrandr didn't return anything\")\n return []\n for line in vid_modes:\n parts = line.split()\n if len(parts) < 2:\n continue\n if parts[1] == 'connected':\n if len(parts) == 2:\n continue\n if parts[2] != 'primary':\n geom = parts[2]\n rotate = parts[3]\n else:\n geom = parts[3]\n rotate = parts[4]\n if geom.startswith('('): # Screen turned off, no geometry\n continue\n if rotate.startswith('('): # Screen not rotated, no need to include\n outputs.append((parts[0], geom, \"normal\"))\n else:\n if rotate in (\"left\", \"right\"):\n geom_parts = geom.split('+')\n x_y = geom_parts[0].split('x')\n geom = \"{}x{}+{}+{}\".format(x_y[1], x_y[0], geom_parts[1], geom_parts[2])\n outputs.append((parts[0], geom, rotate))\n return outputs\n\n\ndef get_output_names():\n return [output[0] for output in get_outputs()]\n\n\ndef turn_off_except(display):\n for output in get_outputs():\n if output[0] != display:\n subprocess.Popen([\"xrandr\", \"--output\", output[0], \"--off\"])\n\n\ndef get_resolutions():\n \"\"\"Return the list of supported screen resolutions.\"\"\"\n resolution_list = []\n for line in get_vidmodes():\n if line.startswith(\" \"):\n resolution_match = re.match('.*?(\\d+x\\d+).*', line)\n if resolution_match:\n resolution_list.append(resolution_match.groups()[0])\n return resolution_list\n\n\ndef get_unique_resolutions():\n \"\"\"Return available resolutions, without duplicates and ordered with highest resolution first\"\"\"\n return sorted(set(get_resolutions()), key=lambda x: int(x.split('x')[0]), reverse=True)\n\n\ndef get_current_resolution(monitor=0):\n \"\"\"Return the current resolution for the desktop.\"\"\"\n resolution = list()\n for line in get_vidmodes():\n if line.startswith(\" \") and \"*\" in line:\n resolution_match = re.match('.*?(\\d+x\\d+).*', line)\n if resolution_match:\n resolution.append(resolution_match.groups()[0])\n if monitor == 'all':\n return resolution\n else:\n return resolution[monitor]\n\n\ndef change_resolution(resolution):\n \"\"\"Change display resolution.\n\n Takes a string for single monitors or a list of displays as returned\n by get_outputs().\n \"\"\"\n if not resolution:\n logger.warning(\"No resolution provided\")\n return\n if isinstance(resolution, str):\n logger.debug(\"Switching resolution to %s\", resolution)\n\n if resolution not in get_resolutions():\n logger.warning(\"Resolution %s doesn't exist.\" % resolution)\n else:\n subprocess.Popen([\"xrandr\", \"-s\", resolution])\n else:\n for display in resolution:\n display_name = display[0]\n logger.debug(\"Switching to %s on %s\", display[1], display[0])\n display_geom = display[1].split('+')\n 
display_resolution = display_geom[0]\n position = (display_geom[1], display_geom[2])\n\n if (\n len(display) > 2 and\n display[2] in ('normal', 'left', 'right', 'inverted')\n ):\n rotation = display[2]\n else:\n rotation = \"normal\"\n\n subprocess.Popen([\n \"xrandr\",\n \"--output\", display_name,\n \"--mode\", display_resolution,\n \"--pos\", \"{}x{}\".format(position[0], position[1]),\n \"--rotate\", rotation\n ]).communicate()\n\n\ndef restore_gamma():\n \"\"\"Restores gamma to a normal level.\"\"\"\n global XGAMMA_FOUND\n if XGAMMA_FOUND is None:\n XGAMMA_FOUND = bool(system.find_executable('xgamma'))\n if XGAMMA_FOUND is True:\n subprocess.Popen([\"xgamma\", \"-gamma\", \"1.0\"])\n else:\n logger.warning('xgamma is not available on your system')\n\n\ndef get_xrandr_version():\n \"\"\"Return the major and minor version of XRandR utility\"\"\"\n pattern = \"version\"\n xrandr_output = subprocess.Popen([\"xrandr\", \"--version\"],\n stdout=subprocess.PIPE).communicate()[0].decode()\n position = xrandr_output.find(pattern) + len(pattern)\n version_str = xrandr_output[position:].strip().split(\".\")\n try:\n return {\"major\": int(version_str[0]), \"minor\": int(version_str[1])}\n except ValueError:\n logger.error(\"Can't find version in: %s\", xrandr_output)\n return {\"major\": 0, \"minor\": 0}\n\n\ndef get_providers():\n \"\"\"Return the list of available graphic cards\"\"\"\n pattern = \"name:\"\n providers = list()\n version = get_xrandr_version()\n\n if version[\"major\"] == 1 and version[\"minor\"] >= 4:\n xrandr_output = subprocess.Popen([\"xrandr\", \"--listproviders\"],\n stdout=subprocess.PIPE).communicate()[0].decode()\n for line in xrandr_output.split(\"\\n\"):\n if line.find(\"Provider \") != 0:\n continue\n position = line.find(pattern) + len(pattern)\n providers.append(line[position:].strip())\n\n return providers\n", "path": "lutris/util/display.py"}]}
2,448
665
gh_patches_debug_20686
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3325
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider labreweries is broken During the global build at 2021-07-21-14-42-39, spider **labreweries** failed with **0 features** and **88 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/labreweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson)) </issue> <code> [start of locations/spiders/labreweries.py] 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import re 4 5 from locations.items import GeojsonPointItem 6 7 8 class LaBreweriesSpider(scrapy.Spider): 9 name = "labreweries" 10 allowed_domains = ["labeerhop.com"] 11 start_urls = ( 12 'http://labeerhop.com/breweries-sitemap.xml', 13 ) 14 15 def store_hours(self, store_hours): 16 day_groups = [] 17 this_day_group = None 18 for day in store_hours: 19 day = day.replace(' :-', ' 12:00 -') 20 day = day.split('<h5>Hours</h5>')[1].strip('<br>').strip('</aside>') 21 match = re.search(r'(closed|(\d{1,2})\S.\s*-\s*(\d{1,2})\S.)', day.lower()) 22 open('/tmp/test1.txt', 'w').write(str(day)) 23 (dow, f_hr, t_hr) = match.groups() 24 day_short = dow[:2] 25 26 f_hr = int(f_hr) 27 t_hr = int(t_hr) 28 29 hours = '{:02d}-{:02d}'.format( 30 f_hr, 31 t_hr, 32 ) 33 34 if not this_day_group: 35 this_day_group = { 36 'from_day': day_short, 37 'to_day': day_short, 38 'hours': hours 39 } 40 elif this_day_group['hours'] != hours: 41 day_groups.append(this_day_group) 42 this_day_group = { 43 'from_day': day_short, 44 'to_day': day_short, 45 'hours': hours 46 } 47 elif this_day_group['hours'] == hours: 48 this_day_group['to_day'] = day_short 49 50 day_groups.append(this_day_group) 51 52 opening_hours = "" 53 if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'): 54 opening_hours = '24/7' 55 else: 56 for day_group in day_groups: 57 if day_group['from_day'] == day_group['to_day']: 58 opening_hours += '{from_day} {hours}; '.format(**day_group) 59 elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa': 60 opening_hours += '{hours}; '.format(**day_group) 61 else: 62 opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group) 63 opening_hours = opening_hours[:-2] 64 65 return opening_hours 66 67 def address(self, address): 68 if not address: 69 return None 70 71 addr_tags = { 72 "addr_full": address[0].split(',')[0].strip(), 73 "city": address[0].split(',')[1].strip(), 74 "state": address[0].split(' ')[-2].strip(), 75 "postcode": address[0].split(' ')[-1].strip(), 76 } 77 78 return addr_tags 79 80 def parse(self, response): 81 response.selector.remove_namespaces() 82 city_urls = response.xpath('//url/loc/text()').extract() 83 for path in city_urls: 84 if path not in "http://labeerhop.com/breweries/1056/": 85 yield scrapy.Request( 86 path.strip(), 87 callback=self.parse_store, 88 ) 89 90 def parse_store(self, response): 91 92 properties = { 93 'website': response.xpath('//head/link[@rel="canonical"]/@href').extract_first(), 94 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip("['']"), 95 'opening_hours': re.sub(r'\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\t',' ').replace('\n','').replace('\r',' ')).strip(), 96 # 'lon': float(data['geo']['longitude']), # not lon on 
page 97 # 'lat': float(data['geo']['latitude']), # not lat on page 98 } 99 100 address = self.address(response.xpath('/html/body/div[1]/div[1]/aside/address/text()').extract()) 101 if address: 102 properties.update(address) 103 104 105 yield GeojsonPointItem(**properties) 106 [end of locations/spiders/labreweries.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/labreweries.py b/locations/spiders/labreweries.py
--- a/locations/spiders/labreweries.py
+++ b/locations/spiders/labreweries.py
@@ -92,7 +92,7 @@
 properties = {
 'website': response.xpath('//head/link[@rel="canonical"]/@href').extract_first(),
 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip("['']"),
- 'opening_hours': re.sub(r'\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\t',' ').replace('\n','').replace('\r',' ')).strip(),
+ 'opening_hours': re.sub(r'\s+', ' ', response.xpath('//*[@id="content"]/div/div[2]/div[3]').extract()[0].split('<h5 class="mb-2">Hours</h5>')[1].replace('<br>','').replace('</div>','').replace('\t',' ').replace('\n','').replace('\r',' ')).strip(),
 # 'lon': float(data['geo']['longitude']), # not lon on page
 # 'lat': float(data['geo']['latitude']), # not lat on page
 }
{"golden_diff": "diff --git a/locations/spiders/labreweries.py b/locations/spiders/labreweries.py\n--- a/locations/spiders/labreweries.py\n+++ b/locations/spiders/labreweries.py\n@@ -92,7 +92,7 @@\n properties = {\n 'website': response.xpath('//head/link[@rel=\"canonical\"]/@href').extract_first(),\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\"['']\"),\n- 'opening_hours': re.sub(r'\\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n+ 'opening_hours': re.sub(r'\\s+', ' ', response.xpath('//*[@id=\"content\"]/div/div[2]/div[3]').extract()[0].split('<h5 class=\"mb-2\">Hours</h5>')[1].replace('<br>','').replace('</div>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n # 'lon': float(data['geo']['longitude']), # not lon on page\n # 'lat': float(data['geo']['latitude']), # not lat on page\n }\n", "issue": "Spider labreweries is broken\nDuring the global build at 2021-07-21-14-42-39, spider **labreweries** failed with **0 features** and **88 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/labreweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass LaBreweriesSpider(scrapy.Spider):\n name = \"labreweries\"\n allowed_domains = [\"labeerhop.com\"]\n start_urls = (\n 'http://labeerhop.com/breweries-sitemap.xml',\n )\n\n def store_hours(self, store_hours):\n day_groups = []\n this_day_group = None\n for day in store_hours:\n day = day.replace(' :-', ' 12:00 -')\n day = day.split('<h5>Hours</h5>')[1].strip('<br>').strip('</aside>')\n match = re.search(r'(closed|(\\d{1,2})\\S.\\s*-\\s*(\\d{1,2})\\S.)', day.lower())\n open('/tmp/test1.txt', 'w').write(str(day))\n (dow, f_hr, t_hr) = match.groups()\n day_short = dow[:2]\n\n f_hr = int(f_hr)\n t_hr = int(t_hr)\n\n hours = '{:02d}-{:02d}'.format(\n f_hr,\n t_hr,\n )\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day_short,\n 'to_day': day_short,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day_short,\n 'to_day': day_short,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day_short\n\n day_groups.append(this_day_group)\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n\n def address(self, address):\n if not address:\n return None\n\n addr_tags = {\n \"addr_full\": address[0].split(',')[0].strip(),\n \"city\": address[0].split(',')[1].strip(),\n \"state\": address[0].split(' ')[-2].strip(),\n \"postcode\": address[0].split(' ')[-1].strip(),\n }\n\n 
return addr_tags\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path not in \"http://labeerhop.com/breweries/1056/\":\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n properties = {\n 'website': response.xpath('//head/link[@rel=\"canonical\"]/@href').extract_first(),\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\"['']\"),\n 'opening_hours': re.sub(r'\\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n # 'lon': float(data['geo']['longitude']), # not lon on page\n # 'lat': float(data['geo']['latitude']), # not lat on page\n }\n\n address = self.address(response.xpath('/html/body/div[1]/div[1]/aside/address/text()').extract())\n if address:\n properties.update(address)\n\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/labreweries.py"}]}
1,887
299
gh_patches_debug_3566
rasdani/github-patches
git_diff
sopel-irc__sopel-1417
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> reddit: support all reddit subdomains #1397 adds support for links to `old.reddit.com`, but any two-letter subdomain of `reddit.com` is valid as well. Reddit uses these for internationalization (e.g. `it.reddit.com` -> Italian UI) and also to allow subreddits to add custom styles (a common example is using `np.reddit.com` to trigger a "no participation" stylesheet that hides voting arrows). Sopel's reddit module should support these subdomains. There are also really old three-letter subdomains (`pay`, `ssl`) that still work. Most three-letter combos I tried redirect to `www.reddit.com/r/combohere` though. </issue> <code> [start of sopel/modules/reddit.py] 1 # coding=utf-8 2 # Author: Elsie Powell, embolalia.com 3 from __future__ import unicode_literals, absolute_import, print_function, division 4 5 from sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP 6 from sopel.formatting import bold, color, colors 7 from sopel.web import USER_AGENT 8 from sopel.tools import SopelMemory, time 9 import datetime as dt 10 import praw 11 import re 12 import sys 13 if sys.version_info.major >= 3: 14 unicode = str 15 if sys.version_info.minor >= 4: 16 from html import unescape 17 else: 18 from html.parser import HTMLParser 19 unescape = HTMLParser().unescape 20 else: 21 from HTMLParser import HTMLParser 22 unescape = HTMLParser().unescape 23 24 25 domain = r'https?://(?:www\.|np\.|old\.)?reddit\.com' 26 post_url = r'%s/r/(.*?)/comments/([\w-]+)' % domain 27 user_url = r'%s/u(ser)?/([\w-]+)' % domain 28 post_regex = re.compile(post_url) 29 user_regex = re.compile(user_url) 30 spoiler_subs = [ 31 'stevenuniverse', 32 'onepunchman', 33 ] 34 35 36 def setup(bot): 37 if not bot.memory.contains('url_callbacks'): 38 bot.memory['url_callbacks'] = SopelMemory() 39 bot.memory['url_callbacks'][post_regex] = rpost_info 40 bot.memory['url_callbacks'][user_regex] = redditor_info 41 42 43 def shutdown(bot): 44 del bot.memory['url_callbacks'][post_regex] 45 del bot.memory['url_callbacks'][user_regex] 46 47 48 @rule('.*%s.*' % post_url) 49 def rpost_info(bot, trigger, match=None): 50 match = match or trigger 51 try: 52 r = praw.Reddit( 53 user_agent=USER_AGENT, 54 client_id='6EiphT6SSQq7FQ', 55 client_secret=None, 56 ) 57 s = r.submission(id=match.group(2)) 58 except Exception: 59 r = praw.Reddit(user_agent=USER_AGENT) 60 s = r.get_submission(submission_id=match.group(2)) 61 62 message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | ' 63 '{comments} comments | Posted by {author} | ' 64 'Created at {created}') 65 66 subreddit = s.subreddit.display_name 67 if s.is_self: 68 link = '(self.{})'.format(subreddit) 69 else: 70 link = '({}) to r/{}'.format(s.url, subreddit) 71 72 if s.over_18: 73 if subreddit.lower() in spoiler_subs: 74 nsfw = bold(color(' [SPOILERS]', colors.RED)) 75 else: 76 nsfw = bold(color(' [NSFW]', colors.RED)) 77 78 sfw = bot.db.get_channel_value(trigger.sender, 'sfw') 79 if sfw: 80 link = '(link hidden)' 81 bot.write(['KICK', trigger.sender, trigger.nick, 82 'Linking to NSFW content in a SFW channel.']) 83 else: 84 nsfw = '' 85 86 if s.author: 87 author = s.author.name 88 else: 89 author = '[deleted]' 90 91 tz = time.get_timezone(bot.db, bot.config, None, trigger.nick, 92 trigger.sender) 93 time_created = dt.datetime.utcfromtimestamp(s.created_utc) 94 created = time.format_time(bot.db, bot.config, tz, trigger.nick, 95 trigger.sender, time_created) 96 97 if 
s.score > 0: 98 point_color = colors.GREEN 99 else: 100 point_color = colors.RED 101 102 percent = color(unicode(s.upvote_ratio * 100) + '%', point_color) 103 104 title = unescape(s.title) 105 message = message.format( 106 title=title, link=link, nsfw=nsfw, points=s.score, percent=percent, 107 comments=s.num_comments, author=author, created=created) 108 109 bot.say(message) 110 111 112 # If you change this, you'll have to change some other things... 113 @commands('redditor') 114 @example('.redditor poem_for_your_sprog') 115 def redditor_info(bot, trigger, match=None): 116 """Show information about the given Redditor""" 117 commanded = re.match(bot.config.core.prefix + 'redditor', trigger) 118 r = praw.Reddit( 119 user_agent=USER_AGENT, 120 client_id='6EiphT6SSQq7FQ', 121 client_secret=None, 122 ) 123 match = match or trigger 124 try: 125 u = r.get_redditor(match.group(2)) 126 except Exception: # TODO: Be specific 127 if commanded: 128 bot.say('No such Redditor.') 129 return NOLIMIT 130 else: 131 return 132 # Fail silently if it wasn't an explicit command. 133 134 message = '[REDDITOR] ' + u.name 135 now = dt.datetime.utcnow() 136 cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc) 137 cakeday_start = cakeday_start.replace(year=now.year) 138 day = dt.timedelta(days=1) 139 year_div_by_400 = now.year % 400 == 0 140 year_div_by_100 = now.year % 100 == 0 141 year_div_by_4 = now.year % 4 == 0 142 is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4) 143 if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)): 144 # If cake day is 2/29 and it's not a leap year, cake day is 1/3. 145 # Cake day begins at exact account creation time. 146 is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day) 147 else: 148 is_cakeday = cakeday_start <= now <= cakeday_start + day 149 150 if is_cakeday: 151 message = message + ' | 13Cake day' 152 if commanded: 153 message = message + ' | https://reddit.com/u/' + u.name 154 if u.is_gold: 155 message = message + ' | 08Gold' 156 if u.is_mod: 157 message = message + ' | 05Mod' 158 message = message + (' | Link: ' + str(u.link_karma) + 159 ' | Comment: ' + str(u.comment_karma)) 160 161 bot.say(message) 162 163 164 # If you change the groups here, you'll have to change some things above. 165 @rule('.*%s.*' % user_url) 166 def auto_redditor_info(bot, trigger): 167 redditor_info(bot, trigger) 168 169 170 @require_chanmsg('.setsfw is only permitted in channels') 171 @commands('setsafeforwork', 'setsfw') 172 @example('.setsfw true') 173 @example('.setsfw false') 174 def update_channel(bot, trigger): 175 """ 176 Sets the Safe for Work status (true or false) for the current 177 channel. Defaults to false. 178 """ 179 if bot.privileges[trigger.sender][trigger.nick] < OP: 180 return 181 else: 182 param = 'true' 183 if trigger.group(2) and trigger.group(3): 184 param = trigger.group(3).strip().lower() 185 sfw = param == 'true' 186 bot.db.set_channel_value(trigger.sender, 'sfw', sfw) 187 if sfw: 188 bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender) 189 else: 190 bot.reply('Got it. %s is now flagged as NSFW.' % trigger.sender) 191 192 193 @commands('getsafeforwork', 'getsfw') 194 @example('.getsfw [channel]') 195 def get_channel_sfw(bot, trigger): 196 """ 197 Gets the preferred channel's Safe for Work status, or the current 198 channel's status if no channel given. 
199 """ 200 channel = trigger.group(2) 201 if not channel: 202 channel = trigger.sender 203 if channel.is_nick(): 204 return bot.say('.getsfw with no channel param is only permitted in channels') 205 206 channel = channel.strip() 207 208 sfw = bot.db.get_channel_value(channel, 'sfw') 209 if sfw: 210 bot.say('%s is flagged as SFW' % channel) 211 else: 212 bot.say('%s is flagged as NSFW' % channel) 213 [end of sopel/modules/reddit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sopel/modules/reddit.py b/sopel/modules/reddit.py
--- a/sopel/modules/reddit.py
+++ b/sopel/modules/reddit.py
@@ -22,7 +22,7 @@
 unescape = HTMLParser().unescape
 
 
-domain = r'https?://(?:www\.|np\.|old\.)?reddit\.com'
+domain = r'https?://(?:www\.|old\.|pay\.|ssl\.|[a-z]{2}\.)?reddit\.com'
 post_url = r'%s/r/(.*?)/comments/([\w-]+)' % domain
 user_url = r'%s/u(ser)?/([\w-]+)' % domain
 post_regex = re.compile(post_url)
{"golden_diff": "diff --git a/sopel/modules/reddit.py b/sopel/modules/reddit.py\n--- a/sopel/modules/reddit.py\n+++ b/sopel/modules/reddit.py\n@@ -22,7 +22,7 @@\n unescape = HTMLParser().unescape\n \n \n-domain = r'https?://(?:www\\.|np\\.|old\\.)?reddit\\.com'\n+domain = r'https?://(?:www\\.|old\\.|pay\\.|ssl\\.|[a-z]{2}\\.)?reddit\\.com'\n post_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\n user_url = r'%s/u(ser)?/([\\w-]+)' % domain\n post_regex = re.compile(post_url)\n", "issue": "reddit: support all reddit subdomains\n#1397 adds support for links to `old.reddit.com`, but any two-letter subdomain of `reddit.com` is valid as well. Reddit uses these for internationalization (e.g. `it.reddit.com` -> Italian UI) and also to allow subreddits to add custom styles (a common example is using `np.reddit.com` to trigger a \"no participation\" stylesheet that hides voting arrows).\r\n\r\nSopel's reddit module should support these subdomains.\r\n\r\nThere are also really old three-letter subdomains (`pay`, `ssl`) that still work. Most three-letter combos I tried redirect to `www.reddit.com/r/combohere` though.\n", "before_files": [{"content": "# coding=utf-8\n# Author: Elsie Powell, embolalia.com\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP\nfrom sopel.formatting import bold, color, colors\nfrom sopel.web import USER_AGENT\nfrom sopel.tools import SopelMemory, time\nimport datetime as dt\nimport praw\nimport re\nimport sys\nif sys.version_info.major >= 3:\n unicode = str\n if sys.version_info.minor >= 4:\n from html import unescape\n else:\n from html.parser import HTMLParser\n unescape = HTMLParser().unescape\nelse:\n from HTMLParser import HTMLParser\n unescape = HTMLParser().unescape\n\n\ndomain = r'https?://(?:www\\.|np\\.|old\\.)?reddit\\.com'\npost_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\nuser_url = r'%s/u(ser)?/([\\w-]+)' % domain\npost_regex = re.compile(post_url)\nuser_regex = re.compile(user_url)\nspoiler_subs = [\n 'stevenuniverse',\n 'onepunchman',\n]\n\n\ndef setup(bot):\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = SopelMemory()\n bot.memory['url_callbacks'][post_regex] = rpost_info\n bot.memory['url_callbacks'][user_regex] = redditor_info\n\n\ndef shutdown(bot):\n del bot.memory['url_callbacks'][post_regex]\n del bot.memory['url_callbacks'][user_regex]\n\n\n@rule('.*%s.*' % post_url)\ndef rpost_info(bot, trigger, match=None):\n match = match or trigger\n try:\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n s = r.submission(id=match.group(2))\n except Exception:\n r = praw.Reddit(user_agent=USER_AGENT)\n s = r.get_submission(submission_id=match.group(2))\n\n message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '\n '{comments} comments | Posted by {author} | '\n 'Created at {created}')\n\n subreddit = s.subreddit.display_name\n if s.is_self:\n link = '(self.{})'.format(subreddit)\n else:\n link = '({}) to r/{}'.format(s.url, subreddit)\n\n if s.over_18:\n if subreddit.lower() in spoiler_subs:\n nsfw = bold(color(' [SPOILERS]', colors.RED))\n else:\n nsfw = bold(color(' [NSFW]', colors.RED))\n\n sfw = bot.db.get_channel_value(trigger.sender, 'sfw')\n if sfw:\n link = '(link hidden)'\n bot.write(['KICK', trigger.sender, trigger.nick,\n 'Linking to NSFW content in a SFW channel.'])\n else:\n nsfw = ''\n\n if s.author:\n author = s.author.name\n else:\n 
author = '[deleted]'\n\n tz = time.get_timezone(bot.db, bot.config, None, trigger.nick,\n trigger.sender)\n time_created = dt.datetime.utcfromtimestamp(s.created_utc)\n created = time.format_time(bot.db, bot.config, tz, trigger.nick,\n trigger.sender, time_created)\n\n if s.score > 0:\n point_color = colors.GREEN\n else:\n point_color = colors.RED\n\n percent = color(unicode(s.upvote_ratio * 100) + '%', point_color)\n\n title = unescape(s.title)\n message = message.format(\n title=title, link=link, nsfw=nsfw, points=s.score, percent=percent,\n comments=s.num_comments, author=author, created=created)\n\n bot.say(message)\n\n\n# If you change this, you'll have to change some other things...\n@commands('redditor')\n@example('.redditor poem_for_your_sprog')\ndef redditor_info(bot, trigger, match=None):\n \"\"\"Show information about the given Redditor\"\"\"\n commanded = re.match(bot.config.core.prefix + 'redditor', trigger)\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n match = match or trigger\n try:\n u = r.get_redditor(match.group(2))\n except Exception: # TODO: Be specific\n if commanded:\n bot.say('No such Redditor.')\n return NOLIMIT\n else:\n return\n # Fail silently if it wasn't an explicit command.\n\n message = '[REDDITOR] ' + u.name\n now = dt.datetime.utcnow()\n cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc)\n cakeday_start = cakeday_start.replace(year=now.year)\n day = dt.timedelta(days=1)\n year_div_by_400 = now.year % 400 == 0\n year_div_by_100 = now.year % 100 == 0\n year_div_by_4 = now.year % 4 == 0\n is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4)\n if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)):\n # If cake day is 2/29 and it's not a leap year, cake day is 1/3.\n # Cake day begins at exact account creation time.\n is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day)\n else:\n is_cakeday = cakeday_start <= now <= cakeday_start + day\n\n if is_cakeday:\n message = message + ' | \u0002\u000313Cake day\u0003\u0002'\n if commanded:\n message = message + ' | https://reddit.com/u/' + u.name\n if u.is_gold:\n message = message + ' | \u0002\u000308Gold\u0003\u0002'\n if u.is_mod:\n message = message + ' | \u0002\u000305Mod\u0003\u0002'\n message = message + (' | Link: ' + str(u.link_karma) +\n ' | Comment: ' + str(u.comment_karma))\n\n bot.say(message)\n\n\n# If you change the groups here, you'll have to change some things above.\n@rule('.*%s.*' % user_url)\ndef auto_redditor_info(bot, trigger):\n redditor_info(bot, trigger)\n\n\n@require_chanmsg('.setsfw is only permitted in channels')\n@commands('setsafeforwork', 'setsfw')\n@example('.setsfw true')\n@example('.setsfw false')\ndef update_channel(bot, trigger):\n \"\"\"\n Sets the Safe for Work status (true or false) for the current\n channel. Defaults to false.\n \"\"\"\n if bot.privileges[trigger.sender][trigger.nick] < OP:\n return\n else:\n param = 'true'\n if trigger.group(2) and trigger.group(3):\n param = trigger.group(3).strip().lower()\n sfw = param == 'true'\n bot.db.set_channel_value(trigger.sender, 'sfw', sfw)\n if sfw:\n bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender)\n else:\n bot.reply('Got it. %s is now flagged as NSFW.' 
% trigger.sender)\n\n\n@commands('getsafeforwork', 'getsfw')\n@example('.getsfw [channel]')\ndef get_channel_sfw(bot, trigger):\n \"\"\"\n Gets the preferred channel's Safe for Work status, or the current\n channel's status if no channel given.\n \"\"\"\n channel = trigger.group(2)\n if not channel:\n channel = trigger.sender\n if channel.is_nick():\n return bot.say('.getsfw with no channel param is only permitted in channels')\n\n channel = channel.strip()\n\n sfw = bot.db.get_channel_value(channel, 'sfw')\n if sfw:\n bot.say('%s is flagged as SFW' % channel)\n else:\n bot.say('%s is flagged as NSFW' % channel)\n", "path": "sopel/modules/reddit.py"}]}
3,071
163
gh_patches_debug_47927
rasdani/github-patches
git_diff
uccser__cs-unplugged-885
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Minor adjustments to navbar and homepage ## Navbar - [x] There should be more space between logo and 'Topics'. - [x] The search bar can be ~20% smaller. ## Hompeage - [x] Navbar should be transparent and fade in when user scrolls down. </issue> <code> [start of csunplugged/general/views.py] 1 """Views for the general application.""" 2 3 from django.views.generic import TemplateView 4 from django.http import HttpResponse 5 6 7 class GeneralIndexView(TemplateView): 8 """View for the homepage that renders from a template.""" 9 10 template_name = "general/index.html" 11 12 13 class GeneralAboutView(TemplateView): 14 """View for the about page that renders from a template.""" 15 16 template_name = "general/about.html" 17 18 19 class GeneralContactView(TemplateView): 20 """View for the contact page that renders from a template.""" 21 22 template_name = "general/contact.html" 23 24 25 class GeneralPeopleView(TemplateView): 26 """View for the people page that renders from a template.""" 27 28 template_name = "general/people.html" 29 30 31 class GeneralPrinciplesView(TemplateView): 32 """View for the princples page that renders from a template.""" 33 34 template_name = "general/principles.html" 35 36 37 class WhatIsCSView(TemplateView): 38 """View for the 'What is Computer Science?' page that renders from a template.""" 39 40 template_name = "general/what-is-computer-science.html" 41 42 43 class ComputationalThinkingView(TemplateView): 44 """View for the Computational Thinking page that renders from a template.""" 45 46 template_name = "general/computational-thinking.html" 47 48 49 class HowDoITeachCSUnpluggedView(TemplateView): 50 """View for the 'How do I teach CS Unplugged?' page that renders from a template.""" 51 52 template_name = "general/how-do-i-teach-cs-unplugged.html" 53 54 55 def health_check(request): 56 """Return heath check response for Google App Engine. 57 58 Returns a 200 HTTP response for Google App Engine to detect the system 59 is running. 60 """ 61 return HttpResponse(status=200) 62 [end of csunplugged/general/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py
--- a/csunplugged/general/views.py
+++ b/csunplugged/general/views.py
@@ -9,6 +9,16 @@
 
 template_name = "general/index.html"
 
+ def get_context_data(self, **kwargs):
+ """Provide the context data for the homepage.
+
+ Returns:
+ Dictionary of context data.
+ """
+ context = super(GeneralIndexView, self).get_context_data(**kwargs)
+ context["homepage"] = True
+ return context
+
 
 class GeneralAboutView(TemplateView):
 """View for the about page that renders from a template."""
{"golden_diff": "diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -9,6 +9,16 @@\n \n template_name = \"general/index.html\"\n \n+ def get_context_data(self, **kwargs):\n+ \"\"\"Provide the context data for the homepage.\n+\n+ Returns:\n+ Dictionary of context data.\n+ \"\"\"\n+ context = super(GeneralIndexView, self).get_context_data(**kwargs)\n+ context[\"homepage\"] = True\n+ return context\n+\n \n class GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n", "issue": "Minor adjustments to navbar and homepage\n## Navbar\r\n\r\n- [x] There should be more space between logo and 'Topics'.\r\n- [x] The search bar can be ~20% smaller.\r\n\r\n## Hompeage\r\n\r\n- [x] Navbar should be transparent and fade in when user scrolls down.\n", "before_files": [{"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = \"general/index.html\"\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = \"general/about.html\"\n\n\nclass GeneralContactView(TemplateView):\n \"\"\"View for the contact page that renders from a template.\"\"\"\n\n template_name = \"general/contact.html\"\n\n\nclass GeneralPeopleView(TemplateView):\n \"\"\"View for the people page that renders from a template.\"\"\"\n\n template_name = \"general/people.html\"\n\n\nclass GeneralPrinciplesView(TemplateView):\n \"\"\"View for the princples page that renders from a template.\"\"\"\n\n template_name = \"general/principles.html\"\n\n\nclass WhatIsCSView(TemplateView):\n \"\"\"View for the 'What is Computer Science?' page that renders from a template.\"\"\"\n\n template_name = \"general/what-is-computer-science.html\"\n\n\nclass ComputationalThinkingView(TemplateView):\n \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n\n template_name = \"general/computational-thinking.html\"\n\n\nclass HowDoITeachCSUnpluggedView(TemplateView):\n \"\"\"View for the 'How do I teach CS Unplugged?' page that renders from a template.\"\"\"\n\n template_name = \"general/how-do-i-teach-cs-unplugged.html\"\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}]}
1,100
154
gh_patches_debug_40993
rasdani/github-patches
git_diff
apluslms__a-plus-1062
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Feature request: send email when teacher adds news Hi It would be very nice to be able to notify students via email when teacher adds news. This should be an option so the teacher could decide on case-by-case basis whether to send the email or not. What do you think? Thanks! Feature request: send email when teacher adds news Hi It would be very nice to be able to notify students via email when teacher adds news. This should be an option so the teacher could decide on case-by-case basis whether to send the email or not. What do you think? Thanks! </issue> <code> [start of news/forms.py] 1 from django import forms 2 3 from .models import News 4 5 6 class NewsForm(forms.ModelForm): 7 8 class Meta: 9 model = News 10 fields = [ 11 'audience', 12 'publish', 13 'pin', 14 'title', 15 'body', 16 ] 17 [end of news/forms.py] [start of lib/email_messages.py] 1 import logging 2 import traceback 3 from django.conf import settings 4 from django.core.mail import send_mail 5 from django.urls import reverse 6 from .helpers import build_aplus_url 7 8 9 logger = logging.getLogger('aplus.lib.email_messages') 10 11 12 def email_course_instance(instance, subject, message, everyone=False) -> bool: 13 """ 14 Sends an email to a course instance's technical support emails or teachers if technical support not set. 15 If everyone == True, sends emails to teachers anyway. 16 """ 17 recipients = [] 18 if instance.technical_error_emails: 19 recipients = instance.technical_error_emails.split(",") 20 if everyone or not recipients: 21 recipients = instance.teachers.exclude(user__email='').values_list("user__email", flat=True) 22 23 if not recipients: 24 raise ValueError("No recipients") 25 26 try: 27 return send_mail(subject, message, settings.SERVER_EMAIL, recipients, True) == 1 28 except: 29 logger.exception('Failed to send course instance emails.') 30 raise 31 32 33 def email_course_error(request, exercise, message, exception=True): 34 """ 35 Sends error message to course instance's teachers or technical support emails if set. 
36 """ 37 instance = exercise.course_instance 38 39 error_trace = "-" 40 if exception: 41 error_trace = traceback.format_exc() 42 43 if request: 44 request_fields = repr(request) 45 else: 46 request_fields = "No request available" 47 48 subject = settings.EXERCISE_ERROR_SUBJECT.format( 49 course=instance.course.code, 50 exercise=str(exercise)) 51 body = settings.EXERCISE_ERROR_DESCRIPTION.format( 52 message=message, 53 exercise_url=build_aplus_url( 54 exercise.get_absolute_url(), user_url=True), 55 course_edit_url=build_aplus_url( 56 instance.get_url('course-details'), user_url=True), 57 error_trace=error_trace, 58 request_fields=request_fields) 59 60 try: 61 email_course_instance(instance, subject, body) 62 except: 63 pass 64 [end of lib/email_messages.py] [start of news/views.py] 1 from django.core.exceptions import PermissionDenied 2 from django.http import Http404 3 from django.shortcuts import get_object_or_404 4 5 from authorization.permissions import ACCESS 6 from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin 7 from lib.viewbase import BaseFormView, BaseRedirectView 8 from .forms import NewsForm 9 from .models import News 10 11 12 class ListNewsView(CourseInstanceBaseView): 13 access_mode = ACCESS.TEACHER 14 template_name = "news/list.html" 15 16 def get_common_objects(self): 17 super().get_common_objects() 18 self.news = self.instance.news.all() 19 self.note("news") 20 21 22 class EditNewsView(CourseInstanceMixin, BaseFormView): 23 access_mode = ACCESS.TEACHER 24 template_name = "news/edit.html" 25 form_class = NewsForm 26 news_item_kw = "news_id" 27 28 def get_form_kwargs(self): 29 kwargs = super().get_form_kwargs() 30 31 news_id = self._get_kwarg(self.news_item_kw, default=None) 32 if news_id: 33 self.news_item = get_object_or_404( 34 News, 35 pk=news_id, 36 course_instance=self.instance 37 ) 38 self.note("news_item") 39 else: 40 self.news_item = News(course_instance=self.instance) 41 42 kwargs["instance"] = self.news_item 43 return kwargs 44 45 def get_success_url(self): 46 return self.instance.get_url("news-list") 47 48 def form_valid(self, form): 49 form.save() 50 return super().form_valid(form) 51 52 53 class RemoveNewsView(CourseInstanceMixin, BaseRedirectView): 54 access_mode = ACCESS.TEACHER 55 news_item_kw = "news_id" 56 57 def get_resource_objects(self): 58 super().get_resource_objects() 59 self.news_item = get_object_or_404( 60 News, 61 id=self._get_kwarg(self.news_item_kw), 62 course_instance=self.instance, 63 ) 64 self.note("news_item") 65 66 def post(self, request, *args, **kwargs): 67 self.news_item.delete() 68 return self.redirect(self.instance.get_url("news-list")) 69 [end of news/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/email_messages.py b/lib/email_messages.py
--- a/lib/email_messages.py
+++ b/lib/email_messages.py
@@ -1,9 +1,10 @@
 import logging
 import traceback
 from django.conf import settings
-from django.core.mail import send_mail
-from django.urls import reverse
-from .helpers import build_aplus_url
+from django.core.mail import send_mail, send_mass_mail
+
+from .helpers import Enum, build_aplus_url
+from course.models import CourseInstance
 
 
 logger = logging.getLogger('aplus.lib.email_messages')
@@ -61,3 +62,30 @@
 email_course_instance(instance, subject, body)
 except:
 pass
+
+
+def email_course_students(
+ instance: CourseInstance,
+ subject: str,
+ message: str,
+ audience: Enum = CourseInstance.ENROLLMENT_AUDIENCE.ALL_USERS,
+ ) -> int:
+ """
+ Sends an email to students on the course. Audience parameter controls whether the mail goes
+ to all (default), just internal, or just external students.
+ Returns number of emails sent, or -1 in case of error.
+ """
+ students = instance.students
+ if audience == CourseInstance.ENROLLMENT_AUDIENCE.INTERNAL_USERS:
+ students = students.filter(organization=settings.LOCAL_ORGANIZATION)
+ elif audience == CourseInstance.ENROLLMENT_AUDIENCE.EXTERNAL_USERS:
+ students = students.exclude(organization=settings.LOCAL_ORGANIZATION)
+
+ recipients = students.exclude(user__email='').values_list("user__email", flat=True)
+ emails = tuple(map(lambda x: (subject, message, settings.SERVER_EMAIL, [x]), recipients))
+
+ try:
+ return send_mass_mail(emails)
+ except:
+ logger.exception('Failed to send course instance emails.')
+ return -1
diff --git a/news/forms.py b/news/forms.py
--- a/news/forms.py
+++ b/news/forms.py
@@ -1,16 +1,25 @@
+from typing import Any
+
 from django import forms
+from django.utils.translation import gettext_lazy as _
 
 from .models import News
 
 
 class NewsForm(forms.ModelForm):
 
+ email = forms.BooleanField(
+ required=False,
+ label=_("SEND_EMAIL_TO_STUDENTS"),
+ )
+
 class Meta:
 model = News
 fields = [
 'audience',
 'publish',
 'pin',
+ 'email',
 'title',
 'body',
 ]
diff --git a/news/views.py b/news/views.py
--- a/news/views.py
+++ b/news/views.py
@@ -1,10 +1,14 @@
+from django.conf import settings
+from django.contrib import messages
 from django.core.exceptions import PermissionDenied
 from django.http import Http404
 from django.shortcuts import get_object_or_404
+from django.utils.translation import gettext_lazy as _
 
 from authorization.permissions import ACCESS
 from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin
 from lib.viewbase import BaseFormView, BaseRedirectView
+from lib.email_messages import email_course_students
 from .forms import NewsForm
 from .models import News
 
@@ -47,6 +51,15 @@
 
 def form_valid(self, form):
 form.save()
+ if form.cleaned_data['email']:
+ subject = f"[{settings.BRAND_NAME} course news] {self.instance.course.code}: {self.news_item.title}"
+ if email_course_students(
+ self.instance,
+ subject,
+ self.news_item.body,
+ self.news_item.audience,
+ ) < 0:
+ messages.error(self.request, _('FAILED_TO_SEND_EMAIL'))
 return super().form_valid(form)
{"golden_diff": "diff --git a/lib/email_messages.py b/lib/email_messages.py\n--- a/lib/email_messages.py\n+++ b/lib/email_messages.py\n@@ -1,9 +1,10 @@\n import logging\n import traceback\n from django.conf import settings\n-from django.core.mail import send_mail\n-from django.urls import reverse\n-from .helpers import build_aplus_url\n+from django.core.mail import send_mail, send_mass_mail\n+\n+from .helpers import Enum, build_aplus_url\n+from course.models import CourseInstance\n \n \n logger = logging.getLogger('aplus.lib.email_messages')\n@@ -61,3 +62,30 @@\n email_course_instance(instance, subject, body)\n except:\n pass\n+\n+\n+def email_course_students(\n+ instance: CourseInstance,\n+ subject: str,\n+ message: str,\n+ audience: Enum = CourseInstance.ENROLLMENT_AUDIENCE.ALL_USERS,\n+ ) -> int:\n+ \"\"\"\n+ Sends an email to students on the course. Audience parameter controls whether the mail goes\n+ to all (default), just internal, or just external students.\n+ Returns number of emails sent, or -1 in case of error.\n+ \"\"\"\n+ students = instance.students\n+ if audience == CourseInstance.ENROLLMENT_AUDIENCE.INTERNAL_USERS:\n+ students = students.filter(organization=settings.LOCAL_ORGANIZATION)\n+ elif audience == CourseInstance.ENROLLMENT_AUDIENCE.EXTERNAL_USERS:\n+ students = students.exclude(organization=settings.LOCAL_ORGANIZATION)\n+\n+ recipients = students.exclude(user__email='').values_list(\"user__email\", flat=True)\n+ emails = tuple(map(lambda x: (subject, message, settings.SERVER_EMAIL, [x]), recipients))\n+\n+ try:\n+ return send_mass_mail(emails)\n+ except:\n+ logger.exception('Failed to send course instance emails.')\n+ return -1\ndiff --git a/news/forms.py b/news/forms.py\n--- a/news/forms.py\n+++ b/news/forms.py\n@@ -1,16 +1,25 @@\n+from typing import Any\n+\n from django import forms\n+from django.utils.translation import gettext_lazy as _\n \n from .models import News\n \n \n class NewsForm(forms.ModelForm):\n \n+ email = forms.BooleanField(\n+ required=False,\n+ label=_(\"SEND_EMAIL_TO_STUDENTS\"),\n+ )\n+\n class Meta:\n model = News\n fields = [\n 'audience',\n 'publish',\n 'pin',\n+ 'email',\n 'title',\n 'body',\n ]\ndiff --git a/news/views.py b/news/views.py\n--- a/news/views.py\n+++ b/news/views.py\n@@ -1,10 +1,14 @@\n+from django.conf import settings\n+from django.contrib import messages\n from django.core.exceptions import PermissionDenied\n from django.http import Http404\n from django.shortcuts import get_object_or_404\n+from django.utils.translation import gettext_lazy as _\n \n from authorization.permissions import ACCESS\n from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\n from lib.viewbase import BaseFormView, BaseRedirectView\n+from lib.email_messages import email_course_students\n from .forms import NewsForm\n from .models import News\n \n@@ -47,6 +51,15 @@\n \n def form_valid(self, form):\n form.save()\n+ if form.cleaned_data['email']:\n+ subject = f\"[{settings.BRAND_NAME} course news] {self.instance.course.code}: {self.news_item.title}\"\n+ if email_course_students(\n+ self.instance,\n+ subject,\n+ self.news_item.body,\n+ self.news_item.audience,\n+ ) < 0:\n+ messages.error(self.request, _('FAILED_TO_SEND_EMAIL'))\n return super().form_valid(form)\n", "issue": "Feature request: send email when teacher adds news\nHi\r\n\r\nIt would be very nice to be able to notify students via email when teacher adds news. 
This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.\r\n\r\nWhat do you think?\r\n\r\nThanks!\nFeature request: send email when teacher adds news\nHi\r\n\r\nIt would be very nice to be able to notify students via email when teacher adds news. This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.\r\n\r\nWhat do you think?\r\n\r\nThanks!\n", "before_files": [{"content": "from django import forms\n\nfrom .models import News\n\n\nclass NewsForm(forms.ModelForm):\n\n class Meta:\n model = News\n fields = [\n 'audience',\n 'publish',\n 'pin',\n 'title',\n 'body',\n ]\n", "path": "news/forms.py"}, {"content": "import logging\nimport traceback\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.urls import reverse\nfrom .helpers import build_aplus_url\n\n\nlogger = logging.getLogger('aplus.lib.email_messages')\n\n\ndef email_course_instance(instance, subject, message, everyone=False) -> bool:\n \"\"\"\n Sends an email to a course instance's technical support emails or teachers if technical support not set.\n If everyone == True, sends emails to teachers anyway.\n \"\"\"\n recipients = []\n if instance.technical_error_emails:\n recipients = instance.technical_error_emails.split(\",\")\n if everyone or not recipients:\n recipients = instance.teachers.exclude(user__email='').values_list(\"user__email\", flat=True)\n\n if not recipients:\n raise ValueError(\"No recipients\")\n\n try:\n return send_mail(subject, message, settings.SERVER_EMAIL, recipients, True) == 1\n except:\n logger.exception('Failed to send course instance emails.')\n raise\n\n\ndef email_course_error(request, exercise, message, exception=True):\n \"\"\"\n Sends error message to course instance's teachers or technical support emails if set.\n \"\"\"\n instance = exercise.course_instance\n\n error_trace = \"-\"\n if exception:\n error_trace = traceback.format_exc()\n\n if request:\n request_fields = repr(request)\n else:\n request_fields = \"No request available\"\n\n subject = settings.EXERCISE_ERROR_SUBJECT.format(\n course=instance.course.code,\n exercise=str(exercise))\n body = settings.EXERCISE_ERROR_DESCRIPTION.format(\n message=message,\n exercise_url=build_aplus_url(\n exercise.get_absolute_url(), user_url=True),\n course_edit_url=build_aplus_url(\n instance.get_url('course-details'), user_url=True),\n error_trace=error_trace,\n request_fields=request_fields)\n\n try:\n email_course_instance(instance, subject, body)\n except:\n pass\n", "path": "lib/email_messages.py"}, {"content": "from django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\n\nfrom authorization.permissions import ACCESS\nfrom course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\nfrom lib.viewbase import BaseFormView, BaseRedirectView\nfrom .forms import NewsForm\nfrom .models import News\n\n\nclass ListNewsView(CourseInstanceBaseView):\n access_mode = ACCESS.TEACHER\n template_name = \"news/list.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.news = self.instance.news.all()\n self.note(\"news\")\n\n\nclass EditNewsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n template_name = \"news/edit.html\"\n form_class = NewsForm\n news_item_kw = \"news_id\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n\n news_id = self._get_kwarg(self.news_item_kw, 
default=None)\n if news_id:\n self.news_item = get_object_or_404(\n News,\n pk=news_id,\n course_instance=self.instance\n )\n self.note(\"news_item\")\n else:\n self.news_item = News(course_instance=self.instance)\n\n kwargs[\"instance\"] = self.news_item\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_url(\"news-list\")\n\n def form_valid(self, form):\n form.save()\n return super().form_valid(form)\n\n\nclass RemoveNewsView(CourseInstanceMixin, BaseRedirectView):\n access_mode = ACCESS.TEACHER\n news_item_kw = \"news_id\"\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.news_item = get_object_or_404(\n News,\n id=self._get_kwarg(self.news_item_kw),\n course_instance=self.instance,\n )\n self.note(\"news_item\")\n\n def post(self, request, *args, **kwargs):\n self.news_item.delete()\n return self.redirect(self.instance.get_url(\"news-list\"))\n", "path": "news/views.py"}]}
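For context on the `email_course_students` helper added in the patch above: it builds one `(subject, message, from_email, [recipient])` tuple per student and hands the whole batch to Django's `send_mass_mail`, so students never see each other's addresses. Below is a minimal, framework-free sketch of that batching shape; the `build_mass_mail` function and the sample addresses are invented for illustration and are not part of the patch.

```python
# Framework-free illustration of the per-recipient batching used by the new
# helper: send_mass_mail() expects one (subject, message, from_email,
# recipient_list) tuple per outgoing email, so every student is addressed
# individually. build_mass_mail and the addresses below are invented.
def build_mass_mail(subject, message, sender, recipients):
    """Return one datatuple per non-empty recipient address."""
    return tuple(
        (subject, message, sender, [address])
        for address in recipients
        if address
    )


if __name__ == "__main__":
    emails = build_mass_mail(
        "[A+ course news] CS-101: Deadline moved",
        "The deadline for exercise round 2 has moved to Friday.",
        "noreply@example.org",
        ["alice@example.org", "", "bob@example.org"],
    )
    for datatuple in emails:
        print(datatuple)
    # With a configured Django project, this tuple of tuples is the shape
    # passed to django.core.mail.send_mass_mail(emails).
```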
1,869 | 812 | gh_patches_debug_358 | rasdani/github-patches | git_diff | spacetelescope__jwql-550
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cron jobs for monitors currently failing Traceback (most recent call last): File "/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/bias_monitor.py", line 58, in <module> from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks File "/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/dark_monitor.py", line 77, in <module> from jwql.jwql_monitors import monitor_mast File "/home/jwqladm/repositories/jwql/jwql/jwql_monitors/monitor_mast.py", line 25, in <module> from bokeh.embed import components File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/__init__.py", line 81, in <module> from .util import logconfig File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/util/logconfig.py", line 87, in <module> level = settings.py_log_level() File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py", line 310, in __call__ return self._convert(os.environ[self._env_var]) File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py", line 236, in convert_logging raise ValueError("Cannot convert {} to log level, valid values are: {}".format(value, ", ".join(_log_levels))) ValueError: Cannot convert WARN to log level, valid values are: CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE, NONE </issue> <code> [start of setup.py] 1 import numpy as np 2 from setuptools import setup 3 from setuptools import find_packages 4 5 VERSION = '0.22.0' 6 7 AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, ' 8 AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist' 9 10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project' 11 12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0'] 13 REQUIRES = [ 14 'asdf>=2.3.3', 15 'astropy>=3.2.1', 16 'astroquery>=0.3.9', 17 'authlib', 18 'bokeh>=1.0', 19 'codecov', 20 'django>=2.0', 21 'flake8', 22 'inflection', 23 'ipython', 24 'jinja2', 25 'jsonschema==2.6.0', 26 'jwedb>=0.0.3', 27 'matplotlib', 28 'numpy', 29 'numpydoc', 30 'pandas', 31 'psycopg2', 32 'pysiaf', 33 'pytest', 34 'pytest-cov', 35 'scipy', 36 'sphinx', 37 'sqlalchemy', 38 'stsci_rtd_theme', 39 'twine' 40 ] 41 42 setup( 43 name='jwql', 44 version=VERSION, 45 description=DESCRIPTION, 46 url='https://github.com/spacetelescope/jwql.git', 47 author=AUTHORS, 48 author_email='[email protected]', 49 license='BSD', 50 keywords=['astronomy', 'python'], 51 classifiers=['Programming Language :: Python'], 52 packages=find_packages(), 53 install_requires=REQUIRES, 54 dependency_links=DEPENDENCY_LINKS, 55 include_package_data=True, 56 include_dirs=[np.get_include()], 57 ) 58 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
    dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
     'astropy>=3.2.1',
     'astroquery>=0.3.9',
     'authlib',
-    'bokeh>=1.0',
+    'bokeh>=1.0,<1.4',
     'codecov',
     'django>=2.0',
     'flake8',
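The pin above works around the underlying trigger: something in the cron environment hands bokeh a log level spelled `WARN`, which the installed bokeh rejects (only `CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE, NONE` are accepted). A hedged sketch of the alternative fix, assuming the offending value arrives through bokeh's `BOKEH_PY_LOG_LEVEL` environment variable (the variable name is an assumption inferred from the traceback, not stated in the issue):

```python
# Hedged alternative to pinning: normalise the log-level value before bokeh is
# imported. BOKEH_PY_LOG_LEVEL is assumed from the traceback (bokeh.settings
# reads an environment variable for py_log_level); adjust if the value really
# comes from somewhere else.
import os

ALIASES = {"WARN": "WARNING", "FATAL": "CRITICAL"}

level = os.environ.get("BOKEH_PY_LOG_LEVEL", "")
if level.upper() in ALIASES:
    os.environ["BOKEH_PY_LOG_LEVEL"] = ALIASES[level.upper()]

# Importing bokeh after this point should no longer raise
# "Cannot convert WARN to log level", assuming that variable was the culprit.
```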
1,494 | 101 | gh_patches_debug_1710 | rasdani/github-patches | git_diff | encode__httpx-407
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ASGIDispatch and WSGIDispatch should be importable from the top-level httpx package From #396: > ``ASGIDispatch`` and ``WSGIDispatch`` are documented as top-level but aren't exposed at the top level. This is definitely an issue, I'd recommend the route of making both available top-level. </issue> <code> [start of httpx/dispatch/__init__.py] 1 """ 2 Dispatch classes handle the raw network connections and the implementation 3 details of making the HTTP request and receiving the response. 4 """ 5 [end of httpx/dispatch/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/httpx/dispatch/__init__.py b/httpx/dispatch/__init__.py
--- a/httpx/dispatch/__init__.py
+++ b/httpx/dispatch/__init__.py
@@ -2,3 +2,7 @@
 Dispatch classes handle the raw network connections and the implementation
 details of making the HTTP request and receiving the response.
 """
+from .asgi import ASGIDispatch
+from .wsgi import WSGIDispatch
+
+__all__ = ["ASGIDispatch", "WSGIDispatch"]
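A quick way to confirm the re-exports, assuming the patched httpx checkout is importable in the current environment; note the issue also asks for top-level exposure (`httpx.ASGIDispatch`), which would additionally need an import in `httpx/__init__.py` that this diff does not show.

```python
# Minimal check of the new re-exports; assumes the patched httpx checkout is
# on sys.path. Before this change the classes were only reachable via the
# httpx.dispatch.asgi / httpx.dispatch.wsgi submodules.
import httpx.dispatch as dispatch

print(dispatch.__all__)        # ['ASGIDispatch', 'WSGIDispatch'] after the patch
print(dispatch.ASGIDispatch)   # now importable one level up
print(dispatch.WSGIDispatch)
```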
647 | 121 | gh_patches_debug_21028 | rasdani/github-patches | git_diff | techmatters__terraso-backend-141
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Landscape creation and update <!-- Use a concise title that describes the request. Bad: localization Good: Translate site into Spanish Bad: customize hostname Good: Determine hostname at build time from environment --> ## Description - Default landscape group should be created when a new landscape is created - Manager should be assigned at the creation of a landscape - Only managers can update landscape data </issue> <code> [start of terraso_backend/apps/core/models/landscapes.py] 1 import structlog 2 from django.db import models 3 4 from apps.core import permission_rules as perm_rules 5 6 from .commons import BaseModel, SlugModel 7 from .groups import Group 8 from .users import User 9 10 logger = structlog.get_logger(__name__) 11 12 13 class Landscape(SlugModel): 14 """ 15 This model represents a Landscape on Terraso platform. 16 17 A Landscape is a socio-ecological system that consists of natural 18 and/or human-modified ecosystems. Defined by its stakeholds, a 19 Landscape usually has geographical boundaries. It may correspond to, 20 or be a combination of, natural boundaries, distinct land features, 21 socially defined areas such as indigenous territories, and/or 22 jurisdictional and administrative boundaries. The boundaries of a 23 Landscape can cross several countries. 24 """ 25 26 name = models.CharField(max_length=128, unique=True) 27 description = models.TextField(max_length=512, blank=True, default="") 28 website = models.URLField(blank=True, default="") 29 location = models.CharField(max_length=128, blank=True, default="") 30 area_polygon = models.JSONField(blank=True, null=True) 31 32 created_by = models.ForeignKey( 33 User, 34 blank=True, 35 null=True, 36 on_delete=models.PROTECT, 37 related_name="created_landscapes", 38 ) 39 groups = models.ManyToManyField(Group, through="LandscapeGroup") 40 41 field_to_slug = "name" 42 43 class Meta(SlugModel.Meta): 44 rules_permissions = { 45 "change": perm_rules.allowed_to_change_landscape, 46 "delete": perm_rules.allowed_to_delete_landscape, 47 } 48 49 def get_default_group(self): 50 """ 51 A default Group in a Landscape is that Group where any 52 individual (associated or not with other Groups) is added when 53 associating directly with a Landscape. 54 """ 55 try: 56 # associated_groups is the related_name defined on 57 # LandscapeGroup relationship with Landscape. It returns a 58 # queryset of LandscapeGroup 59 landscape_group = self.associated_groups.get(is_default_landscape_group=True) 60 except LandscapeGroup.DoesNotExist: 61 logger.error( 62 "Landscape has no default group, but it must have", extra={"landscape_id": self.pk} 63 ) 64 return None 65 66 return landscape_group.group 67 68 def __str__(self): 69 return self.name 70 71 72 class LandscapeGroup(BaseModel): 73 """ 74 This model represents the association between a Landscape and a Group on 75 Terraso platform. 
76 """ 77 78 landscape = models.ForeignKey( 79 Landscape, on_delete=models.CASCADE, related_name="associated_groups" 80 ) 81 group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name="associated_landscapes") 82 83 is_default_landscape_group = models.BooleanField(blank=True, default=False) 84 85 class Meta: 86 rules_permissions = { 87 "add": perm_rules.allowed_to_add_landscape_group, 88 "delete": perm_rules.allowed_to_delete_landscape_group, 89 } 90 constraints = ( 91 models.UniqueConstraint( 92 fields=("group", "landscape"), 93 condition=models.Q(deleted_at__isnull=True), 94 name="unique_active_landscape_group", 95 ), 96 ) 97 [end of terraso_backend/apps/core/models/landscapes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/terraso_backend/apps/core/models/landscapes.py b/terraso_backend/apps/core/models/landscapes.py
--- a/terraso_backend/apps/core/models/landscapes.py
+++ b/terraso_backend/apps/core/models/landscapes.py
@@ -1,5 +1,5 @@
 import structlog
-from django.db import models
+from django.db import models, transaction
 
 from apps.core import permission_rules as perm_rules
 
@@ -46,6 +46,24 @@
             "delete": perm_rules.allowed_to_delete_landscape,
         }
 
+    def save(self, *args, **kwargs):
+        with transaction.atomic():
+            creating = not Landscape.objects.filter(pk=self.pk).exists()
+
+            super().save(*args, **kwargs)
+
+            if creating and self.created_by:
+                group = Group(
+                    name="Group {}".format(self.slug),
+                    description="",
+                    created_by=self.created_by,
+                )
+                group.save()
+                landscape_group = LandscapeGroup(
+                    group=group, landscape=self, is_default_landscape_group=True
+                )
+                landscape_group.save()
+
     def get_default_group(self):
         """
         A default Group in a Landscape is that Group where any
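A sketch of how the new behaviour could be exercised, written as a pytest-style test. It assumes a configured Django test environment where the Terraso models are importable; the import path and the `User` creation call are assumptions, since the real project may expose these differently.

```python
# Pytest-style sketch; assumes a configured Django test environment with the
# Terraso apps installed. The import path and the User creation call are
# assumptions, so adjust them to the project's actual user model.
import pytest

from apps.core.models import Landscape, LandscapeGroup, User


@pytest.mark.django_db
def test_new_landscape_gets_default_group():
    manager = User.objects.create(email="manager@example.org")  # hypothetical fields
    landscape = Landscape(name="Example Landscape", created_by=manager)
    landscape.save()  # runs the new save() override above

    assert landscape.get_default_group() is not None
    assert LandscapeGroup.objects.filter(
        landscape=landscape, is_default_landscape_group=True
    ).exists()
```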
1,476 | 278 | gh_patches_debug_31833 | rasdani/github-patches | git_diff | Flexget__Flexget-2858
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> torrentday pluging winth uid secret gets error expecting string ### Expected behaviour: Setting uid from secrets file should work and update config ### Actual behaviour: When setting the uid from a secret file, since uid is a number, it's converted to integer on the config, this makes a error in the config compilation of: /tasks/xTV_TorrentDay/discover/from/0/torrentday/uid] Got `1234567`, expected: string ### Steps to reproduce: - Step 1: Add config and run flexget check #### Config: ```yaml TV_TorrentDay: discover: what: - next_series_episodes: yes from: - torrentday: uid: "{? torrentday.uid ?}" passkey: '{? torrentday.passkey ?}' cfduid: '{? torrentday.cfduid ?}' rss_key: '{? torrentday.rss_key ?}' category: 'tvBRD' ``` secrets.yaml ```yaml torrentday: uid: "1234567" passkey: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" cfduid: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" rss_key: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ``` #### Log: <details> <summary>(click to expand)</summary> ``` 2021-02-16 01:29:54 CRITICAL manager [/tasks/xTV_TorrentDay/discover/from/0/torrentday/uid] Got `1234567`, expected: string 2021-02-16 01:29:54 CRITICAL manager Failed to load config file: Did not pass schema validation. ``` </details> ### Additional information: - FlexGet version: 3.1.102 - Python version: 3.7.3 - Installation method: venv - Using daemon (yes/no): yes - OS and version: debian 10 </issue> <code> [start of flexget/components/sites/sites/torrentday.py] 1 import re 2 3 from loguru import logger 4 from requests.exceptions import RequestException 5 6 from flexget import plugin 7 from flexget.components.sites.urlrewriting import UrlRewritingError 8 from flexget.components.sites.utils import normalize_unicode, torrent_availability 9 from flexget.entry import Entry 10 from flexget.event import event 11 from flexget.plugin import PluginError 12 from flexget.utils import requests 13 from flexget.utils.soup import get_soup 14 from flexget.utils.tools import parse_filesize 15 16 logger = logger.bind(name='torrentday') 17 18 CATEGORIES = { 19 'all': 0, 20 # Movies 21 'mov4k': 96, 22 'mov480p': 25, 23 'movHD': 11, 24 'movBD': 5, 25 'movDVD': 3, 26 'movMP4': 21, 27 'movNonEnglish': 22, 28 'movPACKS': 13, 29 'movSDx264': 44, 30 'movX265': 48, 31 'movXVID': 1, 32 # TV 33 'tv480p': 24, 34 'tvBRD': 32, 35 'tvDVD': 31, 36 'tvDVDrip': 33, 37 'tvMOBILE': 46, 38 'tvNonEnglish': 82, 39 'tvPACKS': 14, 40 'tvSDx264': 26, 41 'tvHDx264': 7, 42 'tvX265': 34, 43 'tvXVID': 2, 44 } 45 46 47 class UrlRewriteTorrentday: 48 """ 49 Torrentday urlrewriter and search plugin. 50 51 torrentday: 52 uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies 53 passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. 
see previous 54 cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES 55 rss_key: xxxxxxxxx (required) get this from your profile page 56 category: xxxxxxxx 57 58 Category can be one of 59 ID from browsing site OR 'name' 60 movies: 61 mov4k, mov480p, movHD, movBD, movDVD, 62 movMP4, movNonEnglish, movPACKS, 63 movSDx264, movX265, movXVID 64 tv: 65 tv480p, tvBRD, tvDVD, tvDVDrip, 66 tvMOBILE, tvNonEnglish, tvPACKS, 67 tvSDx264, tvHDx264, tvX265, tvXVID 68 """ 69 70 schema = { 71 'type': 'object', 72 'properties': { 73 'rss_key': {'type': 'string'}, 74 'uid': {'type': 'string'}, 75 'passkey': {'type': 'string'}, 76 'cfduid': {'type': 'string'}, 77 'category': { 78 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}] 79 }, 80 }, 81 'required': ['rss_key', 'uid', 'passkey', 'cfduid'], 82 'additionalProperties': False, 83 } 84 85 # urlrewriter API 86 def url_rewritable(self, task, entry): 87 url = entry['url'] 88 if url.find('.torrent'): 89 return False 90 if url.startswith('https://www.torrentday.com'): 91 return True 92 return False 93 94 # urlrewriter API 95 def url_rewrite(self, task, entry): 96 if 'url' not in entry: 97 logger.error('Didn\'t actually get a URL...') 98 else: 99 logger.debug('Got the URL: {}', entry['url']) 100 if entry['url'].startswith('https://www.torrentday.com/browse'): 101 # use search 102 results = self.search(task, entry) 103 if not results: 104 raise UrlRewritingError('No search results found') 105 entry['url'] = results[0]['url'] 106 107 @plugin.internet(logger) 108 def search(self, task, entry, config=None): 109 """ 110 Search for name from torrentday. 111 """ 112 113 categories = config.get('category', 'all') 114 # Make sure categories is a list 115 if not isinstance(categories, list): 116 categories = [categories] 117 # If there are any text categories, turn them into their id number 118 categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories] 119 params = {'cata': 'yes', 'clear-new': 1} 120 params.update({str(c): 1 for c in categories}) 121 122 entries = set() 123 for search_string in entry.get('search_strings', [entry['title']]): 124 125 url = 'https://www.torrentday.com/t' 126 params['q'] = normalize_unicode(search_string).replace(':', '') 127 cookies = { 128 'uid': config['uid'], 129 'pass': config['passkey'], 130 '__cfduid': config['cfduid'], 131 } 132 133 try: 134 page = requests.get(url, params=params, cookies=cookies).content 135 except RequestException as e: 136 raise PluginError('Could not connect to torrentday: {}'.format(e)) 137 138 # the following should avoid table being None due to a malformed 139 # html in td search results 140 soup = get_soup(page).contents[1].contents[1].contents[1].next.nextSibling 141 table = soup.find('table', {'id': 'torrentTable'}) 142 if table is None: 143 raise PluginError( 144 'Search returned by torrentday appears to be empty or malformed.' 
145 ) 146 147 # the first row is the header so skip it 148 for tr in table.find_all('tr')[1:]: 149 entry = Entry() 150 # find the torrent names 151 td = tr.find('td', {'class': 'torrentNameInfo'}) 152 if not td: 153 logger.warning('Could not find entry torrentNameInfo for {}.', search_string) 154 continue 155 title = td.find('a') 156 if not title: 157 logger.warning('Could not determine title for {}.', search_string) 158 continue 159 entry['title'] = title.contents[0] 160 logger.debug('title: {}', title.contents[0]) 161 162 # find download link 163 torrent_url = tr.find('td', {'class': 'ac'}) 164 if not torrent_url: 165 logger.warning('Could not determine download link for {}.', search_string) 166 continue 167 torrent_url = torrent_url.find('a').get('href') 168 169 # construct download URL 170 torrent_url = ( 171 'https://www.torrentday.com/' 172 + torrent_url 173 + '?torrent_pass=' 174 + config['rss_key'] 175 ) 176 logger.debug('RSS-ified download link: {}', torrent_url) 177 entry['url'] = torrent_url 178 179 # us tr object for seeders/leechers 180 seeders = tr.find('td', {'class': 'ac seedersInfo'}) 181 leechers = tr.find('td', {'class': 'ac leechersInfo'}) 182 entry['torrent_seeds'] = int(seeders.contents[0].replace(',', '')) 183 entry['torrent_leeches'] = int(leechers.contents[0].replace(',', '')) 184 entry['torrent_availability'] = torrent_availability( 185 entry['torrent_seeds'], entry['torrent_leeches'] 186 ) 187 188 # use tr object for size 189 size = tr.find('td', text=re.compile(r'([\.\d]+) ([TGMKk]?)B')).contents[0] 190 size = re.search(r'([\.\d]+) ([TGMKk]?)B', str(size)) 191 192 entry['content_size'] = parse_filesize(size.group(0)) 193 194 entries.add(entry) 195 196 return sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability')) 197 198 199 @event('plugin.register') 200 def register_plugin(): 201 plugin.register( 202 UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2 203 ) 204 [end of flexget/components/sites/sites/torrentday.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/flexget/components/sites/sites/torrentday.py b/flexget/components/sites/sites/torrentday.py
--- a/flexget/components/sites/sites/torrentday.py
+++ b/flexget/components/sites/sites/torrentday.py
@@ -6,6 +6,7 @@
 from flexget import plugin
 from flexget.components.sites.urlrewriting import UrlRewritingError
 from flexget.components.sites.utils import normalize_unicode, torrent_availability
+from flexget.config_schema import one_or_more
 from flexget.entry import Entry
 from flexget.event import event
 from flexget.plugin import PluginError
@@ -69,12 +70,12 @@
         'type': 'object',
         'properties': {
             'rss_key': {'type': 'string'},
-            'uid': {'type': 'string'},
+            'uid': {'oneOf': [{'type': 'integer'}, {'type': 'string'}]},
             'passkey': {'type': 'string'},
             'cfduid': {'type': 'string'},
-            'category': {
-                'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]
-            },
+            'category': one_or_more(
+                {'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]}
+            ),
         },
         'required': ['rss_key', 'uid', 'passkey', 'cfduid'],
         'additionalProperties': False,
@@ -125,7 +126,7 @@
             url = 'https://www.torrentday.com/t'
            params['q'] = normalize_unicode(search_string).replace(':', '')
             cookies = {
-                'uid': config['uid'],
+                'uid': str(config['uid']),
                 'pass': config['passkey'],
                 '__cfduid': config['cfduid'],
             }
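To see why the schema change resolves the report, the snippet below validates the same uid value both as an integer and as a string against the new `oneOf` fragment. It uses the `jsonschema` package directly instead of FlexGet's own config validator, so it only mirrors the relevant part of the plugin schema.

```python
# Mirrors just the changed 'uid' fragment of the plugin schema and validates
# the value both ways. Requires the jsonschema package; FlexGet's own config
# validation is not imported here.
from jsonschema import ValidationError, validate

uid_schema = {'oneOf': [{'type': 'integer'}, {'type': 'string'}]}

for value in (1234567, "1234567"):
    try:
        validate(value, uid_schema)
        print(f"{value!r}: accepted")
    except ValidationError as err:
        print(f"{value!r}: rejected ({err.message})")

# The old schema was just {'type': 'string'}, which is why an integer coming
# out of the secrets file produced "Got `1234567`, expected: string".
```

The `str(config['uid'])` cast in the cookie dict covers the other half of the problem: whatever type validation lets through, the HTTP layer still needs a string value.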
3,267 | 413 | gh_patches_debug_20437 | rasdani/github-patches | git_diff | graspologic-org__graspologic-488
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove * imports in subgraph, utils the` __init__.py` files in `graspy.subgraph` and `graspy.utils` shouldn't have import *. </issue> <code> [start of graspy/utils/__init__.py] 1 # Copyright (c) Microsoft Corporation and contributors. 2 # Licensed under the MIT License. 3 4 from .utils import * 5 from .ptr import * 6 [end of graspy/utils/__init__.py] [start of graspy/subgraph/__init__.py] 1 # Copyright (c) Microsoft Corporation and contributors. 2 # Licensed under the MIT License. 3 4 from .sg import * 5 [end of graspy/subgraph/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/graspy/subgraph/__init__.py b/graspy/subgraph/__init__.py
--- a/graspy/subgraph/__init__.py
+++ b/graspy/subgraph/__init__.py
@@ -1,4 +1,7 @@
 # Copyright (c) Microsoft Corporation and contributors.
 # Licensed under the MIT License.
 
-from .sg import *
+
+from .sg import SignalSubgraph
+
+__all__ = [SignalSubgraph]
diff --git a/graspy/utils/__init__.py b/graspy/utils/__init__.py
--- a/graspy/utils/__init__.py
+++ b/graspy/utils/__init__.py
@@ -1,5 +1,42 @@
 # Copyright (c) Microsoft Corporation and contributors.
 # Licensed under the MIT License.
 
-from .utils import *
-from .ptr import *
+from .utils import (
+    import_graph,
+    import_edgelist,
+    is_symmetric,
+    is_loopless,
+    is_unweighted,
+    is_almost_symmetric,
+    symmetrize,
+    remove_loops,
+    to_laplace,
+    is_fully_connected,
+    get_lcc,
+    get_multigraph_union_lcc,
+    get_multigraph_intersect_lcc,
+    augment_diagonal,
+    binarize,
+    cartprod,
+)
+from .ptr import pass_to_ranks
+
+__all__ = [
+    import_graph,
+    import_edgelist,
+    is_symmetric,
+    is_loopless,
+    is_unweighted,
+    is_almost_symmetric,
+    symmetrize,
+    remove_loops,
+    to_laplace,
+    is_fully_connected,
+    get_lcc,
+    get_multigraph_union_lcc,
+    get_multigraph_intersect_lcc,
+    augment_diagonal,
+    binarize,
+    cartprod,
+    pass_to_ranks,
+]
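One detail worth noting when replacing star imports this way: by convention `__all__` holds the exported names as strings, while the patch stores the objects themselves, which a later `from graspy.utils import *` may reject with a `TypeError` on current Python versions. The self-contained sketch below (using a throwaway fake module, nothing from graspy) shows the string-based convention that star imports expect.

```python
# Minimal demonstration of how __all__ drives star imports, independent of
# graspy: only names listed in __all__ (as strings) are copied by "import *".
import sys
import types

# Build a throwaway module so the example runs without graspy installed.
mod = types.ModuleType("fake_utils")
exec(
    "def pass_to_ranks(graph):\n"
    "    return graph\n"
    "\n"
    "def _private_helper():\n"
    "    return None\n"
    "\n"
    "__all__ = ['pass_to_ranks']  # strings, per convention\n",
    mod.__dict__,
)
sys.modules["fake_utils"] = mod

namespace = {}
exec("from fake_utils import *", namespace)
# Only the names listed in __all__ are pulled in; the underscore helper is not.
print([name for name in namespace if not name.startswith("__")])  # ['pass_to_ranks']
```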
662 | 429 | gh_patches_debug_27136 | rasdani/github-patches | git_diff | getmoto__moto-923
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cloudformation doesn`t work with yaml templates When i try to use moto to mock a call to `create_stack` the following happens: Traceback: ``` File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py", line 107, in dispatch return cls()._dispatch(*args, **kwargs) File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py", line 167, in _dispatch return self.call_action() File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py", line 183, in call_action response = method() File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/responses.py", line 51, in create_stack role_arn=role_arn, File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py", line 126, in create_stack role_arn=role_arn, File "/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py", line 18, in __init__ self.template_dict = json.loads(self.template) File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py", line 354, in loads return _default_decoder.decode(s) File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py", line 339, in decode obj, end = self.raw_decode(s, idx=_w(s, 0).end()) File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py", line 357, in raw_decode raise JSONDecodeError("Expecting value", s, err.value) from None json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0) ``` #### How to reproduce ```python # coding: utf-8 import unittest from boto.cloudformation import connect_to_region from moto import mock_cloudformation class TestCase(unittest.TestCase): @mock_cloudformation def test_cloudformation_create_stack(self): connection = connect_to_region('sa-east-1') with open('ec2.yml') as fp: template = fp.read() connection.create_stack('test-stack', template_body=template) if __name__ == '__main__': unittest.main() ``` Template: ec2.yaml ```yaml Resources: BastionInstance: Type: "AWS::EC2::Instance" Properties: AvailabilityZone: sa-east-1a DisableApiTermination: false ImageId: ami-37cfad5b InstanceType: t2.micro ``` Moto version: 0.4.31 Boto Version: 2.46.1 Python Version: Python 3.6.0rc1 System: Darwin Iurys-MacBook-Pro.local 15.5.0 Darwin Kernel Version 15.5.0: Tue Apr 19 18:36:36 PDT 2016; root:xnu-3248.50.21~8/RELEASE_X86_64 x86_64 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 from __future__ import unicode_literals 3 from setuptools import setup, find_packages 4 5 install_requires = [ 6 "Jinja2>=2.8", 7 "boto>=2.36.0", 8 "cookies", 9 "requests>=2.0", 10 "xmltodict", 11 "dicttoxml", 12 "six", 13 "werkzeug", 14 "pytz", 15 "python-dateutil", 16 ] 17 18 extras_require = { 19 'server': ['flask'], 20 } 21 22 setup( 23 name='moto', 24 version='0.4.31', 25 description='A library that allows your python tests to easily' 26 ' mock out the boto library', 27 author='Steve Pulec', 28 author_email='[email protected]', 29 url='https://github.com/spulec/moto', 30 entry_points={ 31 'console_scripts': [ 32 'moto_server = moto.server:main', 33 ], 34 }, 35 packages=find_packages(exclude=("tests", "tests.*")), 36 install_requires=install_requires, 37 extras_require=extras_require, 38 license="Apache", 39 
test_suite="tests", 40 classifiers=[ 41 "Programming Language :: Python :: 2", 42 "Programming Language :: Python :: 2.7", 43 "Programming Language :: Python :: 3", 44 "Programming Language :: Python :: 3.3", 45 "License :: OSI Approved :: Apache Software License", 46 "Topic :: Software Development :: Testing", 47 ], 48 ) 49 [end of setup.py] [start of moto/cloudformation/models.py] 1 from __future__ import unicode_literals 2 from datetime import datetime 3 import json 4 import uuid 5 6 import boto.cloudformation 7 from moto.core import BaseBackend, BaseModel 8 9 from .parsing import ResourceMap, OutputMap 10 from .utils import generate_stack_id 11 from .exceptions import ValidationError 12 13 14 class FakeStack(BaseModel): 15 16 def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None): 17 self.stack_id = stack_id 18 self.name = name 19 self.template = template 20 self.template_dict = json.loads(self.template) 21 self.parameters = parameters 22 self.region_name = region_name 23 self.notification_arns = notification_arns if notification_arns else [] 24 self.role_arn = role_arn 25 self.tags = tags if tags else {} 26 self.events = [] 27 self._add_stack_event("CREATE_IN_PROGRESS", 28 resource_status_reason="User Initiated") 29 30 self.description = self.template_dict.get('Description') 31 self.resource_map = self._create_resource_map() 32 self.output_map = self._create_output_map() 33 self._add_stack_event("CREATE_COMPLETE") 34 self.status = 'CREATE_COMPLETE' 35 36 def _create_resource_map(self): 37 resource_map = ResourceMap( 38 self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict) 39 resource_map.create() 40 return resource_map 41 42 def _create_output_map(self): 43 output_map = OutputMap(self.resource_map, self.template_dict) 44 output_map.create() 45 return output_map 46 47 def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None): 48 self.events.append(FakeEvent( 49 stack_id=self.stack_id, 50 stack_name=self.name, 51 logical_resource_id=self.name, 52 physical_resource_id=self.stack_id, 53 resource_type="AWS::CloudFormation::Stack", 54 resource_status=resource_status, 55 resource_status_reason=resource_status_reason, 56 resource_properties=resource_properties, 57 )) 58 59 def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None): 60 # not used yet... 
feel free to help yourself 61 resource = self.resource_map[logical_resource_id] 62 self.events.append(FakeEvent( 63 stack_id=self.stack_id, 64 stack_name=self.name, 65 logical_resource_id=logical_resource_id, 66 physical_resource_id=resource.physical_resource_id, 67 resource_type=resource.type, 68 resource_status=resource_status, 69 resource_status_reason=resource_status_reason, 70 resource_properties=resource_properties, 71 )) 72 73 @property 74 def stack_parameters(self): 75 return self.resource_map.resolved_parameters 76 77 @property 78 def stack_resources(self): 79 return self.resource_map.values() 80 81 @property 82 def stack_outputs(self): 83 return self.output_map.values() 84 85 def update(self, template, role_arn=None, parameters=None, tags=None): 86 self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated") 87 self.template = template 88 self.resource_map.update(json.loads(template), parameters) 89 self.output_map = self._create_output_map() 90 self._add_stack_event("UPDATE_COMPLETE") 91 self.status = "UPDATE_COMPLETE" 92 self.role_arn = role_arn 93 # only overwrite tags if passed 94 if tags is not None: 95 self.tags = tags 96 # TODO: update tags in the resource map 97 98 def delete(self): 99 self._add_stack_event("DELETE_IN_PROGRESS", 100 resource_status_reason="User Initiated") 101 self.resource_map.delete() 102 self._add_stack_event("DELETE_COMPLETE") 103 self.status = "DELETE_COMPLETE" 104 105 106 class FakeEvent(BaseModel): 107 108 def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None): 109 self.stack_id = stack_id 110 self.stack_name = stack_name 111 self.logical_resource_id = logical_resource_id 112 self.physical_resource_id = physical_resource_id 113 self.resource_type = resource_type 114 self.resource_status = resource_status 115 self.resource_status_reason = resource_status_reason 116 self.resource_properties = resource_properties 117 self.timestamp = datetime.utcnow() 118 self.event_id = uuid.uuid4() 119 120 121 class CloudFormationBackend(BaseBackend): 122 123 def __init__(self): 124 self.stacks = {} 125 self.deleted_stacks = {} 126 127 def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None): 128 stack_id = generate_stack_id(name) 129 new_stack = FakeStack( 130 stack_id=stack_id, 131 name=name, 132 template=template, 133 parameters=parameters, 134 region_name=region_name, 135 notification_arns=notification_arns, 136 tags=tags, 137 role_arn=role_arn, 138 ) 139 self.stacks[stack_id] = new_stack 140 return new_stack 141 142 def describe_stacks(self, name_or_stack_id): 143 stacks = self.stacks.values() 144 if name_or_stack_id: 145 for stack in stacks: 146 if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id: 147 return [stack] 148 if self.deleted_stacks: 149 deleted_stacks = self.deleted_stacks.values() 150 for stack in deleted_stacks: 151 if stack.stack_id == name_or_stack_id: 152 return [stack] 153 raise ValidationError(name_or_stack_id) 154 else: 155 return stacks 156 157 def list_stacks(self): 158 return self.stacks.values() 159 160 def get_stack(self, name_or_stack_id): 161 all_stacks = dict(self.deleted_stacks, **self.stacks) 162 if name_or_stack_id in all_stacks: 163 # Lookup by stack id - deleted stacks incldued 164 return all_stacks[name_or_stack_id] 165 else: 166 # Lookup by stack name - undeleted stacks only 167 for stack in self.stacks.values(): 
168 if stack.name == name_or_stack_id: 169 return stack 170 171 def update_stack(self, name, template, role_arn=None, parameters=None, tags=None): 172 stack = self.get_stack(name) 173 stack.update(template, role_arn, parameters=parameters, tags=tags) 174 return stack 175 176 def list_stack_resources(self, stack_name_or_id): 177 stack = self.get_stack(stack_name_or_id) 178 return stack.stack_resources 179 180 def delete_stack(self, name_or_stack_id): 181 if name_or_stack_id in self.stacks: 182 # Delete by stack id 183 stack = self.stacks.pop(name_or_stack_id, None) 184 stack.delete() 185 self.deleted_stacks[stack.stack_id] = stack 186 return self.stacks.pop(name_or_stack_id, None) 187 else: 188 # Delete by stack name 189 for stack in list(self.stacks.values()): 190 if stack.name == name_or_stack_id: 191 self.delete_stack(stack.stack_id) 192 193 194 cloudformation_backends = {} 195 for region in boto.cloudformation.regions(): 196 cloudformation_backends[region.name] = CloudFormationBackend() 197 [end of moto/cloudformation/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from datetime import datetime import json +import yaml import uuid import boto.cloudformation @@ -17,7 +18,7 @@ self.stack_id = stack_id self.name = name self.template = template - self.template_dict = json.loads(self.template) + self._parse_template() self.parameters = parameters self.region_name = region_name self.notification_arns = notification_arns if notification_arns else [] @@ -70,6 +71,12 @@ resource_properties=resource_properties, )) + def _parse_template(self): + try: + self.template_dict = yaml.load(self.template) + except yaml.parser.ParserError: + self.template_dict = json.loads(self.template) + @property def stack_parameters(self): return self.resource_map.resolved_parameters diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -11,6 +11,7 @@ "dicttoxml", "six", "werkzeug", + "pyaml", "pytz", "python-dateutil", ]
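A side note on the patch above: it works because every JSON document is also valid YAML, so trying the YAML parser first and falling back to `json.loads` covers both template formats. Below is a minimal, hedged sketch of that idea as a standalone function. It is not the moto implementation (the diff itself uses `yaml.load` and catches `yaml.parser.ParserError`); the sketch uses `yaml.safe_load`, which avoids constructing arbitrary Python objects from tags.

```python
import json

import yaml  # PyYAML; the diff adds a YAML dependency to setup.py for the same reason


def parse_cloudformation_template(template_body: str) -> dict:
    """Parse a CloudFormation template body given as YAML or JSON text."""
    try:
        # JSON is a subset of YAML, so this branch also handles JSON templates.
        return yaml.safe_load(template_body)
    except yaml.YAMLError:
        # Fall back so a genuinely malformed template surfaces a JSON error,
        # matching the pre-existing behaviour for JSON input.
        return json.loads(template_body)


if __name__ == "__main__":
    template = """
Resources:
  BastionInstance:
    Type: "AWS::EC2::Instance"
    Properties:
      InstanceType: t2.micro
"""
    print(parse_cloudformation_template(template)["Resources"]["BastionInstance"]["Type"])
```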
{"golden_diff": "diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py\n--- a/moto/cloudformation/models.py\n+++ b/moto/cloudformation/models.py\n@@ -1,6 +1,7 @@\n from __future__ import unicode_literals\n from datetime import datetime\n import json\n+import yaml\n import uuid\n \n import boto.cloudformation\n@@ -17,7 +18,7 @@\n self.stack_id = stack_id\n self.name = name\n self.template = template\n- self.template_dict = json.loads(self.template)\n+ self._parse_template()\n self.parameters = parameters\n self.region_name = region_name\n self.notification_arns = notification_arns if notification_arns else []\n@@ -70,6 +71,12 @@\n resource_properties=resource_properties,\n ))\n \n+ def _parse_template(self):\n+ try:\n+ self.template_dict = yaml.load(self.template)\n+ except yaml.parser.ParserError:\n+ self.template_dict = json.loads(self.template)\n+\n @property\n def stack_parameters(self):\n return self.resource_map.resolved_parameters\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,6 +11,7 @@\n \"dicttoxml\",\n \"six\",\n \"werkzeug\",\n+ \"pyaml\",\n \"pytz\",\n \"python-dateutil\",\n ]\n", "issue": "Cloudformation doesn`t work with yaml templates\nWhen i try to use moto to mock a call to `create_stack` the following happens:\r\n\r\nTraceback:\r\n```\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\", line 107, in dispatch\r\n return cls()._dispatch(*args, **kwargs)\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\", line 167, in _dispatch\r\n return self.call_action()\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/core/responses.py\", line 183, in call_action\r\n response = method()\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/responses.py\", line 51, in create_stack\r\n role_arn=role_arn,\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py\", line 126, in create_stack\r\n role_arn=role_arn,\r\n File \"/Users/iuryalvesdesouza/projects/bastion/venv/lib/python3.6/site-packages/moto/cloudformation/models.py\", line 18, in __init__\r\n self.template_dict = json.loads(self.template)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/__init__.py\", line 354, in loads\r\n return _default_decoder.decode(s)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py\", line 339, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/json/decoder.py\", line 357, in raw_decode\r\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\r\njson.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\r\n```\r\n\r\n#### How to reproduce\r\n\r\n```python\r\n# coding: utf-8\r\n\r\nimport unittest\r\nfrom boto.cloudformation import connect_to_region\r\nfrom moto import mock_cloudformation\r\n\r\n\r\nclass TestCase(unittest.TestCase):\r\n\r\n @mock_cloudformation\r\n def test_cloudformation_create_stack(self):\r\n\t connection = connect_to_region('sa-east-1')\r\n\t with open('ec2.yml') as fp:\r\n template = fp.read()\r\n\r\n connection.create_stack('test-stack', template_body=template)\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n```\r\n\r\nTemplate: ec2.yaml\r\n```yaml\r\nResources:\r\n 
BastionInstance:\r\n Type: \"AWS::EC2::Instance\"\r\n Properties: \r\n AvailabilityZone: sa-east-1a\r\n DisableApiTermination: false\r\n ImageId: ami-37cfad5b\r\n InstanceType: t2.micro\r\n```\r\n\r\nMoto version: 0.4.31\r\nBoto Version: 2.46.1\r\nPython Version: Python 3.6.0rc1\r\nSystem: Darwin Iurys-MacBook-Pro.local 15.5.0 Darwin Kernel Version 15.5.0: Tue Apr 19 18:36:36 PDT 2016; root:xnu-3248.50.21~8/RELEASE_X86_64 x86_64\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nfrom setuptools import setup, find_packages\n\ninstall_requires = [\n \"Jinja2>=2.8\",\n \"boto>=2.36.0\",\n \"cookies\",\n \"requests>=2.0\",\n \"xmltodict\",\n \"dicttoxml\",\n \"six\",\n \"werkzeug\",\n \"pytz\",\n \"python-dateutil\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\nsetup(\n name='moto',\n version='0.4.31',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}, {"content": "from __future__ import unicode_literals\nfrom datetime import datetime\nimport json\nimport uuid\n\nimport boto.cloudformation\nfrom moto.core import BaseBackend, BaseModel\n\nfrom .parsing import ResourceMap, OutputMap\nfrom .utils import generate_stack_id\nfrom .exceptions import ValidationError\n\n\nclass FakeStack(BaseModel):\n\n def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):\n self.stack_id = stack_id\n self.name = name\n self.template = template\n self.template_dict = json.loads(self.template)\n self.parameters = parameters\n self.region_name = region_name\n self.notification_arns = notification_arns if notification_arns else []\n self.role_arn = role_arn\n self.tags = tags if tags else {}\n self.events = []\n self._add_stack_event(\"CREATE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n\n self.description = self.template_dict.get('Description')\n self.resource_map = self._create_resource_map()\n self.output_map = self._create_output_map()\n self._add_stack_event(\"CREATE_COMPLETE\")\n self.status = 'CREATE_COMPLETE'\n\n def _create_resource_map(self):\n resource_map = ResourceMap(\n self.stack_id, self.name, self.parameters, self.tags, self.region_name, self.template_dict)\n resource_map.create()\n return resource_map\n\n def _create_output_map(self):\n output_map = OutputMap(self.resource_map, self.template_dict)\n output_map.create()\n return output_map\n\n def _add_stack_event(self, resource_status, resource_status_reason=None, resource_properties=None):\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=self.name,\n physical_resource_id=self.stack_id,\n resource_type=\"AWS::CloudFormation::Stack\",\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n 
resource_properties=resource_properties,\n ))\n\n def _add_resource_event(self, logical_resource_id, resource_status, resource_status_reason=None, resource_properties=None):\n # not used yet... feel free to help yourself\n resource = self.resource_map[logical_resource_id]\n self.events.append(FakeEvent(\n stack_id=self.stack_id,\n stack_name=self.name,\n logical_resource_id=logical_resource_id,\n physical_resource_id=resource.physical_resource_id,\n resource_type=resource.type,\n resource_status=resource_status,\n resource_status_reason=resource_status_reason,\n resource_properties=resource_properties,\n ))\n\n @property\n def stack_parameters(self):\n return self.resource_map.resolved_parameters\n\n @property\n def stack_resources(self):\n return self.resource_map.values()\n\n @property\n def stack_outputs(self):\n return self.output_map.values()\n\n def update(self, template, role_arn=None, parameters=None, tags=None):\n self._add_stack_event(\"UPDATE_IN_PROGRESS\", resource_status_reason=\"User Initiated\")\n self.template = template\n self.resource_map.update(json.loads(template), parameters)\n self.output_map = self._create_output_map()\n self._add_stack_event(\"UPDATE_COMPLETE\")\n self.status = \"UPDATE_COMPLETE\"\n self.role_arn = role_arn\n # only overwrite tags if passed\n if tags is not None:\n self.tags = tags\n # TODO: update tags in the resource map\n\n def delete(self):\n self._add_stack_event(\"DELETE_IN_PROGRESS\",\n resource_status_reason=\"User Initiated\")\n self.resource_map.delete()\n self._add_stack_event(\"DELETE_COMPLETE\")\n self.status = \"DELETE_COMPLETE\"\n\n\nclass FakeEvent(BaseModel):\n\n def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):\n self.stack_id = stack_id\n self.stack_name = stack_name\n self.logical_resource_id = logical_resource_id\n self.physical_resource_id = physical_resource_id\n self.resource_type = resource_type\n self.resource_status = resource_status\n self.resource_status_reason = resource_status_reason\n self.resource_properties = resource_properties\n self.timestamp = datetime.utcnow()\n self.event_id = uuid.uuid4()\n\n\nclass CloudFormationBackend(BaseBackend):\n\n def __init__(self):\n self.stacks = {}\n self.deleted_stacks = {}\n\n def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None):\n stack_id = generate_stack_id(name)\n new_stack = FakeStack(\n stack_id=stack_id,\n name=name,\n template=template,\n parameters=parameters,\n region_name=region_name,\n notification_arns=notification_arns,\n tags=tags,\n role_arn=role_arn,\n )\n self.stacks[stack_id] = new_stack\n return new_stack\n\n def describe_stacks(self, name_or_stack_id):\n stacks = self.stacks.values()\n if name_or_stack_id:\n for stack in stacks:\n if stack.name == name_or_stack_id or stack.stack_id == name_or_stack_id:\n return [stack]\n if self.deleted_stacks:\n deleted_stacks = self.deleted_stacks.values()\n for stack in deleted_stacks:\n if stack.stack_id == name_or_stack_id:\n return [stack]\n raise ValidationError(name_or_stack_id)\n else:\n return stacks\n\n def list_stacks(self):\n return self.stacks.values()\n\n def get_stack(self, name_or_stack_id):\n all_stacks = dict(self.deleted_stacks, **self.stacks)\n if name_or_stack_id in all_stacks:\n # Lookup by stack id - deleted stacks incldued\n return all_stacks[name_or_stack_id]\n else:\n # Lookup by stack name - undeleted stacks only\n 
for stack in self.stacks.values():\n if stack.name == name_or_stack_id:\n return stack\n\n def update_stack(self, name, template, role_arn=None, parameters=None, tags=None):\n stack = self.get_stack(name)\n stack.update(template, role_arn, parameters=parameters, tags=tags)\n return stack\n\n def list_stack_resources(self, stack_name_or_id):\n stack = self.get_stack(stack_name_or_id)\n return stack.stack_resources\n\n def delete_stack(self, name_or_stack_id):\n if name_or_stack_id in self.stacks:\n # Delete by stack id\n stack = self.stacks.pop(name_or_stack_id, None)\n stack.delete()\n self.deleted_stacks[stack.stack_id] = stack\n return self.stacks.pop(name_or_stack_id, None)\n else:\n # Delete by stack name\n for stack in list(self.stacks.values()):\n if stack.name == name_or_stack_id:\n self.delete_stack(stack.stack_id)\n\n\ncloudformation_backends = {}\nfor region in boto.cloudformation.regions():\n cloudformation_backends[region.name] = CloudFormationBackend()\n", "path": "moto/cloudformation/models.py"}]}
3,759 · 306 · gh_patches_debug_19187 · rasdani/github-patches · git_diff
mozmeao__snippets-service-769
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create a button to copy to clipboard the preview link on click Preview link for ASR Snippets cannot be directly clicked and instead must be copied and pasted to the URL bar. Create a `Copy` button next to the link to make the procedure easier. </issue> <code> [start of snippets/base/admin/adminmodels.py] 1 import re 2 3 from django.contrib import admin 4 from django.db.models import TextField, Q 5 from django.template.loader import get_template 6 from django.utils.safestring import mark_safe 7 8 from reversion.admin import VersionAdmin 9 from django_ace import AceWidget 10 from django_statsd.clients import statsd 11 from jinja2.meta import find_undeclared_variables 12 from django_admin_listfilter_dropdown.filters import RelatedDropdownFilter 13 14 from snippets.base import forms, models 15 from snippets.base.models import JINJA_ENV 16 from snippets.base.admin.filters import ModifiedFilter, ReleaseFilter 17 18 19 MATCH_LOCALE_REGEX = re.compile('(\w+(?:-\w+)*)') 20 RESERVED_VARIABLES = ('_', 'snippet_id') 21 22 23 class ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin): 24 list_display = ('description', 'is_exclusion', 'startpage_version', 'name', 25 'version', 'locale', 'appbuildid', 'build_target', 26 'channel', 'os_version', 'distribution', 27 'distribution_version', 'modified') 28 list_filter = ('name', 'version', 'os_version', 'appbuildid', 29 'build_target', 'channel', 'distribution', 'locale') 30 save_on_top = True 31 search_fields = ('description',) 32 33 34 class LogEntryAdmin(admin.ModelAdmin): 35 list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message') 36 list_filter = ('user', 'content_type') 37 38 39 class SnippetTemplateVariableInline(admin.TabularInline): 40 model = models.SnippetTemplateVariable 41 formset = forms.SnippetTemplateVariableInlineFormset 42 max_num = 0 43 can_delete = False 44 readonly_fields = ('name',) 45 fields = ('name', 'type', 'order', 'description') 46 47 48 class SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin): 49 save_on_top = True 50 list_display = ('name', 'priority', 'hidden') 51 list_filter = ('hidden', 'startpage') 52 inlines = (SnippetTemplateVariableInline,) 53 formfield_overrides = { 54 TextField: {'widget': AceWidget(mode='html', theme='github', 55 width='1200px', height='500px')}, 56 } 57 58 class Media: 59 css = { 60 'all': ('css/admin.css',) 61 } 62 63 def save_related(self, request, form, formsets, change): 64 """ 65 After saving the related objects, remove and add 66 SnippetTemplateVariables depending on how the template code changed. 67 """ 68 super(SnippetTemplateAdmin, self).save_related(request, form, formsets, 69 change) 70 71 # Parse the template code and find any undefined variables. 72 ast = JINJA_ENV.env.parse(form.instance.code) 73 new_vars = find_undeclared_variables(ast) 74 var_manager = form.instance.variable_set 75 76 # Filter out reserved variable names. 77 new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES] 78 79 # Delete variables not in the new set. 80 var_manager.filter(~Q(name__in=new_vars)).delete() 81 82 # Create variables that don't exist. 
83 for i, variable in enumerate(new_vars, start=1): 84 obj, _ = models.SnippetTemplateVariable.objects.get_or_create( 85 template=form.instance, name=variable) 86 if obj.order == 0: 87 obj.order = i * 10 88 obj.save() 89 90 91 class UploadedFileAdmin(admin.ModelAdmin): 92 readonly_fields = ('url', 'preview', 'snippets') 93 list_display = ('name', 'url', 'preview', 'modified') 94 prepopulated_fields = {'name': ('file',)} 95 form = forms.UploadedFileAdminForm 96 97 def preview(self, obj): 98 template = get_template('base/uploadedfile_preview.jinja') 99 return mark_safe(template.render({'file': obj})) 100 101 def snippets(self, obj): 102 """Snippets using this file.""" 103 template = get_template('base/uploadedfile_snippets.jinja') 104 return mark_safe(template.render({'snippets': obj.snippets})) 105 106 107 class AddonAdmin(admin.ModelAdmin): 108 list_display = ('name', 'guid') 109 110 111 class ASRSnippetAdmin(admin.ModelAdmin): 112 form = forms.ASRSnippetAdminForm 113 114 list_display_links = ( 115 'id', 116 'name', 117 ) 118 list_display = ( 119 'id', 120 'name', 121 'status', 122 'modified', 123 ) 124 list_filter = ( 125 ModifiedFilter, 126 'status', 127 ReleaseFilter, 128 ('template', RelatedDropdownFilter), 129 ) 130 search_fields = ( 131 'name', 132 ) 133 autocomplete_fields = ( 134 'campaign', 135 'target', 136 ) 137 preserve_filters = True 138 readonly_fields = ( 139 'created', 140 'modified', 141 'uuid', 142 'creator', 143 'preview_url', 144 ) 145 filter_horizontal = ('locales',) 146 save_on_top = True 147 save_as = True 148 view_on_site = False 149 150 fieldsets = ( 151 ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}), 152 ('Content', { 153 'description': ( 154 ''' 155 <strong>Available deep links:</strong><br/> 156 <ol> 157 <li><code>special:accounts</code> to open Firefox Accounts</li> 158 <li><code>special:appMenu</code> to open the hamburger menu</li> 159 </ol><br/> 160 <strong>Automatically add Snippet ID:</strong><br/> 161 You can use <code>[[snippet_id]]</code> in any field and it 162 will be automatically replaced by Snippet ID when served to users. 
163 <br/> 164 Example: This is a <code>&lt;a href=&quot;https://example.com?utm_term=[[snippet_id]]&quot;&gt;link&lt;/a&gt;</code> # noqa 165 <br/> 166 ''' 167 ), 168 'fields': ('template', 'data'), 169 }), 170 ('Publishing Options', { 171 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',) 172 }), 173 ('Other Info', { 174 'fields': ('uuid', ('created', 'modified')), 175 'classes': ('collapse',) 176 }), 177 ) 178 179 class Media: 180 css = { 181 'all': ('css/admin/ASRSnippetAdmin.css',) 182 } 183 184 def save_model(self, request, obj, form, change): 185 obj.creator = request.user 186 statsd.incr('save.asrsnippet') 187 super().save_model(request, obj, form, change) 188 189 def preview_url(self, obj): 190 return obj.get_preview_url() 191 192 193 class CampaignAdmin(admin.ModelAdmin): 194 readonly_fields = ('created', 'modified', 'creator',) 195 prepopulated_fields = {'slug': ('name',)} 196 197 fieldsets = ( 198 ('ID', {'fields': ('name', 'slug')}), 199 ('Other Info', { 200 'fields': ('creator', ('created', 'modified')), 201 }), 202 ) 203 search_fields = ( 204 'name', 205 ) 206 207 def save_model(self, request, obj, form, change): 208 obj.creator = request.user 209 statsd.incr('save.campaign') 210 super().save_model(request, obj, form, change) 211 212 213 class TargetAdmin(admin.ModelAdmin): 214 form = forms.TargetAdminForm 215 readonly_fields = ('created', 'modified', 'creator', 'jexl_expr') 216 search_fields = ( 217 'name', 218 ) 219 fieldsets = ( 220 ('ID', {'fields': ('name',)}), 221 ('Product channels', { 222 'description': 'What channels will this snippet be available in?', 223 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),) 224 }), 225 ('Targeting', { 226 'fields': ( 227 'filtr_is_default_browser', 228 'filtr_updates_enabled', 229 'filtr_updates_autodownload_enabled', 230 'filtr_profile_age_created', 231 'filtr_firefox_version', 232 'filtr_previous_session_end', 233 'filtr_uses_firefox_sync', 234 'filtr_country', 235 'filtr_is_developer', 236 'filtr_current_search_engine', 237 'filtr_browser_addon', 238 'filtr_total_bookmarks_count', 239 ) 240 }), 241 ('Other Info', { 242 'fields': ('creator', ('created', 'modified'), 'jexl_expr'), 243 }), 244 ) 245 246 def save_model(self, request, obj, form, change): 247 obj.creator = request.user 248 statsd.incr('save.target') 249 super().save_model(request, obj, form, change) 250 [end of snippets/base/admin/adminmodels.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py --- a/snippets/base/admin/adminmodels.py +++ b/snippets/base/admin/adminmodels.py @@ -180,6 +180,10 @@ css = { 'all': ('css/admin/ASRSnippetAdmin.css',) } + js = ( + 'js/admin/clipboard.min.js', + 'js/admin/copy_preview.js', + ) def save_model(self, request, obj, form, change): obj.creator = request.user @@ -187,7 +191,15 @@ super().save_model(request, obj, form, change) def preview_url(self, obj): - return obj.get_preview_url() + text = f''' + <span id="previewLinkUrl">{obj.get_preview_url()}</span> + <button id="copyPreviewLink" class="btn" + data-clipboard-target="#previewLinkUrl" + originalText="Copy to Clipboard" type="button"> + Copy to Clipboard + </button> + ''' + return mark_safe(text) class CampaignAdmin(admin.ModelAdmin):
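One design note on the `preview_url` change in this patch: it builds HTML with an f-string wrapped in `mark_safe`, and it relies on two static files (`clipboard.min.js` and `copy_preview.js`) whose contents are not shown in the diff; presumably they attach ClipboardJS to the `data-clipboard-target` attribute. As a hedged, illustrative alternative (not the code that was merged), Django's `format_html` produces the same markup while escaping the interpolated URL automatically:

```python
from django.contrib import admin
from django.utils.html import format_html


class ASRSnippetAdminSketch(admin.ModelAdmin):
    # Illustrative sketch only: same markup as the patch above, but
    # format_html escapes obj.get_preview_url() before interpolation,
    # so mark_safe is not needed.
    def preview_url(self, obj):
        return format_html(
            '<span id="previewLinkUrl">{}</span> '
            '<button id="copyPreviewLink" class="btn" type="button" '
            'data-clipboard-target="#previewLinkUrl">Copy to Clipboard</button>',
            obj.get_preview_url(),
        )
```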
{"golden_diff": "diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py\n--- a/snippets/base/admin/adminmodels.py\n+++ b/snippets/base/admin/adminmodels.py\n@@ -180,6 +180,10 @@\n css = {\n 'all': ('css/admin/ASRSnippetAdmin.css',)\n }\n+ js = (\n+ 'js/admin/clipboard.min.js',\n+ 'js/admin/copy_preview.js',\n+ )\n \n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n@@ -187,7 +191,15 @@\n super().save_model(request, obj, form, change)\n \n def preview_url(self, obj):\n- return obj.get_preview_url()\n+ text = f'''\n+ <span id=\"previewLinkUrl\">{obj.get_preview_url()}</span>\n+ <button id=\"copyPreviewLink\" class=\"btn\"\n+ data-clipboard-target=\"#previewLinkUrl\"\n+ originalText=\"Copy to Clipboard\" type=\"button\">\n+ Copy to Clipboard\n+ </button>\n+ '''\n+ return mark_safe(text)\n \n \n class CampaignAdmin(admin.ModelAdmin):\n", "issue": "Create a button to copy to clipboard the preview link on click\nPreview link for ASR Snippets cannot be directly clicked and instead must be copied and pasted to the URL bar. Create a `Copy` button next to the link to make the procedure easier. \n", "before_files": [{"content": "import re\n\nfrom django.contrib import admin\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import RelatedDropdownFilter\n\nfrom snippets.base import forms, models\nfrom snippets.base.models import JINJA_ENV\nfrom snippets.base.admin.filters import ModifiedFilter, ReleaseFilter\n\n\nMATCH_LOCALE_REGEX = re.compile('(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n class Media:\n css = {\n 'all': ('css/admin.css',)\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = 
JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass UploadedFileAdmin(admin.ModelAdmin):\n readonly_fields = ('url', 'preview', 'snippets')\n list_display = ('name', 'url', 'preview', 'modified')\n prepopulated_fields = {'name': ('file',)}\n form = forms.UploadedFileAdminForm\n\n def preview(self, obj):\n template = get_template('base/uploadedfile_preview.jinja')\n return mark_safe(template.render({'file': obj}))\n\n def snippets(self, obj):\n \"\"\"Snippets using this file.\"\"\"\n template = get_template('base/uploadedfile_snippets.jinja')\n return mark_safe(template.render({'snippets': obj.snippets}))\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'modified',\n )\n list_filter = (\n ModifiedFilter,\n 'status',\n ReleaseFilter,\n ('template', RelatedDropdownFilter),\n )\n search_fields = (\n 'name',\n )\n autocomplete_fields = (\n 'campaign',\n 'target',\n )\n preserve_filters = True\n readonly_fields = (\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url',\n )\n filter_horizontal = ('locales',)\n save_on_top = True\n save_as = True\n view_on_site = False\n\n fieldsets = (\n ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n </ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code>&lt;a href=&quot;https://example.com?utm_term=[[snippet_id]]&quot;&gt;link&lt;/a&gt;</code> # noqa\n <br/>\n '''\n ),\n 'fields': ('template', 'data'),\n }),\n ('Publishing Options', {\n 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified')),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': ('css/admin/ASRSnippetAdmin.css',)\n }\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url(self, obj):\n return obj.get_preview_url()\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, 
change)\n\n\nclass TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')\n search_fields = (\n 'name',\n )\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_uses_firefox_sync',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n def save_model(self, request, obj, form, change):\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n", "path": "snippets/base/admin/adminmodels.py"}]}
3,087 · 260 · gh_patches_debug_1928 · rasdani/github-patches · git_diff
goauthentik__authentik-3299
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Get username from mailcow source **Is your feature request related to a problem? Please describe.** I like to get a username from mailcow. With username the enrollment for new users is more simple. **Describe the solution you'd like** Set username to full_name provided by mailcow oauths source. **Additional context** For other sources the username is also set redundant to another attribute if there is no special source attribute: azure_ad.py: ``` "username": info.get("displayName"), "name": info.get("displayName"), ``` discord.py: ``` "username": info.get("username"), "name": info.get("username"), ``` facebook.py: ``` "username": info.get("name"), "name": info.get("name"), ``` reddit.py ``` "username": info.get("name"), "name": info.get("name"), ``` </issue> <code> [start of authentik/sources/oauth/types/mailcow.py] 1 """Mailcow OAuth Views""" 2 from typing import Any, Optional 3 4 from requests.exceptions import RequestException 5 from structlog.stdlib import get_logger 6 7 from authentik.sources.oauth.clients.oauth2 import OAuth2Client 8 from authentik.sources.oauth.types.manager import MANAGER, SourceType 9 from authentik.sources.oauth.views.callback import OAuthCallback 10 from authentik.sources.oauth.views.redirect import OAuthRedirect 11 12 LOGGER = get_logger() 13 14 15 class MailcowOAuthRedirect(OAuthRedirect): 16 """Mailcow OAuth2 Redirect""" 17 18 def get_additional_parameters(self, source): # pragma: no cover 19 return { 20 "scope": ["profile"], 21 } 22 23 24 class MailcowOAuth2Client(OAuth2Client): 25 """MailcowOAuth2Client, for some reason, mailcow does not like the default headers""" 26 27 def get_profile_info(self, token: dict[str, str]) -> Optional[dict[str, Any]]: 28 "Fetch user profile information." 29 profile_url = self.source.type.profile_url or "" 30 if self.source.type.urls_customizable and self.source.profile_url: 31 profile_url = self.source.profile_url 32 try: 33 response = self.session.request( 34 "get", 35 f"{profile_url}?access_token={token['access_token']}", 36 ) 37 response.raise_for_status() 38 except RequestException as exc: 39 LOGGER.warning("Unable to fetch user profile", exc=exc, body=response.text) 40 return None 41 else: 42 return response.json() 43 44 45 class MailcowOAuth2Callback(OAuthCallback): 46 """Mailcow OAuth2 Callback""" 47 48 client_class = MailcowOAuth2Client 49 50 def get_user_enroll_context( 51 self, 52 info: dict[str, Any], 53 ) -> dict[str, Any]: 54 return { 55 "email": info.get("email"), 56 "name": info.get("full_name"), 57 } 58 59 60 @MANAGER.type() 61 class MailcowType(SourceType): 62 """Mailcow Type definition""" 63 64 callback_view = MailcowOAuth2Callback 65 redirect_view = MailcowOAuthRedirect 66 name = "Mailcow" 67 slug = "mailcow" 68 69 urls_customizable = True 70 [end of authentik/sources/oauth/types/mailcow.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/authentik/sources/oauth/types/mailcow.py b/authentik/sources/oauth/types/mailcow.py --- a/authentik/sources/oauth/types/mailcow.py +++ b/authentik/sources/oauth/types/mailcow.py @@ -52,6 +52,7 @@ info: dict[str, Any], ) -> dict[str, Any]: return { + "username": info.get("full_name"), "email": info.get("email"), "name": info.get("full_name"), }
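To make the intent of this one-line change concrete, here is a small hedged example of the enroll context after the patch. The payload below is hypothetical and only follows the field names used in the diff, not an authoritative mailcow API reference:

```python
# Hypothetical mailcow userinfo payload (field names taken from the diff,
# not from mailcow documentation).
info = {
    "email": "jane@example.com",
    "full_name": "Jane Doe",
}

# What MailcowOAuth2Callback.get_user_enroll_context() returns after the patch:
enroll_context = {
    "username": info.get("full_name"),
    "email": info.get("email"),
    "name": info.get("full_name"),
}

assert enroll_context["username"] == "Jane Doe"
```

This mirrors how the other sources cited in the issue (azure_ad, discord, facebook, reddit) reuse an existing attribute when no dedicated username field is available.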
{"golden_diff": "diff --git a/authentik/sources/oauth/types/mailcow.py b/authentik/sources/oauth/types/mailcow.py\n--- a/authentik/sources/oauth/types/mailcow.py\n+++ b/authentik/sources/oauth/types/mailcow.py\n@@ -52,6 +52,7 @@\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n+ \"username\": info.get(\"full_name\"),\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"full_name\"),\n }\n", "issue": "Get username from mailcow source\n**Is your feature request related to a problem? Please describe.**\r\nI like to get a username from mailcow. With username the enrollment for new users is more simple.\r\n\r\n**Describe the solution you'd like**\r\nSet username to full_name provided by mailcow oauths source.\r\n\r\n**Additional context**\r\nFor other sources the username is also set redundant to another attribute if there is no special source attribute:\r\nazure_ad.py:\r\n```\r\n \"username\": info.get(\"displayName\"),\r\n \"name\": info.get(\"displayName\"),\r\n```\r\n\r\ndiscord.py:\r\n```\r\n \"username\": info.get(\"username\"),\r\n \"name\": info.get(\"username\"),\r\n```\r\n\r\nfacebook.py:\r\n```\r\n \"username\": info.get(\"name\"),\r\n \"name\": info.get(\"name\"),\r\n```\r\n\r\nreddit.py\r\n```\r\n \"username\": info.get(\"name\"),\r\n \"name\": info.get(\"name\"),\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Mailcow OAuth Views\"\"\"\nfrom typing import Any, Optional\n\nfrom requests.exceptions import RequestException\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.oauth.clients.oauth2 import OAuth2Client\nfrom authentik.sources.oauth.types.manager import MANAGER, SourceType\nfrom authentik.sources.oauth.views.callback import OAuthCallback\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass MailcowOAuthRedirect(OAuthRedirect):\n \"\"\"Mailcow OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"profile\"],\n }\n\n\nclass MailcowOAuth2Client(OAuth2Client):\n \"\"\"MailcowOAuth2Client, for some reason, mailcow does not like the default headers\"\"\"\n\n def get_profile_info(self, token: dict[str, str]) -> Optional[dict[str, Any]]:\n \"Fetch user profile information.\"\n profile_url = self.source.type.profile_url or \"\"\n if self.source.type.urls_customizable and self.source.profile_url:\n profile_url = self.source.profile_url\n try:\n response = self.session.request(\n \"get\",\n f\"{profile_url}?access_token={token['access_token']}\",\n )\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\"Unable to fetch user profile\", exc=exc, body=response.text)\n return None\n else:\n return response.json()\n\n\nclass MailcowOAuth2Callback(OAuthCallback):\n \"\"\"Mailcow OAuth2 Callback\"\"\"\n\n client_class = MailcowOAuth2Client\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"full_name\"),\n }\n\n\[email protected]()\nclass MailcowType(SourceType):\n \"\"\"Mailcow Type definition\"\"\"\n\n callback_view = MailcowOAuth2Callback\n redirect_view = MailcowOAuthRedirect\n name = \"Mailcow\"\n slug = \"mailcow\"\n\n urls_customizable = True\n", "path": "authentik/sources/oauth/types/mailcow.py"}]}
1,319 · 111 · gh_patches_debug_26693 · rasdani/github-patches · git_diff
benoitc__gunicorn-1581
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> --config doesn't work in GUNICORN_CMD_ARGS Specifying `--config` in the `GUNICORN_CMD_ARGS` environment variable quietly fails as config file loading only happens when it is passed as an argument: https://github.com/benoitc/gunicorn/blob/328e509260ae70de6c04c5ba885ee17960b3ced5/gunicorn/app/base.py#L137-L175 </issue> <code> [start of gunicorn/app/base.py] 1 # -*- coding: utf-8 - 2 # 3 # This file is part of gunicorn released under the MIT license. 4 # See the NOTICE for more information. 5 from __future__ import print_function 6 7 import os 8 import sys 9 import traceback 10 11 from gunicorn._compat import execfile_ 12 from gunicorn import util 13 from gunicorn.arbiter import Arbiter 14 from gunicorn.config import Config, get_default_config_file 15 from gunicorn import debug 16 17 class BaseApplication(object): 18 """ 19 An application interface for configuring and loading 20 the various necessities for any given web framework. 21 """ 22 def __init__(self, usage=None, prog=None): 23 self.usage = usage 24 self.cfg = None 25 self.callable = None 26 self.prog = prog 27 self.logger = None 28 self.do_load_config() 29 30 def do_load_config(self): 31 """ 32 Loads the configuration 33 """ 34 try: 35 self.load_default_config() 36 self.load_config() 37 except Exception as e: 38 print("\nError: %s" % str(e), file=sys.stderr) 39 sys.stderr.flush() 40 sys.exit(1) 41 42 def load_default_config(self): 43 # init configuration 44 self.cfg = Config(self.usage, prog=self.prog) 45 46 def init(self, parser, opts, args): 47 raise NotImplementedError 48 49 def load(self): 50 raise NotImplementedError 51 52 def load_config(self): 53 """ 54 This method is used to load the configuration from one or several input(s). 55 Custom Command line, configuration file. 56 You have to override this method in your class. 57 """ 58 raise NotImplementedError 59 60 def reload(self): 61 self.do_load_config() 62 if self.cfg.spew: 63 debug.spew() 64 65 def wsgi(self): 66 if self.callable is None: 67 self.callable = self.load() 68 return self.callable 69 70 def run(self): 71 try: 72 Arbiter(self).run() 73 except RuntimeError as e: 74 print("\nError: %s\n" % e, file=sys.stderr) 75 sys.stderr.flush() 76 sys.exit(1) 77 78 class Application(BaseApplication): 79 80 def get_config_from_filename(self, filename): 81 82 if not os.path.exists(filename): 83 raise RuntimeError("%r doesn't exist" % filename) 84 85 cfg = { 86 "__builtins__": __builtins__, 87 "__name__": "__config__", 88 "__file__": filename, 89 "__doc__": None, 90 "__package__": None 91 } 92 try: 93 execfile_(filename, cfg, cfg) 94 except Exception: 95 print("Failed to read config file: %s" % filename, file=sys.stderr) 96 traceback.print_exc() 97 sys.stderr.flush() 98 sys.exit(1) 99 100 return cfg 101 102 def get_config_from_module_name(self, module_name): 103 return vars(util.import_module(module_name)) 104 105 def load_config_from_module_name_or_filename(self, location): 106 """ 107 Loads the configuration file: the file is a python file, otherwise raise an RuntimeError 108 Exception or stop the process if the configuration file contains a syntax error. 
109 """ 110 111 if location.startswith("python:"): 112 module_name = location[len("python:"):] 113 cfg = self.get_config_from_module_name(module_name) 114 else: 115 if location.startswith("file:"): 116 filename = location[len("file:"):] 117 else: 118 filename = location 119 cfg = self.get_config_from_filename(filename) 120 121 for k, v in cfg.items(): 122 # Ignore unknown names 123 if k not in self.cfg.settings: 124 continue 125 try: 126 self.cfg.set(k.lower(), v) 127 except: 128 print("Invalid value for %s: %s\n" % (k, v), file=sys.stderr) 129 sys.stderr.flush() 130 raise 131 132 return cfg 133 134 def load_config_from_file(self, filename): 135 return self.load_config_from_module_name_or_filename(location=filename) 136 137 def load_config(self): 138 # parse console args 139 parser = self.cfg.parser() 140 args = parser.parse_args() 141 142 # optional settings from apps 143 cfg = self.init(parser, args, args.args) 144 145 # Load up the any app specific configuration 146 if cfg: 147 for k, v in cfg.items(): 148 self.cfg.set(k.lower(), v) 149 150 if args.config: 151 self.load_config_from_file(args.config) 152 else: 153 default_config = get_default_config_file() 154 if default_config is not None: 155 self.load_config_from_file(default_config) 156 157 # Load up environment configuration 158 env_vars = self.cfg.get_cmd_args_from_env() 159 if env_vars: 160 env_args = parser.parse_args(env_vars) 161 for k, v in vars(env_args).items(): 162 if v is None: 163 continue 164 if k == "args": 165 continue 166 self.cfg.set(k.lower(), v) 167 168 # Lastly, update the configuration with any command line 169 # settings. 170 for k, v in vars(args).items(): 171 if v is None: 172 continue 173 if k == "args": 174 continue 175 self.cfg.set(k.lower(), v) 176 177 def run(self): 178 if self.cfg.check_config: 179 try: 180 self.load() 181 except: 182 msg = "\nError while loading the application:\n" 183 print(msg, file=sys.stderr) 184 traceback.print_exc() 185 sys.stderr.flush() 186 sys.exit(1) 187 sys.exit(0) 188 189 if self.cfg.spew: 190 debug.spew() 191 192 if self.cfg.daemon: 193 util.daemonize(self.cfg.enable_stdio_inheritance) 194 195 # set python paths 196 if self.cfg.pythonpath: 197 paths = self.cfg.pythonpath.split(",") 198 for path in paths: 199 pythonpath = os.path.abspath(path) 200 if pythonpath not in sys.path: 201 sys.path.insert(0, pythonpath) 202 203 super(Application, self).run() 204 [end of gunicorn/app/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gunicorn/app/base.py b/gunicorn/app/base.py --- a/gunicorn/app/base.py +++ b/gunicorn/app/base.py @@ -147,26 +147,26 @@ for k, v in cfg.items(): self.cfg.set(k.lower(), v) + env_args = parser.parse_args(self.cfg.get_cmd_args_from_env()) + if args.config: self.load_config_from_file(args.config) + elif env_args.config: + self.load_config_from_file(env_args.config) else: default_config = get_default_config_file() if default_config is not None: self.load_config_from_file(default_config) # Load up environment configuration - env_vars = self.cfg.get_cmd_args_from_env() - if env_vars: - env_args = parser.parse_args(env_vars) - for k, v in vars(env_args).items(): - if v is None: - continue - if k == "args": - continue - self.cfg.set(k.lower(), v) + for k, v in vars(env_args).items(): + if v is None: + continue + if k == "args": + continue + self.cfg.set(k.lower(), v) - # Lastly, update the configuration with any command line - # settings. + # Lastly, update the configuration with any command line settings. for k, v in vars(args).items(): if v is None: continue
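As a hedged summary of the ordering this patch establishes, the sketch below is a standalone approximation (not gunicorn's actual code) of the intended precedence for the `--config` setting: an explicit command-line value wins, then a value inside `GUNICORN_CMD_ARGS`, then the default config file. Before the patch, step 2 was skipped entirely, which is the silent failure described in the issue.

```python
import os
import shlex


def resolve_config_location(cli_config=None, default_config=None):
    """Approximate the config-file precedence of the patched load_config()."""
    tokens = shlex.split(os.environ.get("GUNICORN_CMD_ARGS", ""))
    env_config = None
    for i, token in enumerate(tokens):
        if token.startswith("--config="):
            env_config = token.split("=", 1)[1]
        elif token in ("--config", "-c") and i + 1 < len(tokens):
            env_config = tokens[i + 1]
    # 1. CLI --config, 2. --config from GUNICORN_CMD_ARGS, 3. default file.
    return cli_config or env_config or default_config


if __name__ == "__main__":
    os.environ["GUNICORN_CMD_ARGS"] = "--config=python:gunicorn_conf --workers=2"
    print(resolve_config_location())  # python:gunicorn_conf
```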
{"golden_diff": "diff --git a/gunicorn/app/base.py b/gunicorn/app/base.py\n--- a/gunicorn/app/base.py\n+++ b/gunicorn/app/base.py\n@@ -147,26 +147,26 @@\n for k, v in cfg.items():\n self.cfg.set(k.lower(), v)\n \n+ env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())\n+\n if args.config:\n self.load_config_from_file(args.config)\n+ elif env_args.config:\n+ self.load_config_from_file(env_args.config)\n else:\n default_config = get_default_config_file()\n if default_config is not None:\n self.load_config_from_file(default_config)\n \n # Load up environment configuration\n- env_vars = self.cfg.get_cmd_args_from_env()\n- if env_vars:\n- env_args = parser.parse_args(env_vars)\n- for k, v in vars(env_args).items():\n- if v is None:\n- continue\n- if k == \"args\":\n- continue\n- self.cfg.set(k.lower(), v)\n+ for k, v in vars(env_args).items():\n+ if v is None:\n+ continue\n+ if k == \"args\":\n+ continue\n+ self.cfg.set(k.lower(), v)\n \n- # Lastly, update the configuration with any command line\n- # settings.\n+ # Lastly, update the configuration with any command line settings.\n for k, v in vars(args).items():\n if v is None:\n continue\n", "issue": "--config doesn't work in GUNICORN_CMD_ARGS\nSpecifying `--config` in the `GUNICORN_CMD_ARGS` environment variable quietly fails as config file loading only happens when it is passed as an argument:\r\nhttps://github.com/benoitc/gunicorn/blob/328e509260ae70de6c04c5ba885ee17960b3ced5/gunicorn/app/base.py#L137-L175\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport traceback\n\nfrom gunicorn._compat import execfile_\nfrom gunicorn import util\nfrom gunicorn.arbiter import Arbiter\nfrom gunicorn.config import Config, get_default_config_file\nfrom gunicorn import debug\n\nclass BaseApplication(object):\n \"\"\"\n An application interface for configuring and loading\n the various necessities for any given web framework.\n \"\"\"\n def __init__(self, usage=None, prog=None):\n self.usage = usage\n self.cfg = None\n self.callable = None\n self.prog = prog\n self.logger = None\n self.do_load_config()\n\n def do_load_config(self):\n \"\"\"\n Loads the configuration\n \"\"\"\n try:\n self.load_default_config()\n self.load_config()\n except Exception as e:\n print(\"\\nError: %s\" % str(e), file=sys.stderr)\n sys.stderr.flush()\n sys.exit(1)\n\n def load_default_config(self):\n # init configuration\n self.cfg = Config(self.usage, prog=self.prog)\n\n def init(self, parser, opts, args):\n raise NotImplementedError\n\n def load(self):\n raise NotImplementedError\n\n def load_config(self):\n \"\"\"\n This method is used to load the configuration from one or several input(s).\n Custom Command line, configuration file.\n You have to override this method in your class.\n \"\"\"\n raise NotImplementedError\n\n def reload(self):\n self.do_load_config()\n if self.cfg.spew:\n debug.spew()\n\n def wsgi(self):\n if self.callable is None:\n self.callable = self.load()\n return self.callable\n\n def run(self):\n try:\n Arbiter(self).run()\n except RuntimeError as e:\n print(\"\\nError: %s\\n\" % e, file=sys.stderr)\n sys.stderr.flush()\n sys.exit(1)\n\nclass Application(BaseApplication):\n\n def get_config_from_filename(self, filename):\n\n if not os.path.exists(filename):\n raise RuntimeError(\"%r doesn't exist\" % filename)\n\n cfg = {\n \"__builtins__\": __builtins__,\n 
\"__name__\": \"__config__\",\n \"__file__\": filename,\n \"__doc__\": None,\n \"__package__\": None\n }\n try:\n execfile_(filename, cfg, cfg)\n except Exception:\n print(\"Failed to read config file: %s\" % filename, file=sys.stderr)\n traceback.print_exc()\n sys.stderr.flush()\n sys.exit(1)\n\n return cfg\n\n def get_config_from_module_name(self, module_name):\n return vars(util.import_module(module_name))\n\n def load_config_from_module_name_or_filename(self, location):\n \"\"\"\n Loads the configuration file: the file is a python file, otherwise raise an RuntimeError\n Exception or stop the process if the configuration file contains a syntax error.\n \"\"\"\n\n if location.startswith(\"python:\"):\n module_name = location[len(\"python:\"):]\n cfg = self.get_config_from_module_name(module_name)\n else:\n if location.startswith(\"file:\"):\n filename = location[len(\"file:\"):]\n else:\n filename = location\n cfg = self.get_config_from_filename(filename)\n\n for k, v in cfg.items():\n # Ignore unknown names\n if k not in self.cfg.settings:\n continue\n try:\n self.cfg.set(k.lower(), v)\n except:\n print(\"Invalid value for %s: %s\\n\" % (k, v), file=sys.stderr)\n sys.stderr.flush()\n raise\n\n return cfg\n\n def load_config_from_file(self, filename):\n return self.load_config_from_module_name_or_filename(location=filename)\n\n def load_config(self):\n # parse console args\n parser = self.cfg.parser()\n args = parser.parse_args()\n\n # optional settings from apps\n cfg = self.init(parser, args, args.args)\n\n # Load up the any app specific configuration\n if cfg:\n for k, v in cfg.items():\n self.cfg.set(k.lower(), v)\n\n if args.config:\n self.load_config_from_file(args.config)\n else:\n default_config = get_default_config_file()\n if default_config is not None:\n self.load_config_from_file(default_config)\n\n # Load up environment configuration\n env_vars = self.cfg.get_cmd_args_from_env()\n if env_vars:\n env_args = parser.parse_args(env_vars)\n for k, v in vars(env_args).items():\n if v is None:\n continue\n if k == \"args\":\n continue\n self.cfg.set(k.lower(), v)\n\n # Lastly, update the configuration with any command line\n # settings.\n for k, v in vars(args).items():\n if v is None:\n continue\n if k == \"args\":\n continue\n self.cfg.set(k.lower(), v)\n\n def run(self):\n if self.cfg.check_config:\n try:\n self.load()\n except:\n msg = \"\\nError while loading the application:\\n\"\n print(msg, file=sys.stderr)\n traceback.print_exc()\n sys.stderr.flush()\n sys.exit(1)\n sys.exit(0)\n\n if self.cfg.spew:\n debug.spew()\n\n if self.cfg.daemon:\n util.daemonize(self.cfg.enable_stdio_inheritance)\n\n # set python paths\n if self.cfg.pythonpath:\n paths = self.cfg.pythonpath.split(\",\")\n for path in paths:\n pythonpath = os.path.abspath(path)\n if pythonpath not in sys.path:\n sys.path.insert(0, pythonpath)\n\n super(Application, self).run()\n", "path": "gunicorn/app/base.py"}]}
2,398 | 328 | gh_patches_debug_6594 | rasdani/github-patches | git_diff | pytorch__ignite-1192
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DistributedProxySampler RuntimeError when indices are padded ## 🐛 Bug description The RuntimeError that occurs in the `DistributedProxySampler` on [line 241](https://github.com/pytorch/ignite/blob/master/ignite/distributed/auto.py#L241) shouldn't be there since the indices are padded with the full sample which was updated because of [this comment](https://github.com/pytorch/pytorch/issues/23430#issuecomment-597191137). ## Environment - PyTorch Version (e.g., 1.4): - Ignite Version (e.g., 0.3.0): - OS (e.g., Linux): - How you installed Ignite (`conda`, `pip`, source): - Python version: - Any other relevant information: </issue> <code> [start of ignite/distributed/auto.py] 1 import warnings 2 3 import torch 4 import torch.nn as nn 5 from torch.optim.optimizer import Optimizer 6 from torch.utils.data import DataLoader, Dataset 7 from torch.utils.data.distributed import DistributedSampler 8 from torch.utils.data.sampler import Sampler 9 10 from ignite.distributed import utils as idist 11 from ignite.distributed.comp_models import native as idist_native 12 from ignite.distributed.comp_models import xla as idist_xla 13 from ignite.utils import setup_logger 14 15 __all__ = ["auto_dataloader", "auto_model", "auto_optim", "DistributedProxySampler"] 16 17 18 def auto_dataloader(dataset, **kwargs): 19 """Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting 20 all available backends from :meth:`~ignite.distributed.utils.available_backends()`). 21 22 Internally, we create a dataloader with provided kwargs while applying the following updates: 23 24 - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size. 25 - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size. 26 - if no sampler provided by user, `torch DistributedSampler` is setup. 27 - if a sampler is provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`. 28 - if the default device is 'cuda', `pin_memory` is automatically set to `True`. 29 30 .. warning:: 31 32 Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch 33 sampler is compatible with distributed configuration. 34 35 Examples: 36 37 .. code-block:: python 38 39 import ignite.distribted as idist 40 41 train_loader = idist.auto_dataloader( 42 train_dataset, 43 batch_size=32, 44 num_workers=4, 45 shuffle=True, 46 pin_memory="cuda" in idist.device().type, 47 drop_last=True, 48 ) 49 50 Args: 51 dataset (Dataset): input torch dataset 52 **kwargs: keyword arguments for `torch DataLoader`_. 53 54 Returns: 55 `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices 56 57 .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader 58 .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178 59 .. 
_torch DistributedSampler: 60 https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler 61 """ 62 rank = idist.get_rank() 63 world_size = idist.get_world_size() 64 65 logger = setup_logger(__name__ + ".auto_dataloader") 66 if world_size > 1: 67 if "batch_size" in kwargs and kwargs["batch_size"] >= world_size: 68 kwargs["batch_size"] //= world_size 69 70 nproc = idist.get_nproc_per_node() 71 if "num_workers" in kwargs and kwargs["num_workers"] >= nproc: 72 kwargs["num_workers"] = (kwargs["num_workers"] + nproc - 1) // nproc 73 74 if "batch_sampler" not in kwargs: 75 if kwargs.get("sampler", None) is not None: 76 sampler = DistributedProxySampler(kwargs["sampler"], num_replicas=world_size, rank=rank) 77 else: 78 sampler = DistributedSampler( 79 dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get("shuffle", True) 80 ) 81 # we need to remove "shuffle" from kwargs if sampler is used 82 if "shuffle" in kwargs: 83 del kwargs["shuffle"] 84 85 kwargs["sampler"] = sampler 86 else: 87 warnings.warn( 88 "Found batch_sampler in provided kwargs. Please, make sure that it is compatible " 89 "with distributed configuration" 90 ) 91 92 if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get("pin_memory", False): 93 # TODO: How about XLA GPU ? 94 warnings.warn( 95 "Found incompatible options: xla support and pin_memory args equal True. " 96 "Argument `pin_memory=False` will be used to construct data loader." 97 ) 98 kwargs["pin_memory"] = False 99 else: 100 kwargs["pin_memory"] = kwargs.get("pin_memory", "cuda" in idist.device().type) 101 102 logger.info("Use data loader kwargs for dataset '{}': \n\t{}".format(repr(dataset)[:20].strip(), kwargs)) 103 dataloader = DataLoader(dataset, **kwargs) 104 105 if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1: 106 107 logger.info("DataLoader is wrapped by `MpDeviceLoader` on XLA") 108 109 mp_device_loader_cls = _MpDeviceLoader 110 try: 111 from torch_xla.distributed.parallel_loader import MpDeviceLoader 112 113 mp_device_loader_cls = MpDeviceLoader 114 except ImportError: 115 pass 116 117 sampler = dataloader.sampler 118 dataloader = mp_device_loader_cls(dataloader, idist.device()) 119 dataloader.sampler = sampler 120 121 return dataloader 122 123 124 def auto_model(model: nn.Module) -> nn.Module: 125 """Helper method to adapt provided model for non-distributed and distributed configurations (supporting 126 all available backends from :meth:`~ignite.distributed.utils.available_backends()`). 127 128 Internally, we perform to following: 129 130 - send model to current :meth:`~ignite.distributed.utils.device()`. 131 - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1 132 - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available. 133 134 Examples: 135 136 .. code-block:: python 137 138 import ignite.distribted as idist 139 140 model = idist.auto_model(model) 141 142 Args: 143 model (torch.nn.Module): model to adapt. 144 145 Returns: 146 torch.nn.Module 147 148 .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel 149 .. 
_torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel 150 """ 151 logger = setup_logger(__name__ + ".auto_model") 152 153 model.to(idist.device()) 154 155 # distributed data parallel model 156 if idist.get_world_size() > 1: 157 if idist.backend() == idist_native.NCCL: 158 lrank = idist.get_local_rank() 159 logger.info("Apply torch DistributedDataParallel on model, device id: {}".format(lrank)) 160 model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,]) 161 elif idist.backend() == idist_native.GLOO: 162 logger.info("Apply torch DistributedDataParallel on model") 163 model = torch.nn.parallel.DistributedDataParallel(model) 164 165 # not distributed but multiple GPUs reachable so data parallel model 166 elif torch.cuda.device_count() > 1 and "cuda" in idist.device().type: 167 logger.info("Apply torch DataParallel on model") 168 model = torch.nn.parallel.DataParallel(model) 169 170 return model 171 172 173 def auto_optim(optimizer: Optimizer) -> Optimizer: 174 """Helper method to adapt optimizer for non-distributed and distributed configurations (supporting 175 all available backends from :meth:`~ignite.distributed.utils.available_backends()`). 176 177 Internally, this method is no-op for non-distributed and torch native distributed configuration. 178 For XLA distributed configuration, we create a new class that inherits from provided optimizer. 179 The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation. 180 181 Examples: 182 183 .. code-block:: python 184 185 import ignite.distribted as idist 186 187 optimizer = idist.auto_optim(optimizer) 188 189 190 Args: 191 optimizer (Optimizer): input torch optimizer 192 193 Returns: 194 Optimizer 195 196 .. _xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step 197 198 """ 199 if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU): 200 return optimizer 201 202 cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__)) 203 return cls(optimizer) 204 205 206 class DistributedProxySampler(DistributedSampler): 207 """Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration. 208 209 Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407 210 211 212 .. note:: 213 Input sampler is assumed to have a constant size. 214 215 Args: 216 sampler (Sampler): Input torch data sampler. 217 num_replicas (int, optional): Number of processes participating in distributed training. 218 rank (int, optional): Rank of the current process within ``num_replicas``. 
219 220 """ 221 222 def __init__(self, sampler: Sampler, num_replicas=None, rank=None): 223 224 if not isinstance(sampler, Sampler): 225 raise TypeError("Argument sampler should be instance of torch Sampler, but given: {}".format(type(sampler))) 226 227 if not hasattr(sampler, "__len__"): 228 raise TypeError("Argument sampler should have length") 229 230 super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False) 231 self.sampler = sampler 232 233 def __iter__(self): 234 # deterministically shuffle based on epoch 235 torch.manual_seed(self.epoch) 236 237 indices = [] 238 while len(indices) < self.total_size: 239 indices += list(self.sampler) 240 241 if len(indices) != self.total_size: 242 raise RuntimeError("{} vs {}".format(len(indices), self.total_size)) 243 244 # subsample 245 indices = indices[self.rank : self.total_size : self.num_replicas] 246 if len(indices) != self.num_samples: 247 raise RuntimeError("{} vs {}".format(len(indices), self.num_samples)) 248 249 return iter(indices) 250 251 252 if idist.has_xla_support: 253 254 import torch_xla.core.xla_model as xm 255 from torch_xla.distributed.parallel_loader import ParallelLoader 256 257 class _MpDeviceLoader: 258 # https://github.com/pytorch/xla/pull/2117 259 # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available 260 def __init__(self, loader, device, **kwargs): 261 self._loader = loader 262 self._device = device 263 self._parallel_loader_kwargs = kwargs 264 265 def __iter__(self): 266 parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs) 267 return parallel_loader.per_device_loader(self._device) 268 269 def __len__(self): 270 return len(self._loader) 271 272 class _XLADistributedOptimizer(Optimizer): 273 def __init__(self, optimizer): 274 super(self.__class__, self).__init__(optimizer.param_groups) 275 self.wrapped_optimizer = optimizer 276 277 def step(self, closure=None): 278 xm.optimizer_step(self.wrapped_optimizer, barrier=True) 279 [end of ignite/distributed/auto.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py --- a/ignite/distributed/auto.py +++ b/ignite/distributed/auto.py @@ -238,8 +238,8 @@ while len(indices) < self.total_size: indices += list(self.sampler) - if len(indices) != self.total_size: - raise RuntimeError("{} vs {}".format(len(indices), self.total_size)) + if len(indices) > self.total_size: + indices = indices[: self.total_size] # subsample indices = indices[self.rank : self.total_size : self.num_replicas]
{"golden_diff": "diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py\n--- a/ignite/distributed/auto.py\n+++ b/ignite/distributed/auto.py\n@@ -238,8 +238,8 @@\n while len(indices) < self.total_size:\n indices += list(self.sampler)\n \n- if len(indices) != self.total_size:\n- raise RuntimeError(\"{} vs {}\".format(len(indices), self.total_size))\n+ if len(indices) > self.total_size:\n+ indices = indices[: self.total_size]\n \n # subsample\n indices = indices[self.rank : self.total_size : self.num_replicas]\n", "issue": "DistributedProxySampler RuntimeError when indices are padded\n## \ud83d\udc1b Bug description\r\n\r\nThe RuntimeError that occurs in the `DistributedProxySampler` on [line 241](https://github.com/pytorch/ignite/blob/master/ignite/distributed/auto.py#L241) shouldn't be there since the indices are padded with the full sample which was updated because of [this comment](https://github.com/pytorch/pytorch/issues/23430#issuecomment-597191137).\r\n\r\n## Environment\r\n\r\n - PyTorch Version (e.g., 1.4):\r\n - Ignite Version (e.g., 0.3.0):\r\n - OS (e.g., Linux):\r\n - How you installed Ignite (`conda`, `pip`, source):\r\n - Python version:\r\n - Any other relevant information:\r\n\n", "before_files": [{"content": "import warnings\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import Sampler\n\nfrom ignite.distributed import utils as idist\nfrom ignite.distributed.comp_models import native as idist_native\nfrom ignite.distributed.comp_models import xla as idist_xla\nfrom ignite.utils import setup_logger\n\n__all__ = [\"auto_dataloader\", \"auto_model\", \"auto_optim\", \"DistributedProxySampler\"]\n\n\ndef auto_dataloader(dataset, **kwargs):\n \"\"\"Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we create a dataloader with provided kwargs while applying the following updates:\n\n - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.\n - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.\n - if no sampler provided by user, `torch DistributedSampler` is setup.\n - if a sampler is provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.\n - if the default device is 'cuda', `pin_memory` is automatically set to `True`.\n\n .. warning::\n\n Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch\n sampler is compatible with distributed configuration.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n train_loader = idist.auto_dataloader(\n train_dataset,\n batch_size=32,\n num_workers=4,\n shuffle=True,\n pin_memory=\"cuda\" in idist.device().type,\n drop_last=True,\n )\n\n Args:\n dataset (Dataset): input torch dataset\n **kwargs: keyword arguments for `torch DataLoader`_.\n\n Returns:\n `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices\n\n .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\n .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178\n .. 
_torch DistributedSampler:\n https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler\n \"\"\"\n rank = idist.get_rank()\n world_size = idist.get_world_size()\n\n logger = setup_logger(__name__ + \".auto_dataloader\")\n if world_size > 1:\n if \"batch_size\" in kwargs and kwargs[\"batch_size\"] >= world_size:\n kwargs[\"batch_size\"] //= world_size\n\n nproc = idist.get_nproc_per_node()\n if \"num_workers\" in kwargs and kwargs[\"num_workers\"] >= nproc:\n kwargs[\"num_workers\"] = (kwargs[\"num_workers\"] + nproc - 1) // nproc\n\n if \"batch_sampler\" not in kwargs:\n if kwargs.get(\"sampler\", None) is not None:\n sampler = DistributedProxySampler(kwargs[\"sampler\"], num_replicas=world_size, rank=rank)\n else:\n sampler = DistributedSampler(\n dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get(\"shuffle\", True)\n )\n # we need to remove \"shuffle\" from kwargs if sampler is used\n if \"shuffle\" in kwargs:\n del kwargs[\"shuffle\"]\n\n kwargs[\"sampler\"] = sampler\n else:\n warnings.warn(\n \"Found batch_sampler in provided kwargs. Please, make sure that it is compatible \"\n \"with distributed configuration\"\n )\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get(\"pin_memory\", False):\n # TODO: How about XLA GPU ?\n warnings.warn(\n \"Found incompatible options: xla support and pin_memory args equal True. \"\n \"Argument `pin_memory=False` will be used to construct data loader.\"\n )\n kwargs[\"pin_memory\"] = False\n else:\n kwargs[\"pin_memory\"] = kwargs.get(\"pin_memory\", \"cuda\" in idist.device().type)\n\n logger.info(\"Use data loader kwargs for dataset '{}': \\n\\t{}\".format(repr(dataset)[:20].strip(), kwargs))\n dataloader = DataLoader(dataset, **kwargs)\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:\n\n logger.info(\"DataLoader is wrapped by `MpDeviceLoader` on XLA\")\n\n mp_device_loader_cls = _MpDeviceLoader\n try:\n from torch_xla.distributed.parallel_loader import MpDeviceLoader\n\n mp_device_loader_cls = MpDeviceLoader\n except ImportError:\n pass\n\n sampler = dataloader.sampler\n dataloader = mp_device_loader_cls(dataloader, idist.device())\n dataloader.sampler = sampler\n\n return dataloader\n\n\ndef auto_model(model: nn.Module) -> nn.Module:\n \"\"\"Helper method to adapt provided model for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we perform to following:\n\n - send model to current :meth:`~ignite.distributed.utils.device()`.\n - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1\n - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n model = idist.auto_model(model)\n\n Args:\n model (torch.nn.Module): model to adapt.\n\n Returns:\n torch.nn.Module\n\n .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel\n .. 
_torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel\n \"\"\"\n logger = setup_logger(__name__ + \".auto_model\")\n\n model.to(idist.device())\n\n # distributed data parallel model\n if idist.get_world_size() > 1:\n if idist.backend() == idist_native.NCCL:\n lrank = idist.get_local_rank()\n logger.info(\"Apply torch DistributedDataParallel on model, device id: {}\".format(lrank))\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])\n elif idist.backend() == idist_native.GLOO:\n logger.info(\"Apply torch DistributedDataParallel on model\")\n model = torch.nn.parallel.DistributedDataParallel(model)\n\n # not distributed but multiple GPUs reachable so data parallel model\n elif torch.cuda.device_count() > 1 and \"cuda\" in idist.device().type:\n logger.info(\"Apply torch DataParallel on model\")\n model = torch.nn.parallel.DataParallel(model)\n\n return model\n\n\ndef auto_optim(optimizer: Optimizer) -> Optimizer:\n \"\"\"Helper method to adapt optimizer for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, this method is no-op for non-distributed and torch native distributed configuration.\n For XLA distributed configuration, we create a new class that inherits from provided optimizer.\n The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n optimizer = idist.auto_optim(optimizer)\n\n\n Args:\n optimizer (Optimizer): input torch optimizer\n\n Returns:\n Optimizer\n\n .. _xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step\n\n \"\"\"\n if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):\n return optimizer\n\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))\n return cls(optimizer)\n\n\nclass DistributedProxySampler(DistributedSampler):\n \"\"\"Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.\n\n Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407\n\n\n .. 
note::\n Input sampler is assumed to have a constant size.\n\n Args:\n sampler (Sampler): Input torch data sampler.\n num_replicas (int, optional): Number of processes participating in distributed training.\n rank (int, optional): Rank of the current process within ``num_replicas``.\n\n \"\"\"\n\n def __init__(self, sampler: Sampler, num_replicas=None, rank=None):\n\n if not isinstance(sampler, Sampler):\n raise TypeError(\"Argument sampler should be instance of torch Sampler, but given: {}\".format(type(sampler)))\n\n if not hasattr(sampler, \"__len__\"):\n raise TypeError(\"Argument sampler should have length\")\n\n super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)\n self.sampler = sampler\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n torch.manual_seed(self.epoch)\n\n indices = []\n while len(indices) < self.total_size:\n indices += list(self.sampler)\n\n if len(indices) != self.total_size:\n raise RuntimeError(\"{} vs {}\".format(len(indices), self.total_size))\n\n # subsample\n indices = indices[self.rank : self.total_size : self.num_replicas]\n if len(indices) != self.num_samples:\n raise RuntimeError(\"{} vs {}\".format(len(indices), self.num_samples))\n\n return iter(indices)\n\n\nif idist.has_xla_support:\n\n import torch_xla.core.xla_model as xm\n from torch_xla.distributed.parallel_loader import ParallelLoader\n\n class _MpDeviceLoader:\n # https://github.com/pytorch/xla/pull/2117\n # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available\n def __init__(self, loader, device, **kwargs):\n self._loader = loader\n self._device = device\n self._parallel_loader_kwargs = kwargs\n\n def __iter__(self):\n parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)\n return parallel_loader.per_device_loader(self._device)\n\n def __len__(self):\n return len(self._loader)\n\n class _XLADistributedOptimizer(Optimizer):\n def __init__(self, optimizer):\n super(self.__class__, self).__init__(optimizer.param_groups)\n self.wrapped_optimizer = optimizer\n\n def step(self, closure=None):\n xm.optimizer_step(self.wrapped_optimizer, barrier=True)\n", "path": "ignite/distributed/auto.py"}]}
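A short, self-contained sketch of why the strict length check in DistributedProxySampler.__iter__ could fire: padding repeats the entire wrapped sampler, so the padded index list can overshoot total_size, and truncating it (as the accepted patch does) keeps every replica's shard the same size. The numbers below are illustrative and the snippet does not depend on torch.

```python
import math

sampler = list(range(10))        # stands in for any torch Sampler with a __len__
num_replicas = 4
num_samples = math.ceil(len(sampler) / num_replicas)   # 3 per replica
total_size = num_samples * num_replicas                 # 12

indices = []
while len(indices) < total_size:
    indices += list(sampler)     # pads with the *full* sample, per the upstream comment

print(len(indices))              # 20 -> the old "!= total_size" check raised here
indices = indices[:total_size]   # patched behaviour: truncate instead of raising

for rank in range(num_replicas):
    shard = indices[rank:total_size:num_replicas]
    assert len(shard) == num_samples   # every replica still gets an equal share
```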
3,884 | 138 | gh_patches_debug_3662 | rasdani/github-patches | git_diff | scikit-hep__awkward-2169
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> run cpp tests in CI ### Version of Awkward Array 2.0.6 ### Description and code to reproduce @agoose77 and @jpivarski - I think, we need to have at least one node to run the cpp tests. </issue> <code> [start of dev/copy-cpp-headers.py] 1 """Copy the header-only cpp headers into the various package directories that they are required""" 2 import pathlib 3 import shutil 4 5 root_path = pathlib.Path(__file__).absolute().parents[1] 6 source_path = root_path / "header-only" 7 dest_paths = ( 8 root_path / "awkward-cpp" / "header-only", 9 root_path / "src" / "awkward" / "_connect" / "header-only", 10 ) 11 12 if __name__ == "__main__": 13 for path in dest_paths: 14 if path.exists(): 15 shutil.rmtree(path) 16 shutil.copytree(source_path, path) 17 [end of dev/copy-cpp-headers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dev/copy-cpp-headers.py b/dev/copy-cpp-headers.py --- a/dev/copy-cpp-headers.py +++ b/dev/copy-cpp-headers.py @@ -3,7 +3,7 @@ import shutil root_path = pathlib.Path(__file__).absolute().parents[1] -source_path = root_path / "header-only" +source_path = root_path / "header-only" / "include" dest_paths = ( root_path / "awkward-cpp" / "header-only", root_path / "src" / "awkward" / "_connect" / "header-only",
{"golden_diff": "diff --git a/dev/copy-cpp-headers.py b/dev/copy-cpp-headers.py\n--- a/dev/copy-cpp-headers.py\n+++ b/dev/copy-cpp-headers.py\n@@ -3,7 +3,7 @@\n import shutil\n \n root_path = pathlib.Path(__file__).absolute().parents[1]\n-source_path = root_path / \"header-only\"\n+source_path = root_path / \"header-only\" / \"include\"\n dest_paths = (\n root_path / \"awkward-cpp\" / \"header-only\",\n root_path / \"src\" / \"awkward\" / \"_connect\" / \"header-only\",\n", "issue": "run cpp tests in CI \n### Version of Awkward Array\n\n2.0.6\n\n### Description and code to reproduce\n\n@agoose77 and @jpivarski - I think, we need to have at least one node to run the cpp tests.\n", "before_files": [{"content": "\"\"\"Copy the header-only cpp headers into the various package directories that they are required\"\"\"\nimport pathlib\nimport shutil\n\nroot_path = pathlib.Path(__file__).absolute().parents[1]\nsource_path = root_path / \"header-only\"\ndest_paths = (\n root_path / \"awkward-cpp\" / \"header-only\",\n root_path / \"src\" / \"awkward\" / \"_connect\" / \"header-only\",\n)\n\nif __name__ == \"__main__\":\n for path in dest_paths:\n if path.exists():\n shutil.rmtree(path)\n shutil.copytree(source_path, path)\n", "path": "dev/copy-cpp-headers.py"}]}
746 | 136 | gh_patches_debug_23142 | rasdani/github-patches | git_diff | netbox-community__netbox-12244
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Hide WIFI password ### NetBox version v3.4.3 ### Feature type Change to existing functionality ### Proposed functionality Is it possible to hide the WIFI password as well? As now when you add the password it is added as plain-text. Can this be a hidden password with "*********" and icon on the side to make it visible? Or does it needs to be moved to "secret(store) plugin? ### Use case Well i think passwords dont need to be visible until you want it to be. The best part is that you ofcourse need to be logged in to see this. But some think this needs to be secure and im not sure if this is also plain text in the database itself? ### Database changes Probally yes encrypted and hidden ### External dependencies Maybe the secret plugin. </issue> <code> [start of netbox/wireless/forms/model_forms.py] 1 from django.utils.translation import gettext as _ 2 from dcim.models import Device, Interface, Location, Region, Site, SiteGroup 3 from ipam.models import VLAN, VLANGroup 4 from netbox.forms import NetBoxModelForm 5 from tenancy.forms import TenancyForm 6 from utilities.forms import CommentField, DynamicModelChoiceField, SlugField, StaticSelect 7 from wireless.models import * 8 9 __all__ = ( 10 'WirelessLANForm', 11 'WirelessLANGroupForm', 12 'WirelessLinkForm', 13 ) 14 15 16 class WirelessLANGroupForm(NetBoxModelForm): 17 parent = DynamicModelChoiceField( 18 queryset=WirelessLANGroup.objects.all(), 19 required=False 20 ) 21 slug = SlugField() 22 23 fieldsets = ( 24 ('Wireless LAN Group', ( 25 'parent', 'name', 'slug', 'description', 'tags', 26 )), 27 ) 28 29 class Meta: 30 model = WirelessLANGroup 31 fields = [ 32 'parent', 'name', 'slug', 'description', 'tags', 33 ] 34 35 36 class WirelessLANForm(TenancyForm, NetBoxModelForm): 37 group = DynamicModelChoiceField( 38 queryset=WirelessLANGroup.objects.all(), 39 required=False 40 ) 41 region = DynamicModelChoiceField( 42 queryset=Region.objects.all(), 43 required=False, 44 initial_params={ 45 'sites': '$site' 46 } 47 ) 48 site_group = DynamicModelChoiceField( 49 queryset=SiteGroup.objects.all(), 50 required=False, 51 initial_params={ 52 'sites': '$site' 53 } 54 ) 55 site = DynamicModelChoiceField( 56 queryset=Site.objects.all(), 57 required=False, 58 null_option='None', 59 query_params={ 60 'region_id': '$region', 61 'group_id': '$site_group', 62 } 63 ) 64 vlan_group = DynamicModelChoiceField( 65 queryset=VLANGroup.objects.all(), 66 required=False, 67 label=_('VLAN group'), 68 null_option='None', 69 query_params={ 70 'site': '$site' 71 }, 72 initial_params={ 73 'vlans': '$vlan' 74 } 75 ) 76 vlan = DynamicModelChoiceField( 77 queryset=VLAN.objects.all(), 78 required=False, 79 label=_('VLAN'), 80 query_params={ 81 'site_id': '$site', 82 'group_id': '$vlan_group', 83 } 84 ) 85 comments = CommentField() 86 87 fieldsets = ( 88 ('Wireless LAN', ('ssid', 'group', 'status', 'description', 'tags')), 89 ('VLAN', ('region', 'site_group', 'site', 'vlan_group', 'vlan',)), 90 ('Tenancy', ('tenant_group', 'tenant')), 91 ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')), 92 ) 93 94 class Meta: 95 model = WirelessLAN 96 fields = [ 97 'ssid', 'group', 'region', 'site_group', 'site', 'status', 'vlan_group', 'vlan', 'tenant_group', 'tenant', 98 'auth_type', 'auth_cipher', 'auth_psk', 'description', 'comments', 'tags', 99 ] 100 widgets = { 101 'status': StaticSelect, 102 'auth_type': StaticSelect, 103 'auth_cipher': StaticSelect, 104 } 105 106 107 
class WirelessLinkForm(TenancyForm, NetBoxModelForm): 108 site_a = DynamicModelChoiceField( 109 queryset=Site.objects.all(), 110 required=False, 111 label=_('Site'), 112 initial_params={ 113 'devices': '$device_a', 114 } 115 ) 116 location_a = DynamicModelChoiceField( 117 queryset=Location.objects.all(), 118 query_params={ 119 'site_id': '$site_a', 120 }, 121 required=False, 122 label=_('Location'), 123 initial_params={ 124 'devices': '$device_a', 125 } 126 ) 127 device_a = DynamicModelChoiceField( 128 queryset=Device.objects.all(), 129 query_params={ 130 'site_id': '$site_a', 131 'location_id': '$location_a', 132 }, 133 required=False, 134 label=_('Device'), 135 initial_params={ 136 'interfaces': '$interface_a' 137 } 138 ) 139 interface_a = DynamicModelChoiceField( 140 queryset=Interface.objects.all(), 141 query_params={ 142 'kind': 'wireless', 143 'device_id': '$device_a', 144 }, 145 disabled_indicator='_occupied', 146 label=_('Interface') 147 ) 148 site_b = DynamicModelChoiceField( 149 queryset=Site.objects.all(), 150 required=False, 151 label=_('Site'), 152 initial_params={ 153 'devices': '$device_b', 154 } 155 ) 156 location_b = DynamicModelChoiceField( 157 queryset=Location.objects.all(), 158 query_params={ 159 'site_id': '$site_b', 160 }, 161 required=False, 162 label=_('Location'), 163 initial_params={ 164 'devices': '$device_b', 165 } 166 ) 167 device_b = DynamicModelChoiceField( 168 queryset=Device.objects.all(), 169 query_params={ 170 'site_id': '$site_b', 171 'location_id': '$location_b', 172 }, 173 required=False, 174 label=_('Device'), 175 initial_params={ 176 'interfaces': '$interface_b' 177 } 178 ) 179 interface_b = DynamicModelChoiceField( 180 queryset=Interface.objects.all(), 181 query_params={ 182 'kind': 'wireless', 183 'device_id': '$device_b', 184 }, 185 disabled_indicator='_occupied', 186 label=_('Interface') 187 ) 188 comments = CommentField() 189 190 fieldsets = ( 191 ('Side A', ('site_a', 'location_a', 'device_a', 'interface_a')), 192 ('Side B', ('site_b', 'location_b', 'device_b', 'interface_b')), 193 ('Link', ('status', 'ssid', 'description', 'tags')), 194 ('Tenancy', ('tenant_group', 'tenant')), 195 ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')), 196 ) 197 198 class Meta: 199 model = WirelessLink 200 fields = [ 201 'site_a', 'location_a', 'device_a', 'interface_a', 'site_b', 'location_b', 'device_b', 'interface_b', 202 'status', 'ssid', 'tenant_group', 'tenant', 'auth_type', 'auth_cipher', 'auth_psk', 'description', 203 'comments', 'tags', 204 ] 205 widgets = { 206 'status': StaticSelect, 207 'auth_type': StaticSelect, 208 'auth_cipher': StaticSelect, 209 } 210 labels = { 211 'auth_type': 'Type', 212 'auth_cipher': 'Cipher', 213 } 214 [end of netbox/wireless/forms/model_forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/wireless/forms/model_forms.py b/netbox/wireless/forms/model_forms.py --- a/netbox/wireless/forms/model_forms.py +++ b/netbox/wireless/forms/model_forms.py @@ -1,3 +1,4 @@ +from django.forms import PasswordInput from django.utils.translation import gettext as _ from dcim.models import Device, Interface, Location, Region, Site, SiteGroup from ipam.models import VLAN, VLANGroup @@ -101,6 +102,10 @@ 'status': StaticSelect, 'auth_type': StaticSelect, 'auth_cipher': StaticSelect, + 'auth_psk': PasswordInput( + render_value=True, + attrs={'data-toggle': 'password'} + ), } @@ -206,6 +211,10 @@ 'status': StaticSelect, 'auth_type': StaticSelect, 'auth_cipher': StaticSelect, + 'auth_psk': PasswordInput( + render_value=True, + attrs={'data-toggle': 'password'} + ), } labels = { 'auth_type': 'Type',
{"golden_diff": "diff --git a/netbox/wireless/forms/model_forms.py b/netbox/wireless/forms/model_forms.py\n--- a/netbox/wireless/forms/model_forms.py\n+++ b/netbox/wireless/forms/model_forms.py\n@@ -1,3 +1,4 @@\n+from django.forms import PasswordInput\n from django.utils.translation import gettext as _\n from dcim.models import Device, Interface, Location, Region, Site, SiteGroup\n from ipam.models import VLAN, VLANGroup\n@@ -101,6 +102,10 @@\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n+ 'auth_psk': PasswordInput(\n+ render_value=True,\n+ attrs={'data-toggle': 'password'}\n+ ),\n }\n \n \n@@ -206,6 +211,10 @@\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n+ 'auth_psk': PasswordInput(\n+ render_value=True,\n+ attrs={'data-toggle': 'password'}\n+ ),\n }\n labels = {\n 'auth_type': 'Type',\n", "issue": "Hide WIFI password\n### NetBox version\n\nv3.4.3\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nIs it possible to hide the WIFI password as well? As now when you add the password it is added as plain-text.\r\nCan this be a hidden password with \"*********\" and icon on the side to make it visible?\r\n\r\nOr does it needs to be moved to \"secret(store) plugin?\r\n\r\n\n\n### Use case\n\nWell i think passwords dont need to be visible until you want it to be. \r\nThe best part is that you ofcourse need to be logged in to see this. \r\nBut some think this needs to be secure and im not sure if this is also plain text in the database itself? \n\n### Database changes\n\nProbally yes encrypted and hidden\n\n### External dependencies\n\nMaybe the secret plugin.\n", "before_files": [{"content": "from django.utils.translation import gettext as _\nfrom dcim.models import Device, Interface, Location, Region, Site, SiteGroup\nfrom ipam.models import VLAN, VLANGroup\nfrom netbox.forms import NetBoxModelForm\nfrom tenancy.forms import TenancyForm\nfrom utilities.forms import CommentField, DynamicModelChoiceField, SlugField, StaticSelect\nfrom wireless.models import *\n\n__all__ = (\n 'WirelessLANForm',\n 'WirelessLANGroupForm',\n 'WirelessLinkForm',\n)\n\n\nclass WirelessLANGroupForm(NetBoxModelForm):\n parent = DynamicModelChoiceField(\n queryset=WirelessLANGroup.objects.all(),\n required=False\n )\n slug = SlugField()\n\n fieldsets = (\n ('Wireless LAN Group', (\n 'parent', 'name', 'slug', 'description', 'tags',\n )),\n )\n\n class Meta:\n model = WirelessLANGroup\n fields = [\n 'parent', 'name', 'slug', 'description', 'tags',\n ]\n\n\nclass WirelessLANForm(TenancyForm, NetBoxModelForm):\n group = DynamicModelChoiceField(\n queryset=WirelessLANGroup.objects.all(),\n required=False\n )\n region = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site_group = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n required=False,\n initial_params={\n 'sites': '$site'\n }\n )\n site = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n null_option='None',\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n vlan_group = DynamicModelChoiceField(\n queryset=VLANGroup.objects.all(),\n required=False,\n label=_('VLAN group'),\n null_option='None',\n query_params={\n 'site': '$site'\n },\n initial_params={\n 'vlans': '$vlan'\n }\n )\n vlan = DynamicModelChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n label=_('VLAN'),\n query_params={\n 
'site_id': '$site',\n 'group_id': '$vlan_group',\n }\n )\n comments = CommentField()\n\n fieldsets = (\n ('Wireless LAN', ('ssid', 'group', 'status', 'description', 'tags')),\n ('VLAN', ('region', 'site_group', 'site', 'vlan_group', 'vlan',)),\n ('Tenancy', ('tenant_group', 'tenant')),\n ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),\n )\n\n class Meta:\n model = WirelessLAN\n fields = [\n 'ssid', 'group', 'region', 'site_group', 'site', 'status', 'vlan_group', 'vlan', 'tenant_group', 'tenant',\n 'auth_type', 'auth_cipher', 'auth_psk', 'description', 'comments', 'tags',\n ]\n widgets = {\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n }\n\n\nclass WirelessLinkForm(TenancyForm, NetBoxModelForm):\n site_a = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n label=_('Site'),\n initial_params={\n 'devices': '$device_a',\n }\n )\n location_a = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n query_params={\n 'site_id': '$site_a',\n },\n required=False,\n label=_('Location'),\n initial_params={\n 'devices': '$device_a',\n }\n )\n device_a = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n query_params={\n 'site_id': '$site_a',\n 'location_id': '$location_a',\n },\n required=False,\n label=_('Device'),\n initial_params={\n 'interfaces': '$interface_a'\n }\n )\n interface_a = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n query_params={\n 'kind': 'wireless',\n 'device_id': '$device_a',\n },\n disabled_indicator='_occupied',\n label=_('Interface')\n )\n site_b = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n required=False,\n label=_('Site'),\n initial_params={\n 'devices': '$device_b',\n }\n )\n location_b = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n query_params={\n 'site_id': '$site_b',\n },\n required=False,\n label=_('Location'),\n initial_params={\n 'devices': '$device_b',\n }\n )\n device_b = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n query_params={\n 'site_id': '$site_b',\n 'location_id': '$location_b',\n },\n required=False,\n label=_('Device'),\n initial_params={\n 'interfaces': '$interface_b'\n }\n )\n interface_b = DynamicModelChoiceField(\n queryset=Interface.objects.all(),\n query_params={\n 'kind': 'wireless',\n 'device_id': '$device_b',\n },\n disabled_indicator='_occupied',\n label=_('Interface')\n )\n comments = CommentField()\n\n fieldsets = (\n ('Side A', ('site_a', 'location_a', 'device_a', 'interface_a')),\n ('Side B', ('site_b', 'location_b', 'device_b', 'interface_b')),\n ('Link', ('status', 'ssid', 'description', 'tags')),\n ('Tenancy', ('tenant_group', 'tenant')),\n ('Authentication', ('auth_type', 'auth_cipher', 'auth_psk')),\n )\n\n class Meta:\n model = WirelessLink\n fields = [\n 'site_a', 'location_a', 'device_a', 'interface_a', 'site_b', 'location_b', 'device_b', 'interface_b',\n 'status', 'ssid', 'tenant_group', 'tenant', 'auth_type', 'auth_cipher', 'auth_psk', 'description',\n 'comments', 'tags',\n ]\n widgets = {\n 'status': StaticSelect,\n 'auth_type': StaticSelect,\n 'auth_cipher': StaticSelect,\n }\n labels = {\n 'auth_type': 'Type',\n 'auth_cipher': 'Cipher',\n }\n", "path": "netbox/wireless/forms/model_forms.py"}]}
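A minimal plain-Django sketch (outside NetBox) of the widget swap the accepted patch makes, so the stored pre-shared key is rendered as a masked password input rather than plain text; the form and field names here are illustrative.

```python
from django import forms


class WirelessAuthForm(forms.Form):
    auth_psk = forms.CharField(
        required=False,
        label="Pre-shared key",
        widget=forms.PasswordInput(
            render_value=True,                  # keep the saved value when the form is re-rendered
            attrs={"data-toggle": "password"},  # hook for a client-side show/hide toggle
        ),
    )
```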
2,617 | 253 | gh_patches_debug_28272 | rasdani/github-patches | git_diff | googleapis__python-bigquery-89
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove test_utils directory Now that test utils are available in a GitHub repo, a local `test_utils` copy is not needed anymore and should be removed. </issue> <code> [start of noxfile.py] 1 # Copyright 2016 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 17 import os 18 import shutil 19 20 import nox 21 22 23 BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py") 24 25 26 def default(session): 27 """Default unit test session. 28 29 This is intended to be run **without** an interpreter set, so 30 that the current ``python`` (on the ``PATH``) or the version of 31 Python corresponding to the ``nox`` binary the ``PATH`` can 32 run the tests. 33 """ 34 # Install all test dependencies, then install local packages in-place. 35 session.install("mock", "pytest", "pytest-cov", "freezegun") 36 session.install("grpcio") 37 session.install("-e", "test_utils") 38 39 # fastparquet is not included in .[all] because, in general, it's redundant 40 # with pyarrow. We still want to run some unit tests with fastparquet 41 # serialization, though. 42 session.install("-e", ".[all,fastparquet]") 43 44 # IPython does not support Python 2 after version 5.x 45 if session.python == "2.7": 46 session.install("ipython==5.5") 47 else: 48 session.install("ipython") 49 50 # Run py.test against the unit tests. 51 session.run( 52 "py.test", 53 "--quiet", 54 "--cov=google.cloud.bigquery", 55 "--cov=tests.unit", 56 "--cov-append", 57 "--cov-config=.coveragerc", 58 "--cov-report=", 59 "--cov-fail-under=0", 60 os.path.join("tests", "unit"), 61 *session.posargs, 62 ) 63 64 65 @nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) 66 def unit(session): 67 """Run the unit test suite.""" 68 default(session) 69 70 71 @nox.session(python=["2.7", "3.8"]) 72 def system(session): 73 """Run the system test suite.""" 74 75 # Sanity check: Only run system tests if the environment variable is set. 76 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): 77 session.skip("Credentials must be set via environment variable.") 78 79 # Use pre-release gRPC for system tests. 80 session.install("--pre", "grpcio") 81 82 # Install all test dependencies, then install local packages in place. 83 session.install("mock", "pytest", "psutil") 84 session.install("google-cloud-storage") 85 session.install("fastavro") 86 session.install("-e", "test_utils") 87 session.install("-e", ".[all]") 88 89 # IPython does not support Python 2 after version 5.x 90 if session.python == "2.7": 91 session.install("ipython==5.5") 92 else: 93 session.install("ipython") 94 95 # Run py.test against the system tests. 
96 session.run( 97 "py.test", "--quiet", os.path.join("tests", "system.py"), *session.posargs 98 ) 99 100 101 @nox.session(python=["2.7", "3.8"]) 102 def snippets(session): 103 """Run the snippets test suite.""" 104 105 # Sanity check: Only run snippets tests if the environment variable is set. 106 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): 107 session.skip("Credentials must be set via environment variable.") 108 109 # Install all test dependencies, then install local packages in place. 110 session.install("mock", "pytest") 111 session.install("google-cloud-storage") 112 session.install("grpcio") 113 session.install("-e", "test_utils") 114 session.install("-e", ".[all]") 115 116 # Run py.test against the snippets tests. 117 session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs) 118 session.run("py.test", "samples", *session.posargs) 119 120 121 @nox.session(python="3.8") 122 def cover(session): 123 """Run the final coverage report. 124 125 This outputs the coverage report aggregating coverage from the unit 126 test runs (not system test runs), and then erases coverage data. 127 """ 128 session.install("coverage", "pytest-cov") 129 session.run("coverage", "report", "--show-missing", "--fail-under=100") 130 session.run("coverage", "erase") 131 132 133 @nox.session(python="3.8") 134 def lint(session): 135 """Run linters. 136 137 Returns a failure if the linters find linting errors or sufficiently 138 serious code quality issues. 139 """ 140 141 session.install("black", "flake8") 142 session.install("-e", ".") 143 session.run("flake8", os.path.join("google", "cloud", "bigquery")) 144 session.run("flake8", "tests") 145 session.run("flake8", os.path.join("docs", "samples")) 146 session.run("flake8", os.path.join("docs", "snippets.py")) 147 session.run("black", "--check", *BLACK_PATHS) 148 149 150 @nox.session(python="3.8") 151 def lint_setup_py(session): 152 """Verify that setup.py is valid (including RST check).""" 153 154 session.install("docutils", "Pygments") 155 session.run("python", "setup.py", "check", "--restructuredtext", "--strict") 156 157 158 @nox.session(python="3.6") 159 def blacken(session): 160 """Run black. 161 Format code to uniform standard. 162 163 This currently uses Python 3.6 due to the automated Kokoro run of synthtool. 164 That run uses an image that doesn't have 3.6 installed. Before updating this 165 check the state of the `gcp_ubuntu_config` we use for that Kokoro run. 166 """ 167 session.install("black") 168 session.run("black", *BLACK_PATHS) 169 170 171 @nox.session(python="3.8") 172 def docs(session): 173 """Build the docs.""" 174 175 session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme") 176 session.install("google-cloud-storage") 177 session.install("-e", ".[all]") 178 179 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) 180 session.run( 181 "sphinx-build", 182 "-W", # warnings as errors 183 "-T", # show full traceback on exception 184 "-N", # no colors 185 "-b", 186 "html", 187 "-d", 188 os.path.join("docs", "_build", "doctrees", ""), 189 os.path.join("docs", ""), 190 os.path.join("docs", "_build", "html", ""), 191 ) 192 [end of noxfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/noxfile.py b/noxfile.py --- a/noxfile.py +++ b/noxfile.py @@ -34,7 +34,7 @@ # Install all test dependencies, then install local packages in-place. session.install("mock", "pytest", "pytest-cov", "freezegun") session.install("grpcio") - session.install("-e", "test_utils") + session.install("git+https://github.com/googleapis/python-test-utils") # fastparquet is not included in .[all] because, in general, it's redundant # with pyarrow. We still want to run some unit tests with fastparquet @@ -83,7 +83,7 @@ session.install("mock", "pytest", "psutil") session.install("google-cloud-storage") session.install("fastavro") - session.install("-e", "test_utils") + session.install("git+https://github.com/googleapis/python-test-utils") session.install("-e", ".[all]") # IPython does not support Python 2 after version 5.x @@ -110,7 +110,7 @@ session.install("mock", "pytest") session.install("google-cloud-storage") session.install("grpcio") - session.install("-e", "test_utils") + session.install("git+https://github.com/googleapis/python-test-utils") session.install("-e", ".[all]") # Run py.test against the snippets tests.
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -34,7 +34,7 @@\n # Install all test dependencies, then install local packages in-place.\n session.install(\"mock\", \"pytest\", \"pytest-cov\", \"freezegun\")\n session.install(\"grpcio\")\n- session.install(\"-e\", \"test_utils\")\n+ session.install(\"git+https://github.com/googleapis/python-test-utils\")\n \n # fastparquet is not included in .[all] because, in general, it's redundant\n # with pyarrow. We still want to run some unit tests with fastparquet\n@@ -83,7 +83,7 @@\n session.install(\"mock\", \"pytest\", \"psutil\")\n session.install(\"google-cloud-storage\")\n session.install(\"fastavro\")\n- session.install(\"-e\", \"test_utils\")\n+ session.install(\"git+https://github.com/googleapis/python-test-utils\")\n session.install(\"-e\", \".[all]\")\n \n # IPython does not support Python 2 after version 5.x\n@@ -110,7 +110,7 @@\n session.install(\"mock\", \"pytest\")\n session.install(\"google-cloud-storage\")\n session.install(\"grpcio\")\n- session.install(\"-e\", \"test_utils\")\n+ session.install(\"git+https://github.com/googleapis/python-test-utils\")\n session.install(\"-e\", \".[all]\")\n \n # Run py.test against the snippets tests.\n", "issue": "Remove test_utils directory\nNow that test utils are available in a GitHub repo, a local `test_utils` copy is not needed anymore and should be removed.\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n # Install all test dependencies, then install local packages in-place.\n session.install(\"mock\", \"pytest\", \"pytest-cov\", \"freezegun\")\n session.install(\"grpcio\")\n session.install(\"-e\", \"test_utils\")\n\n # fastparquet is not included in .[all] because, in general, it's redundant\n # with pyarrow. 
We still want to run some unit tests with fastparquet\n # serialization, though.\n session.install(\"-e\", \".[all,fastparquet]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"psutil\")\n session.install(\"google-cloud-storage\")\n session.install(\"fastavro\")\n session.install(\"-e\", \"test_utils\")\n session.install(\"-e\", \".[all]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the system tests.\n session.run(\n \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n )\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\")\n session.install(\"google-cloud-storage\")\n session.install(\"grpcio\")\n session.install(\"-e\", \"test_utils\")\n session.install(\"-e\", \".[all]\")\n\n # Run py.test against the snippets tests.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\"py.test\", \"samples\", *session.posargs)\n\n\[email protected](python=\"3.8\")\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=\"3.8\")\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"black\", \"flake8\")\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email 
protected](python=\"3.8\")\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(\"black\")\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}]}
2,599 | 334 | gh_patches_debug_42985 | rasdani/github-patches | git_diff
pytorch__vision-914
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> STL-10 Testing Protocol Class STL10 does not support recommended testing protocol. See STL-10 official page: https://cs.stanford.edu/~acoates/stl10/ </issue> <code> [start of torchvision/datasets/stl10.py] 1 from __future__ import print_function 2 from PIL import Image 3 import os 4 import os.path 5 import numpy as np 6 from .cifar import CIFAR10 7 8 9 class STL10(CIFAR10): 10 """`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset. 11 12 Args: 13 root (string): Root directory of dataset where directory 14 ``stl10_binary`` exists. 15 split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}. 16 Accordingly dataset is selected. 17 transform (callable, optional): A function/transform that takes in an PIL image 18 and returns a transformed version. E.g, ``transforms.RandomCrop`` 19 target_transform (callable, optional): A function/transform that takes in the 20 target and transforms it. 21 download (bool, optional): If true, downloads the dataset from the internet and 22 puts it in root directory. If dataset is already downloaded, it is not 23 downloaded again. 24 25 """ 26 base_folder = 'stl10_binary' 27 url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz" 28 filename = "stl10_binary.tar.gz" 29 tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb' 30 class_names_file = 'class_names.txt' 31 train_list = [ 32 ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'], 33 ['train_y.bin', '5a34089d4802c674881badbb80307741'], 34 ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4'] 35 ] 36 37 test_list = [ 38 ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'], 39 ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e'] 40 ] 41 splits = ('train', 'train+unlabeled', 'unlabeled', 'test') 42 43 def __init__(self, root, split='train', 44 transform=None, target_transform=None, download=False): 45 if split not in self.splits: 46 raise ValueError('Split "{}" not found. Valid splits are: {}'.format( 47 split, ', '.join(self.splits), 48 )) 49 self.root = os.path.expanduser(root) 50 self.transform = transform 51 self.target_transform = target_transform 52 self.split = split # train/test/unlabeled set 53 54 if download: 55 self.download() 56 57 if not self._check_integrity(): 58 raise RuntimeError( 59 'Dataset not found or corrupted. 
' 60 'You can use download=True to download it') 61 62 # now load the picked numpy arrays 63 if self.split == 'train': 64 self.data, self.labels = self.__loadfile( 65 self.train_list[0][0], self.train_list[1][0]) 66 elif self.split == 'train+unlabeled': 67 self.data, self.labels = self.__loadfile( 68 self.train_list[0][0], self.train_list[1][0]) 69 unlabeled_data, _ = self.__loadfile(self.train_list[2][0]) 70 self.data = np.concatenate((self.data, unlabeled_data)) 71 self.labels = np.concatenate( 72 (self.labels, np.asarray([-1] * unlabeled_data.shape[0]))) 73 74 elif self.split == 'unlabeled': 75 self.data, _ = self.__loadfile(self.train_list[2][0]) 76 self.labels = np.asarray([-1] * self.data.shape[0]) 77 else: # self.split == 'test': 78 self.data, self.labels = self.__loadfile( 79 self.test_list[0][0], self.test_list[1][0]) 80 81 class_file = os.path.join( 82 self.root, self.base_folder, self.class_names_file) 83 if os.path.isfile(class_file): 84 with open(class_file) as f: 85 self.classes = f.read().splitlines() 86 87 def __getitem__(self, index): 88 """ 89 Args: 90 index (int): Index 91 92 Returns: 93 tuple: (image, target) where target is index of the target class. 94 """ 95 if self.labels is not None: 96 img, target = self.data[index], int(self.labels[index]) 97 else: 98 img, target = self.data[index], None 99 100 # doing this so that it is consistent with all other datasets 101 # to return a PIL Image 102 img = Image.fromarray(np.transpose(img, (1, 2, 0))) 103 104 if self.transform is not None: 105 img = self.transform(img) 106 107 if self.target_transform is not None: 108 target = self.target_transform(target) 109 110 return img, target 111 112 def __len__(self): 113 return self.data.shape[0] 114 115 def __loadfile(self, data_file, labels_file=None): 116 labels = None 117 if labels_file: 118 path_to_labels = os.path.join( 119 self.root, self.base_folder, labels_file) 120 with open(path_to_labels, 'rb') as f: 121 labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based 122 123 path_to_data = os.path.join(self.root, self.base_folder, data_file) 124 with open(path_to_data, 'rb') as f: 125 # read whole file in uint8 chunks 126 everything = np.fromfile(f, dtype=np.uint8) 127 images = np.reshape(everything, (-1, 3, 96, 96)) 128 images = np.transpose(images, (0, 1, 3, 2)) 129 130 return images, labels 131 132 def extra_repr(self): 133 return "Split: {split}".format(**self.__dict__) 134 [end of torchvision/datasets/stl10.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/datasets/stl10.py b/torchvision/datasets/stl10.py --- a/torchvision/datasets/stl10.py +++ b/torchvision/datasets/stl10.py @@ -14,6 +14,9 @@ ``stl10_binary`` exists. split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}. Accordingly dataset is selected. + folds (int, optional): One of {0-9} or None. + For training, loads one of the 10 pre-defined folds of 1k samples for the + standard evaluation procedure. If no value is passed, loads the 5k samples. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop`` target_transform (callable, optional): A function/transform that takes in the @@ -28,6 +31,7 @@ filename = "stl10_binary.tar.gz" tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb' class_names_file = 'class_names.txt' + folds_list_file = 'fold_indices.txt' train_list = [ ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'], ['train_y.bin', '5a34089d4802c674881badbb80307741'], @@ -40,7 +44,7 @@ ] splits = ('train', 'train+unlabeled', 'unlabeled', 'test') - def __init__(self, root, split='train', + def __init__(self, root, split='train', folds=None, transform=None, target_transform=None, download=False): if split not in self.splits: raise ValueError('Split "{}" not found. Valid splits are: {}'.format( @@ -50,6 +54,7 @@ self.transform = transform self.target_transform = target_transform self.split = split # train/test/unlabeled set + self.folds = folds # one of the 10 pre-defined folds or the full dataset if download: self.download() @@ -63,9 +68,12 @@ if self.split == 'train': self.data, self.labels = self.__loadfile( self.train_list[0][0], self.train_list[1][0]) + self.__load_folds(folds) + elif self.split == 'train+unlabeled': self.data, self.labels = self.__loadfile( self.train_list[0][0], self.train_list[1][0]) + self.__load_folds(folds) unlabeled_data, _ = self.__loadfile(self.train_list[2][0]) self.data = np.concatenate((self.data, unlabeled_data)) self.labels = np.concatenate( @@ -131,3 +139,16 @@ def extra_repr(self): return "Split: {split}".format(**self.__dict__) + + def __load_folds(self, folds): + # loads one of the folds if specified + if isinstance(folds, int): + if folds >= 0 and folds < 10: + path_to_folds = os.path.join( + self.root, self.base_folder, self.folds_list_file) + with open(path_to_folds, 'r') as f: + str_idx = f.read().splitlines()[folds] + list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ') + self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx] + else: + raise ValueError('Folds "{}" not found. Valid splits are: 0-9.'.format(folds))
{"golden_diff": "diff --git a/torchvision/datasets/stl10.py b/torchvision/datasets/stl10.py\n--- a/torchvision/datasets/stl10.py\n+++ b/torchvision/datasets/stl10.py\n@@ -14,6 +14,9 @@\n ``stl10_binary`` exists.\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\n Accordingly dataset is selected.\n+ folds (int, optional): One of {0-9} or None.\n+ For training, loads one of the 10 pre-defined folds of 1k samples for the\n+ standard evaluation procedure. If no value is passed, loads the 5k samples.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n@@ -28,6 +31,7 @@\n filename = \"stl10_binary.tar.gz\"\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\n class_names_file = 'class_names.txt'\n+ folds_list_file = 'fold_indices.txt'\n train_list = [\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\n@@ -40,7 +44,7 @@\n ]\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\n \n- def __init__(self, root, split='train',\n+ def __init__(self, root, split='train', folds=None,\n transform=None, target_transform=None, download=False):\n if split not in self.splits:\n raise ValueError('Split \"{}\" not found. Valid splits are: {}'.format(\n@@ -50,6 +54,7 @@\n self.transform = transform\n self.target_transform = target_transform\n self.split = split # train/test/unlabeled set\n+ self.folds = folds # one of the 10 pre-defined folds or the full dataset\n \n if download:\n self.download()\n@@ -63,9 +68,12 @@\n if self.split == 'train':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n+ self.__load_folds(folds)\n+\n elif self.split == 'train+unlabeled':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n+ self.__load_folds(folds)\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\n self.data = np.concatenate((self.data, unlabeled_data))\n self.labels = np.concatenate(\n@@ -131,3 +139,16 @@\n \n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n+\n+ def __load_folds(self, folds):\n+ # loads one of the folds if specified\n+ if isinstance(folds, int):\n+ if folds >= 0 and folds < 10:\n+ path_to_folds = os.path.join(\n+ self.root, self.base_folder, self.folds_list_file)\n+ with open(path_to_folds, 'r') as f:\n+ str_idx = f.read().splitlines()[folds]\n+ list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ')\n+ self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]\n+ else:\n+ raise ValueError('Folds \"{}\" not found. Valid splits are: 0-9.'.format(folds))\n", "issue": "STL-10 Testing Protocol\nClass STL10 does not support recommended testing protocol. \r\nSee STL-10 official page: https://cs.stanford.edu/~acoates/stl10/\n", "before_files": [{"content": "from __future__ import print_function\nfrom PIL import Image\nimport os\nimport os.path\nimport numpy as np\nfrom .cifar import CIFAR10\n\n\nclass STL10(CIFAR10):\n \"\"\"`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where directory\n ``stl10_binary`` exists.\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\n Accordingly dataset is selected.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n base_folder = 'stl10_binary'\n url = \"http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz\"\n filename = \"stl10_binary.tar.gz\"\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\n class_names_file = 'class_names.txt'\n train_list = [\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\n ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']\n ]\n\n test_list = [\n ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'],\n ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']\n ]\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\n\n def __init__(self, root, split='train',\n transform=None, target_transform=None, download=False):\n if split not in self.splits:\n raise ValueError('Split \"{}\" not found. Valid splits are: {}'.format(\n split, ', '.join(self.splits),\n ))\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.target_transform = target_transform\n self.split = split # train/test/unlabeled set\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\n 'Dataset not found or corrupted. '\n 'You can use download=True to download it')\n\n # now load the picked numpy arrays\n if self.split == 'train':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n elif self.split == 'train+unlabeled':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\n self.data = np.concatenate((self.data, unlabeled_data))\n self.labels = np.concatenate(\n (self.labels, np.asarray([-1] * unlabeled_data.shape[0])))\n\n elif self.split == 'unlabeled':\n self.data, _ = self.__loadfile(self.train_list[2][0])\n self.labels = np.asarray([-1] * self.data.shape[0])\n else: # self.split == 'test':\n self.data, self.labels = self.__loadfile(\n self.test_list[0][0], self.test_list[1][0])\n\n class_file = os.path.join(\n self.root, self.base_folder, self.class_names_file)\n if os.path.isfile(class_file):\n with open(class_file) as f:\n self.classes = f.read().splitlines()\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n if self.labels is not None:\n img, target = self.data[index], int(self.labels[index])\n else:\n img, target = self.data[index], None\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(np.transpose(img, (1, 2, 0)))\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return self.data.shape[0]\n\n def __loadfile(self, data_file, labels_file=None):\n labels = None\n if labels_file:\n path_to_labels = os.path.join(\n self.root, self.base_folder, labels_file)\n with open(path_to_labels, 'rb') as f:\n labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based\n\n path_to_data = os.path.join(self.root, self.base_folder, data_file)\n with open(path_to_data, 'rb') as f:\n # read whole file in uint8 chunks\n everything = 
np.fromfile(f, dtype=np.uint8)\n images = np.reshape(everything, (-1, 3, 96, 96))\n images = np.transpose(images, (0, 1, 3, 2))\n\n return images, labels\n\n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n", "path": "torchvision/datasets/stl10.py"}]}
2,229 | 910 | gh_patches_debug_15580 | rasdani/github-patches | git_diff
Azure__azure-cli-extensions-590
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Help documentation returns error for "min_profile" and "max_profile" ### Extension name (the extension in question) storage-preview ### Description of issue (in as much detail as possible) Returns the following error when prompting for help via `az storage account -h` ``` Help entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'. Traceback (most recent call last): File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\cli.py", line 206, in invoke File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\commands\__init__.py", line 276, in execute File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1734, in parse_args args, argv = self.parse_known_args(args, namespace) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1766, in parse_known_args namespace, args = self._parse_known_args(args, namespace) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1954, in _parse_known_args positionals_end_index = consume_positionals(start_index) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1931, in consume_positionals take_action(action, args) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1840, in take_action action(self, namespace, argument_values, option_string) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1137, in __call__ subnamespace, arg_strings = parser.parse_known_args(arg_strings, None) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1766, in parse_known_args namespace, args = self._parse_known_args(args, namespace) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1954, in _parse_known_args positionals_end_index = consume_positionals(start_index) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1931, in consume_positionals take_action(action, args) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1840, in take_action action(self, namespace, argument_values, option_string) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1137, in __call__ subnamespace, arg_strings = parser.parse_known_args(arg_strings, None) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1766, in parse_known_args namespace, args = self._parse_known_args(args, namespace) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1972, in _parse_known_args start_index = consume_optional(start_index) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1912, in consume_optional take_action(action, args, option_string) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1840, in take_action action(self, namespace, argument_values, option_string) File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 1024, in __call__ parser.print_help() File "C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\lib\argparse.py", line 2366, in print_help self._print_message(self.format_help(), file) File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\parser.py", line 154, in format_help File 
"C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\parser.py", line 225, in format_help File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\_help.py", line 146, in show_help File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\help.py", line 664, in show_help File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\help.py", line 219, in __init__ File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\_help.py", line 240, in load File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\_help_loaders.py", line 152, in versioned_load File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\help.py", line 178, in load File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\knack\knack\help.py", line 183, in _load_from_file File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\_help.py", line 234, in _load_from_data File "C:\Users\VSSADM~1\AppData\Local\Temp\pip-install-moqk8ce9\azure-cli-core\azure\cli\core\_help.py", line 201, in _should_include_example knack.help.HelpAuthoringException: Help entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'. ``` ----- </issue> <code> [start of src/storage-preview/azext_storage_preview/_help.py] 1 # coding=utf-8 2 # -------------------------------------------------------------------------------------------- 3 # Copyright (c) Microsoft Corporation. All rights reserved. 4 # Licensed under the MIT License. See License.txt in the project root for license information. 5 # -------------------------------------------------------------------------------------------- 6 7 from knack.help_files import helps 8 9 # pylint: disable=line-too-long, too-many-lines 10 11 helps['storage account create'] = """ 12 type: command 13 short-summary: Create a storage account. 14 long-summary: > 15 The SKU of the storage account defaults to 'Standard_RAGRS'. 16 examples: 17 - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage. 18 text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS 19 min_profile: latest 20 - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage. 21 text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS 22 max_profile: 2017-03-09-profile 23 """ 24 25 helps['storage account update'] = """ 26 type: command 27 short-summary: Update the properties of a storage account. 28 """ 29 30 helps['storage blob service-properties'] = """ 31 type: group 32 short-summary: Manage storage blob service properties. 33 """ 34 35 helps['storage blob service-properties update'] = """ 36 type: command 37 short-summary: Update storage blob service properties. 38 """ 39 40 helps['storage account management-policy'] = """ 41 type: group 42 short-summary: Manage storage account management policies. 43 """ 44 45 helps['storage account management-policy create'] = """ 46 type: command 47 short-summary: Creates the data policy rules associated with the specified storage account. 
48 """ 49 50 helps['storage account management-policy update'] = """ 51 type: command 52 short-summary: Updates the data policy rules associated with the specified storage account. 53 """ 54 55 helps['storage azcopy'] = """ 56 type: group 57 short-summary: | 58 [EXPERIMENTAL] Manage storage operations utilizing AzCopy. 59 long-summary: | 60 Open issues here: https://github.com/Azure/azure-storage-azcopy 61 """ 62 63 helps['storage azcopy blob'] = """ 64 type: group 65 short-summary: Manage object storage for unstructured data (blobs) using AzCopy. 66 """ 67 68 helps['storage azcopy blob upload'] = """ 69 type: command 70 short-summary: Upload blobs to a storage blob container using AzCopy. 71 examples: 72 - name: Upload a single blob to a container. 73 text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s "path/to/file" -d NewBlob 74 - name: Upload a directory to a container. 75 text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s "path/to/directory" --recursive 76 - name: Upload the contents of a directory to a container. 77 text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s "path/to/directory/*" --recursive 78 """ 79 80 helps['storage azcopy blob download'] = """ 81 type: command 82 short-summary: Download blobs from a storage blob container using AzCopy. 83 examples: 84 - name: Download a single blob from a container. 85 text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s "path/to/blob" -d "path/to/file" 86 - name: Download a virtual directory from a container. 87 text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s "path/to/virtual_directory" -d "download/path" --recursive 88 - name: Download the contents of a container onto a local file system. 89 text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s * -d "download/path" --recursive 90 """ 91 92 helps['storage azcopy blob delete'] = """ 93 type: command 94 short-summary: Delete blobs from a storage blob container using AzCopy. 95 examples: 96 - name: Delete a single blob from a container. 97 text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t TargetBlob 98 - name: Delete all blobs from a container. 99 text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount --recursive 100 - name: Delete all blobs in a virtual directory. 101 text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t "path/to/virtual_directory" --recursive 102 """ 103 104 helps['storage azcopy run-command'] = """ 105 type: command 106 short-summary: Run a command directly using the AzCopy CLI. Please use SAS tokens for authentication. 107 """ 108 [end of src/storage-preview/azext_storage_preview/_help.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/storage-preview/azext_storage_preview/_help.py b/src/storage-preview/azext_storage_preview/_help.py --- a/src/storage-preview/azext_storage_preview/_help.py +++ b/src/storage-preview/azext_storage_preview/_help.py @@ -16,10 +16,6 @@ examples: - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage. text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS - min_profile: latest - - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage. - text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS - max_profile: 2017-03-09-profile """ helps['storage account update'] = """
{"golden_diff": "diff --git a/src/storage-preview/azext_storage_preview/_help.py b/src/storage-preview/azext_storage_preview/_help.py\n--- a/src/storage-preview/azext_storage_preview/_help.py\n+++ b/src/storage-preview/azext_storage_preview/_help.py\n@@ -16,10 +16,6 @@\n examples:\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS\n- min_profile: latest\n- - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n- text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS\n- max_profile: 2017-03-09-profile\n \"\"\"\n \n helps['storage account update'] = \"\"\"\n", "issue": "Help documentation returns error for \"min_profile\" and \"max_profile\"\n### Extension name (the extension in question)\r\nstorage-preview\r\n\r\n### Description of issue (in as much detail as possible)\r\nReturns the following error when prompting for help via `az storage account -h`\r\n\r\n```\r\nHelp entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\cli.py\", line 206, in invoke\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\commands\\__init__.py\", line 276, in execute\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1734, in parse_args\r\n args, argv = self.parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1766, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1954, in _parse_known_args\r\n positionals_end_index = consume_positionals(start_index)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1931, in consume_positionals\r\n take_action(action, args)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1840, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1137, in __call__\r\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1766, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1954, in _parse_known_args\r\n positionals_end_index = consume_positionals(start_index)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1931, in consume_positionals\r\n take_action(action, args)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1840, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1137, in __call__\r\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\r\n File 
\"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1766, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1972, in _parse_known_args\r\n start_index = consume_optional(start_index)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1912, in consume_optional\r\n take_action(action, args, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1840, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 1024, in __call__\r\n parser.print_help()\r\n File \"C:\\Program Files (x86)\\Microsoft SDKs\\Azure\\CLI2\\lib\\argparse.py\", line 2366, in print_help\r\n self._print_message(self.format_help(), file)\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\parser.py\", line 154, in format_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\parser.py\", line 225, in format_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 146, in show_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 664, in show_help\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 219, in __init__\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 240, in load\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help_loaders.py\", line 152, in versioned_load\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 178, in load\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\knack\\knack\\help.py\", line 183, in _load_from_file\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 234, in _load_from_data\r\n File \"C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\pip-install-moqk8ce9\\azure-cli-core\\azure\\cli\\core\\_help.py\", line 201, in _should_include_example\r\nknack.help.HelpAuthoringException: Help entry fields 'min_profile' and 'max_profile' are no longer supported. Please use 'supported-profiles' or 'unsupported-profiles'.\r\n```\r\n-----\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps\n\n# pylint: disable=line-too-long, too-many-lines\n\nhelps['storage account create'] = \"\"\"\n type: command\n short-summary: Create a storage account.\n long-summary: >\n The SKU of the storage account defaults to 'Standard_RAGRS'.\n examples:\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS\n min_profile: latest\n - name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.\n text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS\n max_profile: 2017-03-09-profile\n\"\"\"\n\nhelps['storage account update'] = \"\"\"\n type: command\n short-summary: Update the properties of a storage account.\n\"\"\"\n\nhelps['storage blob service-properties'] = \"\"\"\n type: group\n short-summary: Manage storage blob service properties.\n\"\"\"\n\nhelps['storage blob service-properties update'] = \"\"\"\n type: command\n short-summary: Update storage blob service properties.\n\"\"\"\n\nhelps['storage account management-policy'] = \"\"\"\n type: group\n short-summary: Manage storage account management policies.\n\"\"\"\n\nhelps['storage account management-policy create'] = \"\"\"\n type: command\n short-summary: Creates the data policy rules associated with the specified storage account.\n\"\"\"\n\nhelps['storage account management-policy update'] = \"\"\"\n type: command\n short-summary: Updates the data policy rules associated with the specified storage account.\n\"\"\"\n\nhelps['storage azcopy'] = \"\"\"\n type: group\n short-summary: |\n [EXPERIMENTAL] Manage storage operations utilizing AzCopy.\n long-summary: |\n Open issues here: https://github.com/Azure/azure-storage-azcopy\n\"\"\"\n\nhelps['storage azcopy blob'] = \"\"\"\n type: group\n short-summary: Manage object storage for unstructured data (blobs) using AzCopy.\n\"\"\"\n\nhelps['storage azcopy blob upload'] = \"\"\"\n type: command\n short-summary: Upload blobs to a storage blob container using AzCopy.\n examples:\n - name: Upload a single blob to a container.\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/file\" -d NewBlob\n - name: Upload a directory to a container.\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/directory\" --recursive\n - name: Upload the contents of a directory to a container.\n text: storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s \"path/to/directory/*\" --recursive\n\"\"\"\n\nhelps['storage azcopy blob download'] = \"\"\"\n type: command\n short-summary: Download blobs from a storage blob container using AzCopy.\n examples:\n - name: Download a single blob from a container.\n text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s \"path/to/blob\" -d \"path/to/file\"\n - name: Download a virtual directory from a container.\n text: storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s \"path/to/virtual_directory\" -d \"download/path\" --recursive\n - name: Download the contents of a container onto a local file system.\n text: 
storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s * -d \"download/path\" --recursive\n\"\"\"\n\nhelps['storage azcopy blob delete'] = \"\"\"\n type: command\n short-summary: Delete blobs from a storage blob container using AzCopy.\n examples:\n - name: Delete a single blob from a container.\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t TargetBlob\n - name: Delete all blobs from a container.\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount --recursive\n - name: Delete all blobs in a virtual directory.\n text: storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t \"path/to/virtual_directory\" --recursive\n\"\"\"\n\nhelps['storage azcopy run-command'] = \"\"\"\n type: command\n short-summary: Run a command directly using the AzCopy CLI. Please use SAS tokens for authentication.\n\"\"\"\n", "path": "src/storage-preview/azext_storage_preview/_help.py"}]}
3,417 | 226 | gh_patches_debug_11962 | rasdani/github-patches | git_diff
svthalia__concrexit-2591
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Updating promo requests **Describe the bug** It is not possible to update the promo request within a week of the publish date **How to reproduce** Create a promo request Try to update the designer within a week to publish date **Expected behavior** The request can only be created more than a week before the publishing date, but assigned to, status and drive folder can always be edited. </issue> <code> [start of website/promotion/forms.py] 1 from django import forms 2 from django.utils import timezone 3 4 from promotion.models import PromotionRequest 5 from thaliawebsite.settings import PROMO_PUBLISH_DATE_TIMEDELTA 6 7 8 class PromotionRequestForm(forms.ModelForm): 9 class Meta: 10 model = PromotionRequest 11 fields = [ 12 "event", 13 "publish_date", 14 "channel", 15 "assigned_to", 16 "status", 17 "drive_folder", 18 "remarks", 19 ] 20 21 def clean_publish_date(self): 22 publish_date = self.cleaned_data.get("publish_date") 23 create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA 24 if timezone.localdate() > create_time_minimum: 25 raise forms.ValidationError( 26 "Publish date cannot be within a week from now." 27 ) 28 if "publish_date" in self.changed_data: 29 create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA 30 if timezone.localdate() > create_time_minimum: 31 raise forms.ValidationError( 32 "Publish date cannot be within a week from now." 33 ) 34 return publish_date 35 [end of website/promotion/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/promotion/forms.py b/website/promotion/forms.py --- a/website/promotion/forms.py +++ b/website/promotion/forms.py @@ -20,11 +20,6 @@ def clean_publish_date(self): publish_date = self.cleaned_data.get("publish_date") - create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA - if timezone.localdate() > create_time_minimum: - raise forms.ValidationError( - "Publish date cannot be within a week from now." - ) if "publish_date" in self.changed_data: create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA if timezone.localdate() > create_time_minimum:
{"golden_diff": "diff --git a/website/promotion/forms.py b/website/promotion/forms.py\n--- a/website/promotion/forms.py\n+++ b/website/promotion/forms.py\n@@ -20,11 +20,6 @@\n \n def clean_publish_date(self):\n publish_date = self.cleaned_data.get(\"publish_date\")\n- create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n- if timezone.localdate() > create_time_minimum:\n- raise forms.ValidationError(\n- \"Publish date cannot be within a week from now.\"\n- )\n if \"publish_date\" in self.changed_data:\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n", "issue": "Updating promo requests \n**Describe the bug**\r\nIt is not possible to update the promo request within a week of the publish date \r\n\r\n**How to reproduce** \r\nCreate a promo request \r\nTry to update the designer within a week to publish date \r\n\r\n**Expected behavior** \r\nThe request can only be created more than a week before the publishing date, but assigned to, status and drive folder can always be edited. \n", "before_files": [{"content": "from django import forms\nfrom django.utils import timezone\n\nfrom promotion.models import PromotionRequest\nfrom thaliawebsite.settings import PROMO_PUBLISH_DATE_TIMEDELTA\n\n\nclass PromotionRequestForm(forms.ModelForm):\n class Meta:\n model = PromotionRequest\n fields = [\n \"event\",\n \"publish_date\",\n \"channel\",\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n \"remarks\",\n ]\n\n def clean_publish_date(self):\n publish_date = self.cleaned_data.get(\"publish_date\")\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n raise forms.ValidationError(\n \"Publish date cannot be within a week from now.\"\n )\n if \"publish_date\" in self.changed_data:\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n raise forms.ValidationError(\n \"Publish date cannot be within a week from now.\"\n )\n return publish_date\n", "path": "website/promotion/forms.py"}]}
900 | 160 | gh_patches_debug_33629 | rasdani/github-patches | git_diff
pre-commit__pre-commit-372
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Not-found executable crashes framework This was introduced with the new exe logic in 0.8.0 Here's a simple reproduction: ``` yaml - repo: local hooks: - id: test name: test language: system entry: i-dont-exist-lol files: '\.py$' ``` ``` $ pre-commit run --all-files test.....................................................................An unexpected error has occurred: OSError: Executable i-dont-exist-lol not found Check the log at ~/.pre-commit/pre-commit.log ``` </issue> <code> [start of pre_commit/util.py] 1 from __future__ import unicode_literals 2 3 import contextlib 4 import errno 5 import functools 6 import os 7 import os.path 8 import shutil 9 import stat 10 import subprocess 11 import tarfile 12 import tempfile 13 14 import pkg_resources 15 16 from pre_commit import five 17 from pre_commit import parse_shebang 18 19 20 @contextlib.contextmanager 21 def cwd(path): 22 original_cwd = os.getcwd() 23 os.chdir(path) 24 try: 25 yield 26 finally: 27 os.chdir(original_cwd) 28 29 30 def mkdirp(path): 31 try: 32 os.makedirs(path) 33 except OSError: 34 if not os.path.exists(path): 35 raise 36 37 38 def memoize_by_cwd(func): 39 """Memoize a function call based on os.getcwd().""" 40 @functools.wraps(func) 41 def wrapper(*args): 42 cwd = os.getcwd() 43 key = (cwd,) + args 44 try: 45 return wrapper._cache[key] 46 except KeyError: 47 ret = wrapper._cache[key] = func(*args) 48 return ret 49 50 wrapper._cache = {} 51 52 return wrapper 53 54 55 @contextlib.contextmanager 56 def clean_path_on_failure(path): 57 """Cleans up the directory on an exceptional failure.""" 58 try: 59 yield 60 except BaseException: 61 if os.path.exists(path): 62 rmtree(path) 63 raise 64 65 66 @contextlib.contextmanager 67 def noop_context(): 68 yield 69 70 71 def no_git_env(): 72 # Too many bugs dealing with environment variables and GIT: 73 # https://github.com/pre-commit/pre-commit/issues/300 74 # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running 75 # pre-commit hooks 76 # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE 77 # while running pre-commit hooks in submodules. 78 # GIT_DIR: Causes git clone to clone wrong thing 79 # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit 80 return dict( 81 (k, v) for k, v in os.environ.items() if not k.startswith('GIT_') 82 ) 83 84 85 @contextlib.contextmanager 86 def tarfile_open(*args, **kwargs): 87 """Compatibility layer because python2.6""" 88 tf = tarfile.open(*args, **kwargs) 89 try: 90 yield tf 91 finally: 92 tf.close() 93 94 95 @contextlib.contextmanager 96 def tmpdir(): 97 """Contextmanager to create a temporary directory. It will be cleaned up 98 afterwards. 
99 """ 100 tempdir = tempfile.mkdtemp() 101 try: 102 yield tempdir 103 finally: 104 rmtree(tempdir) 105 106 107 def resource_filename(filename): 108 return pkg_resources.resource_filename( 109 'pre_commit', 110 os.path.join('resources', filename), 111 ) 112 113 114 def make_executable(filename): 115 original_mode = os.stat(filename).st_mode 116 os.chmod( 117 filename, 118 original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH, 119 ) 120 121 122 class CalledProcessError(RuntimeError): 123 def __init__(self, returncode, cmd, expected_returncode, output=None): 124 super(CalledProcessError, self).__init__( 125 returncode, cmd, expected_returncode, output, 126 ) 127 self.returncode = returncode 128 self.cmd = cmd 129 self.expected_returncode = expected_returncode 130 self.output = output 131 132 def to_bytes(self): 133 output = [] 134 for maybe_text in self.output: 135 if maybe_text: 136 output.append( 137 b'\n ' + 138 five.to_bytes(maybe_text).replace(b'\n', b'\n ') 139 ) 140 else: 141 output.append(b'(none)') 142 143 return b''.join(( 144 five.to_bytes( 145 'Command: {0!r}\n' 146 'Return code: {1}\n' 147 'Expected return code: {2}\n'.format( 148 self.cmd, self.returncode, self.expected_returncode 149 ) 150 ), 151 b'Output: ', output[0], b'\n', 152 b'Errors: ', output[1], b'\n', 153 )) 154 155 def to_text(self): 156 return self.to_bytes().decode('UTF-8') 157 158 if five.PY3: # pragma: no cover (py3) 159 __bytes__ = to_bytes 160 __str__ = to_text 161 else: # pragma: no cover (py2) 162 __str__ = to_bytes 163 __unicode__ = to_text 164 165 166 def cmd_output(*cmd, **kwargs): 167 retcode = kwargs.pop('retcode', 0) 168 encoding = kwargs.pop('encoding', 'UTF-8') 169 __popen = kwargs.pop('__popen', subprocess.Popen) 170 171 popen_kwargs = { 172 'stdin': subprocess.PIPE, 173 'stdout': subprocess.PIPE, 174 'stderr': subprocess.PIPE, 175 } 176 177 # py2/py3 on windows are more strict about the types here 178 cmd = tuple(five.n(arg) for arg in cmd) 179 kwargs['env'] = dict( 180 (five.n(key), five.n(value)) 181 for key, value in kwargs.pop('env', {}).items() 182 ) or None 183 184 cmd = parse_shebang.normalize_cmd(cmd) 185 186 popen_kwargs.update(kwargs) 187 proc = __popen(cmd, **popen_kwargs) 188 stdout, stderr = proc.communicate() 189 if encoding is not None and stdout is not None: 190 stdout = stdout.decode(encoding) 191 if encoding is not None and stderr is not None: 192 stderr = stderr.decode(encoding) 193 returncode = proc.returncode 194 195 if retcode is not None and retcode != returncode: 196 raise CalledProcessError( 197 returncode, cmd, retcode, output=(stdout, stderr), 198 ) 199 200 return proc.returncode, stdout, stderr 201 202 203 def rmtree(path): 204 """On windows, rmtree fails for readonly dirs.""" 205 def handle_remove_readonly(func, path, exc): # pragma: no cover (windows) 206 excvalue = exc[1] 207 if ( 208 func in (os.rmdir, os.remove, os.unlink) and 209 excvalue.errno == errno.EACCES 210 ): 211 os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) 212 func(path) 213 else: 214 raise 215 shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly) 216 [end of pre_commit/util.py] [start of pre_commit/parse_shebang.py] 1 from __future__ import absolute_import 2 from __future__ import unicode_literals 3 4 import io 5 import os.path 6 import shlex 7 import string 8 9 from pre_commit import five 10 11 12 printable = frozenset(string.printable) 13 14 15 def parse_bytesio(bytesio): 16 """Parse the shebang from a file opened for reading binary.""" 17 if bytesio.read(2) != 
b'#!': 18 return () 19 first_line = bytesio.readline() 20 try: 21 first_line = first_line.decode('US-ASCII') 22 except UnicodeDecodeError: 23 return () 24 25 # Require only printable ascii 26 for c in first_line: 27 if c not in printable: 28 return () 29 30 # shlex.split is horribly broken in py26 on text strings 31 cmd = tuple(shlex.split(five.n(first_line))) 32 if cmd[0] == '/usr/bin/env': 33 cmd = cmd[1:] 34 return cmd 35 36 37 def parse_filename(filename): 38 """Parse the shebang given a filename.""" 39 if not os.path.exists(filename) or not os.access(filename, os.X_OK): 40 return () 41 42 with io.open(filename, 'rb') as f: 43 return parse_bytesio(f) 44 45 46 def find_executable(exe, _environ=None): 47 exe = os.path.normpath(exe) 48 if os.sep in exe: 49 return exe 50 51 environ = _environ if _environ is not None else os.environ 52 53 if 'PATHEXT' in environ: 54 possible_exe_names = (exe,) + tuple( 55 exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep) 56 ) 57 else: 58 possible_exe_names = (exe,) 59 60 for path in environ.get('PATH', '').split(os.pathsep): 61 for possible_exe_name in possible_exe_names: 62 joined = os.path.join(path, possible_exe_name) 63 if os.path.isfile(joined) and os.access(joined, os.X_OK): 64 return joined 65 else: 66 return None 67 68 69 def normexe(orig_exe): 70 if os.sep not in orig_exe: 71 exe = find_executable(orig_exe) 72 if exe is None: 73 raise OSError('Executable {0} not found'.format(orig_exe)) 74 return exe 75 else: 76 return orig_exe 77 78 79 def normalize_cmd(cmd): 80 """Fixes for the following issues on windows 81 - http://bugs.python.org/issue8557 82 - windows does not parse shebangs 83 84 This function also makes deep-path shebangs work just fine 85 """ 86 # Use PATH to determine the executable 87 exe = normexe(cmd[0]) 88 89 # Figure out the shebang from the resulting command 90 cmd = parse_filename(exe) + (exe,) + cmd[1:] 91 92 # This could have given us back another bare executable 93 exe = normexe(cmd[0]) 94 95 return (exe,) + cmd[1:] 96 [end of pre_commit/parse_shebang.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/parse_shebang.py b/pre_commit/parse_shebang.py --- a/pre_commit/parse_shebang.py +++ b/pre_commit/parse_shebang.py @@ -12,6 +12,10 @@ printable = frozenset(string.printable) +class ExecutableNotFoundError(OSError): + pass + + def parse_bytesio(bytesio): """Parse the shebang from a file opened for reading binary.""" if bytesio.read(2) != b'#!': @@ -70,7 +74,9 @@ if os.sep not in orig_exe: exe = find_executable(orig_exe) if exe is None: - raise OSError('Executable {0} not found'.format(orig_exe)) + raise ExecutableNotFoundError( + 'Executable `{0}` not found'.format(orig_exe), + ) return exe else: return orig_exe diff --git a/pre_commit/util.py b/pre_commit/util.py --- a/pre_commit/util.py +++ b/pre_commit/util.py @@ -181,23 +181,26 @@ for key, value in kwargs.pop('env', {}).items() ) or None - cmd = parse_shebang.normalize_cmd(cmd) - - popen_kwargs.update(kwargs) - proc = __popen(cmd, **popen_kwargs) - stdout, stderr = proc.communicate() - if encoding is not None and stdout is not None: - stdout = stdout.decode(encoding) - if encoding is not None and stderr is not None: - stderr = stderr.decode(encoding) - returncode = proc.returncode + try: + cmd = parse_shebang.normalize_cmd(cmd) + except parse_shebang.ExecutableNotFoundError as e: + returncode, stdout, stderr = (-1, e.args[0].encode('UTF-8'), b'') + else: + popen_kwargs.update(kwargs) + proc = __popen(cmd, **popen_kwargs) + stdout, stderr = proc.communicate() + if encoding is not None and stdout is not None: + stdout = stdout.decode(encoding) + if encoding is not None and stderr is not None: + stderr = stderr.decode(encoding) + returncode = proc.returncode if retcode is not None and retcode != returncode: raise CalledProcessError( returncode, cmd, retcode, output=(stdout, stderr), ) - return proc.returncode, stdout, stderr + return returncode, stdout, stderr def rmtree(path):
{"golden_diff": "diff --git a/pre_commit/parse_shebang.py b/pre_commit/parse_shebang.py\n--- a/pre_commit/parse_shebang.py\n+++ b/pre_commit/parse_shebang.py\n@@ -12,6 +12,10 @@\n printable = frozenset(string.printable)\n \n \n+class ExecutableNotFoundError(OSError):\n+ pass\n+\n+\n def parse_bytesio(bytesio):\n \"\"\"Parse the shebang from a file opened for reading binary.\"\"\"\n if bytesio.read(2) != b'#!':\n@@ -70,7 +74,9 @@\n if os.sep not in orig_exe:\n exe = find_executable(orig_exe)\n if exe is None:\n- raise OSError('Executable {0} not found'.format(orig_exe))\n+ raise ExecutableNotFoundError(\n+ 'Executable `{0}` not found'.format(orig_exe),\n+ )\n return exe\n else:\n return orig_exe\ndiff --git a/pre_commit/util.py b/pre_commit/util.py\n--- a/pre_commit/util.py\n+++ b/pre_commit/util.py\n@@ -181,23 +181,26 @@\n for key, value in kwargs.pop('env', {}).items()\n ) or None\n \n- cmd = parse_shebang.normalize_cmd(cmd)\n-\n- popen_kwargs.update(kwargs)\n- proc = __popen(cmd, **popen_kwargs)\n- stdout, stderr = proc.communicate()\n- if encoding is not None and stdout is not None:\n- stdout = stdout.decode(encoding)\n- if encoding is not None and stderr is not None:\n- stderr = stderr.decode(encoding)\n- returncode = proc.returncode\n+ try:\n+ cmd = parse_shebang.normalize_cmd(cmd)\n+ except parse_shebang.ExecutableNotFoundError as e:\n+ returncode, stdout, stderr = (-1, e.args[0].encode('UTF-8'), b'')\n+ else:\n+ popen_kwargs.update(kwargs)\n+ proc = __popen(cmd, **popen_kwargs)\n+ stdout, stderr = proc.communicate()\n+ if encoding is not None and stdout is not None:\n+ stdout = stdout.decode(encoding)\n+ if encoding is not None and stderr is not None:\n+ stderr = stderr.decode(encoding)\n+ returncode = proc.returncode\n \n if retcode is not None and retcode != returncode:\n raise CalledProcessError(\n returncode, cmd, retcode, output=(stdout, stderr),\n )\n \n- return proc.returncode, stdout, stderr\n+ return returncode, stdout, stderr\n \n \n def rmtree(path):\n", "issue": "Not-found executable crashes framework\nThis was introduced with the new exe logic in 0.8.0\n\nHere's a simple reproduction:\n\n``` yaml\n- repo: local\n hooks:\n - id: test\n name: test\n language: system\n entry: i-dont-exist-lol\n files: '\\.py$'\n```\n\n```\n$ pre-commit run --all-files\ntest.....................................................................An unexpected error has occurred: OSError: Executable i-dont-exist-lol not found\nCheck the log at ~/.pre-commit/pre-commit.log\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport errno\nimport functools\nimport os\nimport os.path\nimport shutil\nimport stat\nimport subprocess\nimport tarfile\nimport tempfile\n\nimport pkg_resources\n\nfrom pre_commit import five\nfrom pre_commit import parse_shebang\n\n\[email protected]\ndef cwd(path):\n original_cwd = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(original_cwd)\n\n\ndef mkdirp(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.exists(path):\n raise\n\n\ndef memoize_by_cwd(func):\n \"\"\"Memoize a function call based on os.getcwd().\"\"\"\n @functools.wraps(func)\n def wrapper(*args):\n cwd = os.getcwd()\n key = (cwd,) + args\n try:\n return wrapper._cache[key]\n except KeyError:\n ret = wrapper._cache[key] = func(*args)\n return ret\n\n wrapper._cache = {}\n\n return wrapper\n\n\[email protected]\ndef clean_path_on_failure(path):\n \"\"\"Cleans up the directory on an exceptional failure.\"\"\"\n try:\n 
yield\n except BaseException:\n if os.path.exists(path):\n rmtree(path)\n raise\n\n\[email protected]\ndef noop_context():\n yield\n\n\ndef no_git_env():\n # Too many bugs dealing with environment variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n return dict(\n (k, v) for k, v in os.environ.items() if not k.startswith('GIT_')\n )\n\n\[email protected]\ndef tarfile_open(*args, **kwargs):\n \"\"\"Compatibility layer because python2.6\"\"\"\n tf = tarfile.open(*args, **kwargs)\n try:\n yield tf\n finally:\n tf.close()\n\n\[email protected]\ndef tmpdir():\n \"\"\"Contextmanager to create a temporary directory. It will be cleaned up\n afterwards.\n \"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n yield tempdir\n finally:\n rmtree(tempdir)\n\n\ndef resource_filename(filename):\n return pkg_resources.resource_filename(\n 'pre_commit',\n os.path.join('resources', filename),\n )\n\n\ndef make_executable(filename):\n original_mode = os.stat(filename).st_mode\n os.chmod(\n filename,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n\nclass CalledProcessError(RuntimeError):\n def __init__(self, returncode, cmd, expected_returncode, output=None):\n super(CalledProcessError, self).__init__(\n returncode, cmd, expected_returncode, output,\n )\n self.returncode = returncode\n self.cmd = cmd\n self.expected_returncode = expected_returncode\n self.output = output\n\n def to_bytes(self):\n output = []\n for maybe_text in self.output:\n if maybe_text:\n output.append(\n b'\\n ' +\n five.to_bytes(maybe_text).replace(b'\\n', b'\\n ')\n )\n else:\n output.append(b'(none)')\n\n return b''.join((\n five.to_bytes(\n 'Command: {0!r}\\n'\n 'Return code: {1}\\n'\n 'Expected return code: {2}\\n'.format(\n self.cmd, self.returncode, self.expected_returncode\n )\n ),\n b'Output: ', output[0], b'\\n',\n b'Errors: ', output[1], b'\\n',\n ))\n\n def to_text(self):\n return self.to_bytes().decode('UTF-8')\n\n if five.PY3: # pragma: no cover (py3)\n __bytes__ = to_bytes\n __str__ = to_text\n else: # pragma: no cover (py2)\n __str__ = to_bytes\n __unicode__ = to_text\n\n\ndef cmd_output(*cmd, **kwargs):\n retcode = kwargs.pop('retcode', 0)\n encoding = kwargs.pop('encoding', 'UTF-8')\n __popen = kwargs.pop('__popen', subprocess.Popen)\n\n popen_kwargs = {\n 'stdin': subprocess.PIPE,\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE,\n }\n\n # py2/py3 on windows are more strict about the types here\n cmd = tuple(five.n(arg) for arg in cmd)\n kwargs['env'] = dict(\n (five.n(key), five.n(value))\n for key, value in kwargs.pop('env', {}).items()\n ) or None\n\n cmd = parse_shebang.normalize_cmd(cmd)\n\n popen_kwargs.update(kwargs)\n proc = __popen(cmd, **popen_kwargs)\n stdout, stderr = proc.communicate()\n if encoding is not None and stdout is not None:\n stdout = stdout.decode(encoding)\n if encoding is not None and stderr is not None:\n stderr = stderr.decode(encoding)\n returncode = proc.returncode\n\n if retcode is not None and retcode != returncode:\n raise CalledProcessError(\n returncode, cmd, retcode, output=(stdout, stderr),\n )\n\n return proc.returncode, stdout, stderr\n\n\ndef rmtree(path):\n \"\"\"On windows, rmtree fails for readonly 
dirs.\"\"\"\n def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)\n excvalue = exc[1]\n if (\n func in (os.rmdir, os.remove, os.unlink) and\n excvalue.errno == errno.EACCES\n ):\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)\n", "path": "pre_commit/util.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport io\nimport os.path\nimport shlex\nimport string\n\nfrom pre_commit import five\n\n\nprintable = frozenset(string.printable)\n\n\ndef parse_bytesio(bytesio):\n \"\"\"Parse the shebang from a file opened for reading binary.\"\"\"\n if bytesio.read(2) != b'#!':\n return ()\n first_line = bytesio.readline()\n try:\n first_line = first_line.decode('US-ASCII')\n except UnicodeDecodeError:\n return ()\n\n # Require only printable ascii\n for c in first_line:\n if c not in printable:\n return ()\n\n # shlex.split is horribly broken in py26 on text strings\n cmd = tuple(shlex.split(five.n(first_line)))\n if cmd[0] == '/usr/bin/env':\n cmd = cmd[1:]\n return cmd\n\n\ndef parse_filename(filename):\n \"\"\"Parse the shebang given a filename.\"\"\"\n if not os.path.exists(filename) or not os.access(filename, os.X_OK):\n return ()\n\n with io.open(filename, 'rb') as f:\n return parse_bytesio(f)\n\n\ndef find_executable(exe, _environ=None):\n exe = os.path.normpath(exe)\n if os.sep in exe:\n return exe\n\n environ = _environ if _environ is not None else os.environ\n\n if 'PATHEXT' in environ:\n possible_exe_names = (exe,) + tuple(\n exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep)\n )\n else:\n possible_exe_names = (exe,)\n\n for path in environ.get('PATH', '').split(os.pathsep):\n for possible_exe_name in possible_exe_names:\n joined = os.path.join(path, possible_exe_name)\n if os.path.isfile(joined) and os.access(joined, os.X_OK):\n return joined\n else:\n return None\n\n\ndef normexe(orig_exe):\n if os.sep not in orig_exe:\n exe = find_executable(orig_exe)\n if exe is None:\n raise OSError('Executable {0} not found'.format(orig_exe))\n return exe\n else:\n return orig_exe\n\n\ndef normalize_cmd(cmd):\n \"\"\"Fixes for the following issues on windows\n - http://bugs.python.org/issue8557\n - windows does not parse shebangs\n\n This function also makes deep-path shebangs work just fine\n \"\"\"\n # Use PATH to determine the executable\n exe = normexe(cmd[0])\n\n # Figure out the shebang from the resulting command\n cmd = parse_filename(exe) + (exe,) + cmd[1:]\n\n # This could have given us back another bare executable\n exe = normexe(cmd[0])\n\n return (exe,) + cmd[1:]\n", "path": "pre_commit/parse_shebang.py"}]}
3,419 | 569 | gh_patches_debug_29010 | rasdani/github-patches | git_diff
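The golden diff for this record turns the crash into an ordinary hook failure: it introduces ExecutableNotFoundError and catches it in cmd_output, so a missing entry point yields a non-zero return code and a readable message instead of aborting the run. The sketch below is a simplified illustration of that pattern, not the project code; shutil.which stands in for pre-commit's own PATH and PATHEXT handling.

```python
import os
import shutil
import subprocess


class ExecutableNotFoundError(OSError):
    """Raised when a hook's entry point cannot be located on PATH."""


def normalize_cmd(cmd):
    # Bare names are resolved on PATH; anything containing a path separator is kept as-is.
    exe = cmd[0] if os.sep in cmd[0] else shutil.which(cmd[0])
    if exe is None:
        raise ExecutableNotFoundError('Executable `{}` not found'.format(cmd[0]))
    return (exe,) + tuple(cmd[1:])


def cmd_output(*cmd):
    """Run cmd and return (returncode, stdout, stderr); a missing exe is a failure, not a crash."""
    try:
        cmd = normalize_cmd(cmd)
    except ExecutableNotFoundError as e:
        return -1, e.args[0].encode('UTF-8'), b''
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return proc.returncode, stdout, stderr


print(cmd_output('i-dont-exist-lol'))  # (-1, b'Executable `i-dont-exist-lol` not found', b'')
```

With that in place, the local hook from the issue's config shows up as a failed hook with the message in its output rather than killing the whole pre-commit run.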
Flexget__Flexget-2284
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> descargas2020 plugin broken ### Expected behaviour: it downloads torrent properly. ### Actual behaviour: ### Steps to reproduce: add this rss http://feeds.feedburner.com/descargas2020new flexget execute you will get: 2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/ 2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None) 2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved. #### Config: ``` tasks: series_sigo: rss: url: http://feeds.feedburner.com/descargas2020new link: link all_series: yes seen: local regexp: reject: - PCDVD accept_all: yes thetvdb_lookup: yes no_entries_ok: yes set: filename: "{{title|pathscrub}}.torrent" download: /home/flexget/torrent/ ``` #### Log: 2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/ 2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None) 2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved. ``` paste log output here ``` </details> ### Additional information: - FlexGet version: 2.17.20 - Python version: 2.7.13 - Installation method: i don't remember - Using daemon (yes/no): no - OS and version: debian 9 x64 - Link to crash log: <!--- In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on. Please verify that the following data is present before submitting your issue: - Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible. - Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`). - FlexGet version (use `flexget -V` to get it). - Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6. - Installation method (pip, git install, etc). - Whether or not you're running FlexGet as a daemon. - OS and version. - Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file. 
---> </issue> <code> [start of flexget/plugins/sites/descargas2020.py] 1 from __future__ import unicode_literals, division, absolute_import 2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin 3 4 import logging 5 import re 6 7 from flexget import plugin 8 from flexget.event import event 9 from flexget.plugins.internal.urlrewriting import UrlRewritingError 10 from flexget.utils.requests import Session, TimedLimiter 11 from flexget.utils.soup import get_soup 12 from flexget.utils import requests 13 14 from flexget.entry import Entry 15 from flexget.utils.search import normalize_unicode 16 17 import unicodedata 18 19 log = logging.getLogger('descargas2020') 20 21 requests = Session() 22 requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}) 23 requests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds')) 24 25 DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent' 26 27 class UrlRewriteDescargas2020(object): 28 """Descargas2020 urlrewriter and search.""" 29 30 schema = { 31 'type': 'boolean', 32 'default': False 33 } 34 35 # urlrewriter API 36 def url_rewritable(self, task, entry): 37 url = entry['url'] 38 rewritable_regex = '^http:\/\/(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com\/.*' 39 return re.match(rewritable_regex, url) and not url.endswith('.torrent') 40 41 # urlrewriter API 42 def url_rewrite(self, task, entry): 43 entry['url'] = self.parse_download_page(entry['url'], task) 44 45 @plugin.internet(log) 46 def parse_download_page(self, url, task): 47 log.verbose('Descargas2020 URL: %s', url) 48 49 try: 50 page = requests.get(url) 51 except requests.exceptions.RequestException as e: 52 raise UrlRewritingError(e) 53 try: 54 soup = get_soup(page.text) 55 except Exception as e: 56 raise UrlRewritingError(e) 57 58 torrent_id = None 59 url_format = DESCARGAS2020_TORRENT_FORMAT 60 61 torrent_id_prog = re.compile("(?:parametros\s*=\s*\n?)\s*{\s*\n(?:\s*'\w+'\s*:.*\n)+\s*'(?:torrentID|id)" 62 "'\s*:\s*'(\d+)'") 63 torrent_ids = soup.findAll(text=torrent_id_prog) 64 if torrent_ids: 65 match = torrent_id_prog.search(torrent_ids[0]) 66 if match: 67 torrent_id = match.group(1) 68 if not torrent_id: 69 log.debug('torrent ID not found, searching openTorrent script') 70 torrent_id_prog = re.compile('function openTorrent.*\n.*\{.*(\n.*)+window\.location\.href =\s*\"(.*\/\d+_-.*[^\/])\/?\";') 71 torrent_ids = soup.findAll(text=torrent_id_prog) 72 if torrent_ids: 73 match = torrent_id_prog.search(torrent_ids[0]) 74 if match: 75 torrent_id = match.group(2) 76 return torrent_id.replace('descargar-torrent', 'download') + '.torrent' 77 78 if not torrent_id: 79 raise UrlRewritingError('Unable to locate torrent ID from url %s' % url) 80 81 return url_format.format(torrent_id) 82 83 def search(self, task, entry, config=None): 84 if not config: 85 log.debug('Descargas2020 disabled') 86 return set() 87 log.debug('Search Descargas2020') 88 url_search = 'http://descargas2020.com/buscar' 89 results = set() 90 for search_string in entry.get('search_strings', [entry['title']]): 91 query = normalize_unicode(search_string) 92 query = re.sub(' \(\d\d\d\d\)$', '', query) 93 log.debug('Searching Descargas2020 %s', query) 94 query = unicodedata.normalize('NFD', query).encode('ascii', 'ignore') 95 data = {'q': query} 96 try: 97 response = task.requests.post(url_search, data=data) 98 except requests.RequestException as e: 99 log.error('Error searching Descargas2020: %s', e) 100 return 
results 101 content = response.content 102 soup = get_soup(content) 103 soup2 = soup.find('ul', attrs={'class': 'buscar-list'}) 104 children = soup2.findAll('a', href=True) 105 for child in children: 106 entry = Entry() 107 entry['url'] = child['href'] 108 entry_title = child.find('h2') 109 if entry_title is None: 110 log.debug('Ignore empty entry') 111 continue 112 entry_title = entry_title.text 113 if not entry_title: 114 continue 115 try: 116 entry_quality_lan = re.search('.+ \[([^\]]+)\](\[[^\]]+\])+$', entry_title).group(1) 117 except AttributeError: 118 log.debug('Quality not found') 119 continue 120 entry_title = re.sub(' \[.+]$', '', entry_title) 121 entry['title'] = entry_title + ' ' + entry_quality_lan 122 results.add(entry) 123 log.debug('Finish search Descargas2020 with %d entries', len(results)) 124 return results 125 126 @event('plugin.register') 127 def register_plugin(): 128 plugin.register(UrlRewriteDescargas2020, 'descargas2020', interfaces=['urlrewriter', 'search'], api_ver=2) 129 [end of flexget/plugins/sites/descargas2020.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/flexget/plugins/sites/descargas2020.py b/flexget/plugins/sites/descargas2020.py --- a/flexget/plugins/sites/descargas2020.py +++ b/flexget/plugins/sites/descargas2020.py @@ -22,7 +22,7 @@ requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}) requests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds')) -DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent' +DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/download/{:0>6}.torrent' class UrlRewriteDescargas2020(object): """Descargas2020 urlrewriter and search.""" @@ -67,13 +67,10 @@ torrent_id = match.group(1) if not torrent_id: log.debug('torrent ID not found, searching openTorrent script') - torrent_id_prog = re.compile('function openTorrent.*\n.*\{.*(\n.*)+window\.location\.href =\s*\"(.*\/\d+_-.*[^\/])\/?\";') - torrent_ids = soup.findAll(text=torrent_id_prog) - if torrent_ids: - match = torrent_id_prog.search(torrent_ids[0]) - if match: - torrent_id = match.group(2) - return torrent_id.replace('descargar-torrent', 'download') + '.torrent' + match = re.search('function openTorrent.*\n.*\{.*(\n.*)+window\.location\.href =\s*\".*\/(\d+.*)\";', + page.text, re.MULTILINE) + if match: + torrent_id = match.group(2).rstrip('/') if not torrent_id: raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)
{"golden_diff": "diff --git a/flexget/plugins/sites/descargas2020.py b/flexget/plugins/sites/descargas2020.py\n--- a/flexget/plugins/sites/descargas2020.py\n+++ b/flexget/plugins/sites/descargas2020.py\n@@ -22,7 +22,7 @@\n requests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})\n requests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))\n \n-DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent'\n+DESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/download/{:0>6}.torrent'\n \n class UrlRewriteDescargas2020(object):\n \"\"\"Descargas2020 urlrewriter and search.\"\"\"\n@@ -67,13 +67,10 @@\n torrent_id = match.group(1)\n if not torrent_id:\n log.debug('torrent ID not found, searching openTorrent script')\n- torrent_id_prog = re.compile('function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\"(.*\\/\\d+_-.*[^\\/])\\/?\\\";')\n- torrent_ids = soup.findAll(text=torrent_id_prog)\n- if torrent_ids:\n- match = torrent_id_prog.search(torrent_ids[0])\n- if match:\n- torrent_id = match.group(2)\n- return torrent_id.replace('descargar-torrent', 'download') + '.torrent'\n+ match = re.search('function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\".*\\/(\\d+.*)\\\";',\n+ page.text, re.MULTILINE)\n+ if match:\n+ torrent_id = match.group(2).rstrip('/')\n \n if not torrent_id:\n raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)\n", "issue": "descargas2020 plugin broken\n### Expected behaviour:\r\nit downloads torrent properly.\r\n\r\n### Actual behaviour:\r\n\r\n### Steps to reproduce:\r\nadd this rss http://feeds.feedburner.com/descargas2020new\r\nflexget execute\r\nyou will get:\r\n2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/\r\n2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None)\r\n2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved.\r\n\r\n#### Config:\r\n\r\n```\r\ntasks:\r\n series_sigo:\r\n rss:\r\n url: http://feeds.feedburner.com/descargas2020new\r\n link: link\r\n all_series: yes\r\n seen: local\r\n regexp:\r\n reject:\r\n - PCDVD\r\n accept_all: yes\r\n thetvdb_lookup: yes\r\n no_entries_ok: yes\r\n set:\r\n filename: \"{{title|pathscrub}}.torrent\"\r\n download: /home/flexget/torrent/\r\n```\r\n \r\n#### Log:\r\n\r\n2018-12-20 07:45 WARNING urlrewriter series_sigo URL rewriting descargas2020 failed: Unable to locate torrent ID from url http://descargas2020.com/descargar/serie-en-hd/9-1-1/temporada-2/capitulo-10/\r\n2018-12-20 07:45 ERROR entry series_sigo Failed 9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com]9-1-1 - Temporada 2 [HDTV 720p][Cap.210][AC3 5.1 Castellano][www.pctnew.com] (None)\r\n2018-12-20 07:45 INFO task series_sigo Rerunning the task in case better resolution can be achieved.\r\n\r\n```\r\npaste log output here\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 2.17.20\r\n- Python version: 2.7.13\r\n- Installation method: i don't remember\r\n- Using daemon (yes/no): no\r\n- OS and version: debian 9 x64\r\n- Link to crash log:\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive 
information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.\r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport re\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils.requests import Session, TimedLimiter\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils import requests\n\nfrom flexget.entry import Entry\nfrom flexget.utils.search import normalize_unicode\n\nimport unicodedata\n\nlog = logging.getLogger('descargas2020')\n\nrequests = Session()\nrequests.headers.update({'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})\nrequests.add_domain_limiter(TimedLimiter('descargas2020.com', '2 seconds'))\n\nDESCARGAS2020_TORRENT_FORMAT = 'http://descargas2020.com/torrents/{:0>6}.torrent'\n\nclass UrlRewriteDescargas2020(object):\n \"\"\"Descargas2020 urlrewriter and search.\"\"\"\n\n schema = {\n 'type': 'boolean',\n 'default': False\n }\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n rewritable_regex = '^http:\\/\\/(www.)?(descargas2020|tvsinpagar|tumejortorrent|torrentlocura|torrentrapid).com\\/.*'\n return re.match(rewritable_regex, url) and not url.endswith('.torrent')\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n entry['url'] = self.parse_download_page(entry['url'], task)\n\n @plugin.internet(log)\n def parse_download_page(self, url, task):\n log.verbose('Descargas2020 URL: %s', url)\n\n try:\n page = requests.get(url)\n except requests.exceptions.RequestException as e:\n raise UrlRewritingError(e)\n try:\n soup = get_soup(page.text)\n except Exception as e:\n raise UrlRewritingError(e)\n\n torrent_id = None\n url_format = DESCARGAS2020_TORRENT_FORMAT\n\n torrent_id_prog = re.compile(\"(?:parametros\\s*=\\s*\\n?)\\s*{\\s*\\n(?:\\s*'\\w+'\\s*:.*\\n)+\\s*'(?:torrentID|id)\"\n \"'\\s*:\\s*'(\\d+)'\")\n torrent_ids = soup.findAll(text=torrent_id_prog)\n if torrent_ids:\n match = torrent_id_prog.search(torrent_ids[0])\n if match:\n torrent_id = match.group(1)\n if not torrent_id:\n log.debug('torrent ID not found, searching openTorrent script')\n torrent_id_prog = re.compile('function openTorrent.*\\n.*\\{.*(\\n.*)+window\\.location\\.href =\\s*\\\"(.*\\/\\d+_-.*[^\\/])\\/?\\\";')\n torrent_ids = soup.findAll(text=torrent_id_prog)\n if torrent_ids:\n match = torrent_id_prog.search(torrent_ids[0])\n if match:\n torrent_id = match.group(2)\n return torrent_id.replace('descargar-torrent', 'download') 
+ '.torrent'\n\n if not torrent_id:\n raise UrlRewritingError('Unable to locate torrent ID from url %s' % url)\n\n return url_format.format(torrent_id)\n\n def search(self, task, entry, config=None):\n if not config:\n log.debug('Descargas2020 disabled')\n return set()\n log.debug('Search Descargas2020')\n url_search = 'http://descargas2020.com/buscar'\n results = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n query = normalize_unicode(search_string)\n query = re.sub(' \\(\\d\\d\\d\\d\\)$', '', query)\n log.debug('Searching Descargas2020 %s', query)\n query = unicodedata.normalize('NFD', query).encode('ascii', 'ignore')\n data = {'q': query}\n try:\n response = task.requests.post(url_search, data=data)\n except requests.RequestException as e:\n log.error('Error searching Descargas2020: %s', e)\n return results\n content = response.content\n soup = get_soup(content)\n soup2 = soup.find('ul', attrs={'class': 'buscar-list'})\n children = soup2.findAll('a', href=True)\n for child in children:\n entry = Entry()\n entry['url'] = child['href']\n entry_title = child.find('h2')\n if entry_title is None:\n log.debug('Ignore empty entry')\n continue\n entry_title = entry_title.text\n if not entry_title:\n continue\n try:\n entry_quality_lan = re.search('.+ \\[([^\\]]+)\\](\\[[^\\]]+\\])+$', entry_title).group(1)\n except AttributeError:\n log.debug('Quality not found')\n continue\n entry_title = re.sub(' \\[.+]$', '', entry_title)\n entry['title'] = entry_title + ' ' + entry_quality_lan\n results.add(entry)\n log.debug('Finish search Descargas2020 with %d entries', len(results))\n return results\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteDescargas2020, 'descargas2020', interfaces=['urlrewriter', 'search'], api_ver=2)\n", "path": "flexget/plugins/sites/descargas2020.py"}]}
3,026 | 472 | gh_patches_debug_2192 | rasdani/github-patches | git_diff
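The descargas2020 fix above swaps the soup-text lookup for a single re.search over the raw page text and points the torrent template at /download/ instead of /torrents/. The sketch below shows just the extraction step, using the same regular expression as the patch; the sample page snippet and the example.com URL are invented for illustration.

```python
import re

OPEN_TORRENT_RE = re.compile(
    r'function openTorrent.*\n.*\{.*(\n.*)+'
    r'window\.location\.href =\s*\".*\/(\d+.*)\";',
    re.MULTILINE,
)


def torrent_id_from_page(page_text):
    """Return the torrent id embedded in an openTorrent() redirect, or None."""
    match = OPEN_TORRENT_RE.search(page_text)
    return match.group(2).rstrip('/') if match else None


sample = (
    'function openTorrent(t)\n'
    '{\n'
    '    window.location.href = "http://example.com/descargar-torrent/123456_-serie-x/";\n'
    '}\n'
)
print(torrent_id_from_page(sample))  # -> 123456_-serie-x
```

The patched plugin then feeds the extracted id into its /download/ URL template in place of the old /torrents/ path.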
LMFDB__lmfdb-5179
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PIP dependencies We have several deprecated dependencies that we should fix ASAP ``` flask<=1.1.4 markupsafe<=2.0.1 itsdangerous<=2.0.1 ``` in particular, this prevents using lmfdb in an environment with jupyterlab installed, which is something we would like to have working on a short time basis. </issue> <code> [start of lmfdb/local_fields/__init__.py] 1 # -*- coding: utf-8 -*- 2 from lmfdb.app import app 3 from lmfdb.logger import make_logger 4 from flask import Blueprint, request, redirect 5 6 local_fields_page = Blueprint("local_fields", __name__, template_folder='templates', static_folder="static") 7 logger = make_logger(local_fields_page) 8 9 10 @local_fields_page.context_processor 11 def body_class(): 12 return {'body_class': 'local_fields'} 13 14 from . import main 15 assert main 16 17 from urllib.parse import urlparse, urlunparse 18 19 20 @local_fields_page.before_request 21 def redirect_local(): 22 urlparts = urlparse(request.url) 23 if 'LocalNumberField' in urlparts.path: 24 urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField')) 25 return redirect(urlunparse(urlparts), 301) 26 return 27 28 29 app.register_blueprint(local_fields_page, url_prefix="/padicField") 30 app.register_blueprint(local_fields_page, url_prefix="/LocalNumberField") 31 32 # API2 has been disabled for now 33 #from lmfdb.api2.searchers import register_search_function 34 #register_search_function( 35 # "$p$-adic_fields", 36 # "$p$-adic fields", 37 # "Search over $p$-adic fields", 38 # auto_search = 'lf_fields' 39 #) 40 [end of lmfdb/local_fields/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lmfdb/local_fields/__init__.py b/lmfdb/local_fields/__init__.py --- a/lmfdb/local_fields/__init__.py +++ b/lmfdb/local_fields/__init__.py @@ -27,7 +27,6 @@ app.register_blueprint(local_fields_page, url_prefix="/padicField") -app.register_blueprint(local_fields_page, url_prefix="/LocalNumberField") # API2 has been disabled for now #from lmfdb.api2.searchers import register_search_function
{"golden_diff": "diff --git a/lmfdb/local_fields/__init__.py b/lmfdb/local_fields/__init__.py\n--- a/lmfdb/local_fields/__init__.py\n+++ b/lmfdb/local_fields/__init__.py\n@@ -27,7 +27,6 @@\n \n \n app.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\n-app.register_blueprint(local_fields_page, url_prefix=\"/LocalNumberField\")\n \n # API2 has been disabled for now\n #from lmfdb.api2.searchers import register_search_function\n", "issue": "PIP dependencies\nWe have several deprecated dependencies that we should fix ASAP\r\n```\r\nflask<=1.1.4\r\nmarkupsafe<=2.0.1\r\nitsdangerous<=2.0.1\r\n```\r\n\r\nin particular, this prevents using lmfdb in an environment with jupyterlab installed, which is something we would like to have working on a short time basis. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lmfdb.app import app\nfrom lmfdb.logger import make_logger\nfrom flask import Blueprint, request, redirect\n\nlocal_fields_page = Blueprint(\"local_fields\", __name__, template_folder='templates', static_folder=\"static\")\nlogger = make_logger(local_fields_page)\n\n\n@local_fields_page.context_processor\ndef body_class():\n return {'body_class': 'local_fields'}\n\nfrom . import main\nassert main\n\nfrom urllib.parse import urlparse, urlunparse\n\n\n@local_fields_page.before_request\ndef redirect_local():\n urlparts = urlparse(request.url)\n if 'LocalNumberField' in urlparts.path:\n urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))\n return redirect(urlunparse(urlparts), 301)\n return\n\n\napp.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\napp.register_blueprint(local_fields_page, url_prefix=\"/LocalNumberField\")\n\n# API2 has been disabled for now\n#from lmfdb.api2.searchers import register_search_function\n#register_search_function(\n# \"$p$-adic_fields\",\n# \"$p$-adic fields\",\n# \"Search over $p$-adic fields\",\n# auto_search = 'lf_fields'\n#)\n", "path": "lmfdb/local_fields/__init__.py"}]}
976 | 113 | gh_patches_debug_2958 | rasdani/github-patches | git_diff
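The accompanying golden diff is a one-line removal: the local_fields blueprint stops being registered a second time under /LocalNumberField. A plausible reading, given the pinned flask<=1.1.4 in the issue, is that this clears the way for Flask 2.x, which rejects registering one blueprint twice under the same name; treat that version behaviour, and the exact fix rationale, as an inference rather than something stated in the record. The sketch below uses placeholder names and shows the Flask 2.x `name=` override that keeps two prefixes working when both registrations are wanted.

```python
from flask import Blueprint, Flask

app = Flask(__name__)
fields = Blueprint("fields", __name__)


@fields.route("/")
def index():
    return "p-adic fields"


app.register_blueprint(fields, url_prefix="/padicField")

# On recent Flask releases, repeating the call below *without* the name override fails
# because the blueprint name "fields" is already registered on the app. Giving the
# second mount its own registration name keeps both URL prefixes routable.
app.register_blueprint(fields, url_prefix="/LocalNumberField", name="fields_legacy")
```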
learningequality__kolibri-4689
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Shows sorry! something went wrong. ### Observed behavior When coach is going to the recent tab to see exercise and video progress then it shows error. ### Expected behavior It must show progress instead of error. ### Steps to reproduce 1. Login with coach. 2. go to the recent tab. 3. Go to the exercise/video and see. ### Context * Kolibri version : kolibri 0.11.0 * Operating system : Ubuntu 14.04 * Browser : chrome ### Screenshot ![1](https://user-images.githubusercontent.com/12776071/50138341-4d958180-02c4-11e9-92b7-01a9fb28acc2.png) ![2](https://user-images.githubusercontent.com/12776071/50138342-4d958180-02c4-11e9-9426-fe0709d16751.png) ![3](https://user-images.githubusercontent.com/12776071/50138343-4e2e1800-02c4-11e9-9ac4-e520796024ed.png) </issue> <code> [start of kolibri/plugins/coach/api.py] 1 import datetime 2 3 from dateutil.parser import parse 4 from django.db import connection 5 from django.db.models import Min 6 from django.db.models import Q 7 from django.utils import timezone 8 from rest_framework import mixins 9 from rest_framework import pagination 10 from rest_framework import permissions 11 from rest_framework import viewsets 12 13 from .serializers import ContentReportSerializer 14 from .serializers import ContentSummarySerializer 15 from .serializers import LessonReportSerializer 16 from .serializers import UserReportSerializer 17 from .utils.return_users import get_members_or_user 18 from kolibri.core.auth.constants import collection_kinds 19 from kolibri.core.auth.constants import role_kinds 20 from kolibri.core.auth.models import Collection 21 from kolibri.core.auth.models import FacilityUser 22 from kolibri.core.content.models import ContentNode 23 from kolibri.core.decorators import query_params_required 24 from kolibri.core.lessons.models import Lesson 25 from kolibri.core.logger.models import ContentSummaryLog 26 from kolibri.core.logger.models import MasteryLog 27 28 29 collection_kind_choices = tuple([choice[0] for choice in collection_kinds.choices] + ['user']) 30 31 32 class OptionalPageNumberPagination(pagination.PageNumberPagination): 33 """ 34 Pagination class that allows for page number-style pagination, when requested. 35 To activate, the `page_size` argument must be set. 
For example, to request the first 20 records: 36 `?page_size=20&page=1` 37 """ 38 page_size = None 39 page_size_query_param = "page_size" 40 41 42 class KolibriReportPermissions(permissions.BasePermission): 43 44 # check if requesting user has permission for collection or user 45 def has_permission(self, request, view): 46 if isinstance(view, LessonReportViewset): 47 report_pk = view.kwargs.get('pk', None) 48 if report_pk is None: 49 # If requesting list view, check if requester has coach/admin permissions on whole facility 50 collection_kind = 'facility' 51 collection_or_user_pk = request.user.facility_id 52 else: 53 # If requesting detail view, only check if requester has permissions on the Classroom 54 collection_kind = 'classroom' 55 collection_or_user_pk = Lesson.objects.get(pk=report_pk).collection.id 56 57 else: 58 collection_kind = view.kwargs.get('collection_kind', 'user') 59 collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk')) 60 61 allowed_roles = [role_kinds.ADMIN, role_kinds.COACH] 62 try: 63 if 'user' == collection_kind: 64 return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk)) 65 else: 66 return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk)) 67 except (FacilityUser.DoesNotExist, Collection.DoesNotExist): 68 return False 69 70 71 @query_params_required(channel_id=str, content_node_id=str, collection_kind=collection_kind_choices, collection_id=str) 72 class ReportBaseViewSet(mixins.ListModelMixin, viewsets.GenericViewSet): 73 74 permission_classes = (KolibriReportPermissions,) 75 76 77 class UserReportViewSet(ReportBaseViewSet): 78 79 pagination_class = OptionalPageNumberPagination 80 serializer_class = UserReportSerializer 81 82 def get_queryset(self): 83 assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint' 84 return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']) 85 86 87 class ContentReportViewSet(ReportBaseViewSet): 88 89 pagination_class = OptionalPageNumberPagination 90 serializer_class = ContentReportSerializer 91 92 def get_queryset(self): 93 content_node_id = self.kwargs['content_node_id'] 94 return ContentNode.objects.filter(Q(parent=content_node_id) & Q(available=True)).order_by('lft') 95 96 97 @query_params_required(channel_id=str, collection_kind=collection_kind_choices, collection_id=str) 98 class ContentSummaryViewSet(viewsets.ReadOnlyModelViewSet): 99 100 permission_classes = (KolibriReportPermissions,) 101 serializer_class = ContentSummarySerializer 102 103 def get_queryset(self): 104 channel_id = self.kwargs['channel_id'] 105 return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft') 106 107 108 class RecentReportViewSet(ReportBaseViewSet): 109 110 pagination_class = OptionalPageNumberPagination 111 serializer_class = ContentReportSerializer 112 113 def get_queryset(self): 114 channel_id = self.kwargs['channel_id'] 115 attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False) 116 query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id']) 117 if self.request.query_params.get('last_active_time'): 118 # Last active time specified 119 datetime_cutoff = parse(self.request.query_params.get('last_active_time')) 120 else: 121 datetime_cutoff = timezone.now() - datetime.timedelta(7) 122 # Set on the kwargs to pass into the serializer 123 self.kwargs['last_active_time'] = datetime_cutoff.isoformat() 
124 recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter( 125 Q(progress__gt=0) | Q(masterylogs__in=attempted_mastery_logs), 126 user__in=list(get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])), 127 end_timestamp__gte=datetime_cutoff).values_list('content_id', flat=True) 128 if connection.vendor == 'postgresql': 129 pks_with_unique_content_ids = ContentNode.objects.order_by('content_id').distinct('content_id').filter( 130 channel_id=channel_id, content_id__in=recent_content_items).values_list('pk', flat=True) 131 else: 132 # note from rtibbles: 133 # As good as either I or jamalex could come up with to ensure that we only return 134 # unique content_id'ed ContentNodes from the coach recent report endpoint. 135 # Would have loved to use distinct('content_id'), but unfortunately DISTINCT ON is Postgresql only 136 pks_with_unique_content_ids = ContentNode.objects.filter( 137 channel_id=channel_id, content_id__in=recent_content_items).values('content_id').order_by('lft').annotate( 138 pk=Min('pk')).values_list('pk', flat=True) 139 return ContentNode.objects.filter(pk__in=pks_with_unique_content_ids).order_by('lft') 140 141 142 class LessonReportViewset(viewsets.ReadOnlyModelViewSet): 143 permission_classes = (permissions.IsAuthenticated, KolibriReportPermissions,) 144 serializer_class = LessonReportSerializer 145 queryset = Lesson.objects.all() 146 [end of kolibri/plugins/coach/api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/plugins/coach/api.py b/kolibri/plugins/coach/api.py --- a/kolibri/plugins/coach/api.py +++ b/kolibri/plugins/coach/api.py @@ -102,7 +102,7 @@ def get_queryset(self): channel_id = self.kwargs['channel_id'] - return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft') + return ContentNode.objects.filter(Q(channel_id=channel_id)).order_by('lft') class RecentReportViewSet(ReportBaseViewSet):
{"golden_diff": "diff --git a/kolibri/plugins/coach/api.py b/kolibri/plugins/coach/api.py\n--- a/kolibri/plugins/coach/api.py\n+++ b/kolibri/plugins/coach/api.py\n@@ -102,7 +102,7 @@\n \n def get_queryset(self):\n channel_id = self.kwargs['channel_id']\n- return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')\n+ return ContentNode.objects.filter(Q(channel_id=channel_id)).order_by('lft')\n \n \n class RecentReportViewSet(ReportBaseViewSet):\n", "issue": "Shows sorry! something went wrong.\n### Observed behavior\r\nWhen coach is going to the recent tab to see exercise and video progress then it shows error.\r\n\r\n\r\n### Expected behavior\r\nIt must show progress instead of error.\r\n\r\n### Steps to reproduce\r\n1. Login with coach.\r\n2. go to the recent tab.\r\n3. Go to the exercise/video and see.\r\n\r\n### Context\r\n\r\n * Kolibri version : kolibri 0.11.0\r\n * Operating system : Ubuntu 14.04\r\n * Browser : chrome\r\n\r\n### Screenshot\r\n![1](https://user-images.githubusercontent.com/12776071/50138341-4d958180-02c4-11e9-92b7-01a9fb28acc2.png)\r\n![2](https://user-images.githubusercontent.com/12776071/50138342-4d958180-02c4-11e9-9426-fe0709d16751.png)\r\n![3](https://user-images.githubusercontent.com/12776071/50138343-4e2e1800-02c4-11e9-9ac4-e520796024ed.png)\r\n\n", "before_files": [{"content": "import datetime\n\nfrom dateutil.parser import parse\nfrom django.db import connection\nfrom django.db.models import Min\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom rest_framework import mixins\nfrom rest_framework import pagination\nfrom rest_framework import permissions\nfrom rest_framework import viewsets\n\nfrom .serializers import ContentReportSerializer\nfrom .serializers import ContentSummarySerializer\nfrom .serializers import LessonReportSerializer\nfrom .serializers import UserReportSerializer\nfrom .utils.return_users import get_members_or_user\nfrom kolibri.core.auth.constants import collection_kinds\nfrom kolibri.core.auth.constants import role_kinds\nfrom kolibri.core.auth.models import Collection\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.decorators import query_params_required\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.models import ContentSummaryLog\nfrom kolibri.core.logger.models import MasteryLog\n\n\ncollection_kind_choices = tuple([choice[0] for choice in collection_kinds.choices] + ['user'])\n\n\nclass OptionalPageNumberPagination(pagination.PageNumberPagination):\n \"\"\"\n Pagination class that allows for page number-style pagination, when requested.\n To activate, the `page_size` argument must be set. 
For example, to request the first 20 records:\n `?page_size=20&page=1`\n \"\"\"\n page_size = None\n page_size_query_param = \"page_size\"\n\n\nclass KolibriReportPermissions(permissions.BasePermission):\n\n # check if requesting user has permission for collection or user\n def has_permission(self, request, view):\n if isinstance(view, LessonReportViewset):\n report_pk = view.kwargs.get('pk', None)\n if report_pk is None:\n # If requesting list view, check if requester has coach/admin permissions on whole facility\n collection_kind = 'facility'\n collection_or_user_pk = request.user.facility_id\n else:\n # If requesting detail view, only check if requester has permissions on the Classroom\n collection_kind = 'classroom'\n collection_or_user_pk = Lesson.objects.get(pk=report_pk).collection.id\n\n else:\n collection_kind = view.kwargs.get('collection_kind', 'user')\n collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))\n\n allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]\n try:\n if 'user' == collection_kind:\n return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))\n else:\n return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))\n except (FacilityUser.DoesNotExist, Collection.DoesNotExist):\n return False\n\n\n@query_params_required(channel_id=str, content_node_id=str, collection_kind=collection_kind_choices, collection_id=str)\nclass ReportBaseViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n\n\nclass UserReportViewSet(ReportBaseViewSet):\n\n pagination_class = OptionalPageNumberPagination\n serializer_class = UserReportSerializer\n\n def get_queryset(self):\n assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint'\n return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])\n\n\nclass ContentReportViewSet(ReportBaseViewSet):\n\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n content_node_id = self.kwargs['content_node_id']\n return ContentNode.objects.filter(Q(parent=content_node_id) & Q(available=True)).order_by('lft')\n\n\n@query_params_required(channel_id=str, collection_kind=collection_kind_choices, collection_id=str)\nclass ContentSummaryViewSet(viewsets.ReadOnlyModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n serializer_class = ContentSummarySerializer\n\n def get_queryset(self):\n channel_id = self.kwargs['channel_id']\n return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')\n\n\nclass RecentReportViewSet(ReportBaseViewSet):\n\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n channel_id = self.kwargs['channel_id']\n attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)\n query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])\n if self.request.query_params.get('last_active_time'):\n # Last active time specified\n datetime_cutoff = parse(self.request.query_params.get('last_active_time'))\n else:\n datetime_cutoff = timezone.now() - datetime.timedelta(7)\n # Set on the kwargs to pass into the serializer\n self.kwargs['last_active_time'] = datetime_cutoff.isoformat()\n recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(\n Q(progress__gt=0) | 
Q(masterylogs__in=attempted_mastery_logs),\n user__in=list(get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])),\n end_timestamp__gte=datetime_cutoff).values_list('content_id', flat=True)\n if connection.vendor == 'postgresql':\n pks_with_unique_content_ids = ContentNode.objects.order_by('content_id').distinct('content_id').filter(\n channel_id=channel_id, content_id__in=recent_content_items).values_list('pk', flat=True)\n else:\n # note from rtibbles:\n # As good as either I or jamalex could come up with to ensure that we only return\n # unique content_id'ed ContentNodes from the coach recent report endpoint.\n # Would have loved to use distinct('content_id'), but unfortunately DISTINCT ON is Postgresql only\n pks_with_unique_content_ids = ContentNode.objects.filter(\n channel_id=channel_id, content_id__in=recent_content_items).values('content_id').order_by('lft').annotate(\n pk=Min('pk')).values_list('pk', flat=True)\n return ContentNode.objects.filter(pk__in=pks_with_unique_content_ids).order_by('lft')\n\n\nclass LessonReportViewset(viewsets.ReadOnlyModelViewSet):\n permission_classes = (permissions.IsAuthenticated, KolibriReportPermissions,)\n serializer_class = LessonReportSerializer\n queryset = Lesson.objects.all()\n", "path": "kolibri/plugins/coach/api.py"}]}
2,537 | 131 | gh_patches_debug_30051 | rasdani/github-patches | git_diff
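The Kolibri fix is a one-line loosening of the content summary queryset: the available=True clause is dropped. A plausible reading, an inference rather than something stated in the record, is that the coach report looks up summary rows for nodes that are no longer marked available, so the filtered queryset finds nothing and the page falls over with the generic error. In queryset terms the change amounts to the following sketch, with placeholder names rather than Kolibri code.

```python
from django.db.models import Q


def summary_condition(channel_id, include_unavailable=True):
    """Filter for a channel's nodes; the availability clause is what the patch removes."""
    condition = Q(channel_id=channel_id)
    if not include_unavailable:
        # With this extra clause, a lookup for a node that has become unavailable
        # matches no row even though learner logs may still reference it.
        condition &= Q(available=True)
    return condition


# Patched behaviour: ContentNode.objects.filter(summary_condition(channel_id)).order_by('lft')
```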
doccano__doccano-2228
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add a function to filter labels When I rechecked the labels of the annotated data, I had no way of filtering out the labels I wanted to see. For example, when I am doing a check of dichotomous annotations, I would like to filter the data set to find out which labels are positive and which are negative, so that I can save time on the check. However, due to the lack of this function, I have to filter one by one from dataset, which wastes a lot of time and manpower. Thanks for every contributor! </issue> <code> [start of backend/examples/filters.py] 1 from django.db.models import Count, Q 2 from django_filters.rest_framework import BooleanFilter, FilterSet 3 4 from .models import Example 5 6 7 class ExampleFilter(FilterSet): 8 confirmed = BooleanFilter(field_name="states", method="filter_by_state") 9 10 def filter_by_state(self, queryset, field_name, is_confirmed: bool): 11 queryset = queryset.annotate( 12 num_confirm=Count( 13 expression=field_name, 14 filter=Q(**{f"{field_name}__confirmed_by": self.request.user}) 15 | Q(project__collaborative_annotation=True), 16 ) 17 ) 18 if is_confirmed: 19 queryset = queryset.filter(num_confirm__gte=1) 20 else: 21 queryset = queryset.filter(num_confirm__lte=0) 22 return queryset 23 24 class Meta: 25 model = Example 26 fields = ("project", "text", "created_at", "updated_at") 27 [end of backend/examples/filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/examples/filters.py b/backend/examples/filters.py --- a/backend/examples/filters.py +++ b/backend/examples/filters.py @@ -1,11 +1,12 @@ -from django.db.models import Count, Q -from django_filters.rest_framework import BooleanFilter, FilterSet +from django.db.models import Count, Q, QuerySet +from django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet from .models import Example class ExampleFilter(FilterSet): confirmed = BooleanFilter(field_name="states", method="filter_by_state") + label = CharFilter(method="filter_by_label") def filter_by_state(self, queryset, field_name, is_confirmed: bool): queryset = queryset.annotate( @@ -21,6 +22,35 @@ queryset = queryset.filter(num_confirm__lte=0) return queryset + def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet: + """Filter examples by a given label name. + + This performs filtering on all of the following labels at once: + - categories + - spans + - relations + - bboxes + - segmentations + + Todo: Consider project type to make filtering more efficient. + + Args: + queryset (QuerySet): QuerySet to filter. + field_name (str): This equals to `label`. + label (str): The label name to filter. + + Returns: + QuerySet: Filtered examples. + """ + queryset = queryset.filter( + Q(categories__label__text=label) + | Q(spans__label__text=label) + | Q(relations__type__text=label) + | Q(bboxes__label__text=label) + | Q(segmentations__label__text=label) + ) + return queryset + class Meta: model = Example - fields = ("project", "text", "created_at", "updated_at") + fields = ("project", "text", "created_at", "updated_at", "label")
{"golden_diff": "diff --git a/backend/examples/filters.py b/backend/examples/filters.py\n--- a/backend/examples/filters.py\n+++ b/backend/examples/filters.py\n@@ -1,11 +1,12 @@\n-from django.db.models import Count, Q\n-from django_filters.rest_framework import BooleanFilter, FilterSet\n+from django.db.models import Count, Q, QuerySet\n+from django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet\n \n from .models import Example\n \n \n class ExampleFilter(FilterSet):\n confirmed = BooleanFilter(field_name=\"states\", method=\"filter_by_state\")\n+ label = CharFilter(method=\"filter_by_label\")\n \n def filter_by_state(self, queryset, field_name, is_confirmed: bool):\n queryset = queryset.annotate(\n@@ -21,6 +22,35 @@\n queryset = queryset.filter(num_confirm__lte=0)\n return queryset\n \n+ def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet:\n+ \"\"\"Filter examples by a given label name.\n+\n+ This performs filtering on all of the following labels at once:\n+ - categories\n+ - spans\n+ - relations\n+ - bboxes\n+ - segmentations\n+\n+ Todo: Consider project type to make filtering more efficient.\n+\n+ Args:\n+ queryset (QuerySet): QuerySet to filter.\n+ field_name (str): This equals to `label`.\n+ label (str): The label name to filter.\n+\n+ Returns:\n+ QuerySet: Filtered examples.\n+ \"\"\"\n+ queryset = queryset.filter(\n+ Q(categories__label__text=label)\n+ | Q(spans__label__text=label)\n+ | Q(relations__type__text=label)\n+ | Q(bboxes__label__text=label)\n+ | Q(segmentations__label__text=label)\n+ )\n+ return queryset\n+\n class Meta:\n model = Example\n- fields = (\"project\", \"text\", \"created_at\", \"updated_at\")\n+ fields = (\"project\", \"text\", \"created_at\", \"updated_at\", \"label\")\n", "issue": "Add a function to filter labels\nWhen I rechecked the labels of the annotated data, I had no way of filtering out the labels I wanted to see. For example, when I am doing a check of dichotomous annotations, I would like to filter the data set to find out which labels are positive and which are negative, so that I can save time on the check. However, due to the lack of this function, I have to filter one by one from dataset, which wastes a lot of time and manpower.\r\n\r\nThanks for every contributor!\n", "before_files": [{"content": "from django.db.models import Count, Q\nfrom django_filters.rest_framework import BooleanFilter, FilterSet\n\nfrom .models import Example\n\n\nclass ExampleFilter(FilterSet):\n confirmed = BooleanFilter(field_name=\"states\", method=\"filter_by_state\")\n\n def filter_by_state(self, queryset, field_name, is_confirmed: bool):\n queryset = queryset.annotate(\n num_confirm=Count(\n expression=field_name,\n filter=Q(**{f\"{field_name}__confirmed_by\": self.request.user})\n | Q(project__collaborative_annotation=True),\n )\n )\n if is_confirmed:\n queryset = queryset.filter(num_confirm__gte=1)\n else:\n queryset = queryset.filter(num_confirm__lte=0)\n return queryset\n\n class Meta:\n model = Example\n fields = (\"project\", \"text\", \"created_at\", \"updated_at\")\n", "path": "backend/examples/filters.py"}]}
879
463
gh_patches_debug_13543
rasdani/github-patches
git_diff
pre-commit__pre-commit-33
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pre-commit -i does not install the file with +x No executable = no run :'( </issue> <code> [start of pre_commit/git.py] 1 import functools 2 import os 3 import os.path 4 import pkg_resources 5 import re 6 from plumbum import local 7 8 from pre_commit.util import memoize_by_cwd 9 10 11 def _get_root_new(): 12 path = os.getcwd() 13 while len(path) > 1: 14 if os.path.exists(os.path.join(path, '.git')): 15 return path 16 else: 17 path = os.path.normpath(os.path.join(path, '../')) 18 raise AssertionError('called from outside of the gits') 19 20 21 @memoize_by_cwd 22 def get_root(): 23 return _get_root_new() 24 25 26 @memoize_by_cwd 27 def get_pre_commit_path(): 28 return os.path.join(get_root(), '.git/hooks/pre-commit') 29 30 31 def create_pre_commit(): 32 path = get_pre_commit_path() 33 pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh') 34 local.path(path).write(local.path(pre_commit_file).read()) 35 36 37 def remove_pre_commit(): 38 local.path(get_pre_commit_path()).delete() 39 40 41 def get_head_sha(git_repo_path): 42 with local.cwd(git_repo_path): 43 return local['git']['rev-parse', 'HEAD']().strip() 44 45 46 @memoize_by_cwd 47 def get_staged_files(): 48 return local['git']['diff', '--staged', '--name-only']().splitlines() 49 50 51 @memoize_by_cwd 52 def get_all_files(): 53 return local['git']['ls-files']().splitlines() 54 55 56 def get_files_matching(all_file_list_strategy): 57 @functools.wraps(all_file_list_strategy) 58 @memoize_by_cwd 59 def wrapper(expr): 60 regex = re.compile(expr) 61 return set(filter(os.path.exists, ( 62 filename 63 for filename in all_file_list_strategy() 64 if regex.search(filename) 65 ))) 66 return wrapper 67 68 69 get_staged_files_matching = get_files_matching(get_staged_files) 70 get_all_files_matching = get_files_matching(get_all_files) 71 [end of pre_commit/git.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -3,6 +3,7 @@
 import os.path
 import pkg_resources
 import re
+import stat
 from plumbum import local
 
 from pre_commit.util import memoize_by_cwd
@@ -32,6 +33,8 @@
     path = get_pre_commit_path()
     pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')
     local.path(path).write(local.path(pre_commit_file).read())
+    original_mode = os.stat(path).st_mode
+    os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
 
 
 def remove_pre_commit():
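The essence of the fix above is OR-ing the execute bits into the installed hook's existing mode. A self-contained sketch of that technique against a throwaway file on a POSIX filesystem (the path and contents are stand-ins, not pre-commit's real resources):

import os
import stat
import tempfile

# stand-in for the installed .git/hooks/pre-commit file
path = os.path.join(tempfile.mkdtemp(), "pre-commit")
with open(path, "w") as f:
    f.write("#!/usr/bin/env bash\necho running hooks\n")

original_mode = os.stat(path).st_mode
# add execute permission for user, group and other, keeping every other bit intact
os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

print(oct(os.stat(path).st_mode & 0o777))  # typically 0o755 under a 022 umask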
{"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -3,6 +3,7 @@\n import os.path\n import pkg_resources\n import re\n+import stat\n from plumbum import local\n \n from pre_commit.util import memoize_by_cwd\n@@ -32,6 +33,8 @@\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n+ original_mode = os.stat(path).st_mode\n+ os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n \n \n def remove_pre_commit():\n", "issue": "pre-commit -i does not install the file with +x\nNo executable = no run :'(\n\n", "before_files": [{"content": "import functools\nimport os\nimport os.path\nimport pkg_resources\nimport re\nfrom plumbum import local\n\nfrom pre_commit.util import memoize_by_cwd\n\n\ndef _get_root_new():\n path = os.getcwd()\n while len(path) > 1:\n if os.path.exists(os.path.join(path, '.git')):\n return path\n else:\n path = os.path.normpath(os.path.join(path, '../'))\n raise AssertionError('called from outside of the gits')\n\n\n@memoize_by_cwd\ndef get_root():\n return _get_root_new()\n\n\n@memoize_by_cwd\ndef get_pre_commit_path():\n return os.path.join(get_root(), '.git/hooks/pre-commit')\n\n\ndef create_pre_commit():\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n\n\ndef remove_pre_commit():\n local.path(get_pre_commit_path()).delete()\n\n\ndef get_head_sha(git_repo_path):\n with local.cwd(git_repo_path):\n return local['git']['rev-parse', 'HEAD']().strip()\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return local['git']['diff', '--staged', '--name-only']().splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return local['git']['ls-files']().splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(expr):\n regex = re.compile(expr)\n return set(filter(os.path.exists, (\n filename\n for filename in all_file_list_strategy()\n if regex.search(filename)\n )))\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\n", "path": "pre_commit/git.py"}]}
1,108
170
gh_patches_debug_37725
rasdani/github-patches
git_diff
pwndbg__pwndbg-291
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Typesetting seems to be wrong ![image](https://cloud.githubusercontent.com/assets/7897423/24843862/42e004bc-1dd9-11e7-8447-b8cf87ed6fe1.png) </issue> <code> [start of pwndbg/__init__.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import gdb 9 10 import pwndbg.android 11 import pwndbg.arch 12 import pwndbg.arguments 13 import pwndbg.argv 14 import pwndbg.color 15 import pwndbg.commands 16 import pwndbg.commands.argv 17 import pwndbg.commands.aslr 18 import pwndbg.commands.auxv 19 import pwndbg.commands.checksec 20 import pwndbg.commands.config 21 import pwndbg.commands.context 22 import pwndbg.commands.cpsr 23 import pwndbg.commands.dt 24 import pwndbg.commands.dumpargs 25 import pwndbg.commands.elf 26 import pwndbg.commands.gdbinit 27 import pwndbg.commands.got 28 import pwndbg.commands.heap 29 import pwndbg.commands.hexdump 30 import pwndbg.commands.ida 31 import pwndbg.commands.misc 32 import pwndbg.commands.next 33 import pwndbg.commands.peda 34 import pwndbg.commands.procinfo 35 import pwndbg.commands.radare2 36 import pwndbg.commands.reload 37 import pwndbg.commands.rop 38 import pwndbg.commands.ropper 39 import pwndbg.commands.search 40 import pwndbg.commands.segments 41 import pwndbg.commands.shell 42 import pwndbg.commands.stack 43 import pwndbg.commands.start 44 import pwndbg.commands.telescope 45 import pwndbg.commands.theme 46 import pwndbg.commands.version 47 import pwndbg.commands.vmmap 48 import pwndbg.commands.windbg 49 import pwndbg.commands.xor 50 import pwndbg.constants 51 import pwndbg.disasm 52 import pwndbg.disasm.arm 53 import pwndbg.disasm.jump 54 import pwndbg.disasm.mips 55 import pwndbg.disasm.ppc 56 import pwndbg.disasm.sparc 57 import pwndbg.disasm.x86 58 import pwndbg.dt 59 import pwndbg.elf 60 import pwndbg.exception 61 import pwndbg.heap 62 import pwndbg.inthook 63 import pwndbg.memory 64 import pwndbg.net 65 import pwndbg.proc 66 import pwndbg.prompt 67 import pwndbg.regs 68 import pwndbg.stack 69 import pwndbg.typeinfo 70 import pwndbg.version 71 import pwndbg.vmmap 72 import pwndbg.wrappers 73 74 __version__ = pwndbg.version.__version__ 75 version = __version__ 76 77 try: 78 import unicorn 79 import pwndbg.emu 80 except: 81 pass 82 83 __all__ = [ 84 'arch', 85 'auxv', 86 'chain', 87 'color', 88 'compat', 89 'disasm', 90 'dt', 91 'elf', 92 'enhance', 93 'events', 94 'file', 95 'function', 96 'heap', 97 'hexdump', 98 'ida', 99 'info', 100 'linkmap', 101 'malloc', 102 'memoize', 103 'memory', 104 'proc', 105 'regs', 106 'remote', 107 'search', 108 'stack', 109 'strings', 110 'symbol', 111 'typeinfo', 112 'ui', 113 'vmmap' 114 ] 115 116 prompt = "pwndbg> " 117 prompt = "\x02" + prompt + "\x01" # STX + prompt + SOH 118 prompt = pwndbg.color.red(prompt) 119 prompt = pwndbg.color.bold(prompt) 120 prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX 121 122 pre_commands = """ 123 set confirm off 124 set verbose off 125 set prompt %s 126 set pagination off 127 set height 0 128 set history expansion on 129 set history save on 130 set follow-fork-mode child 131 set backtrace past-main on 132 set step-mode on 133 set print pretty on 134 set width 0 135 set print elements 15 136 handle SIGALRM nostop print nopass 137 handle SIGBUS stop print nopass 138 handle SIGPIPE nostop print nopass 139 
handle SIGSEGV stop print nopass 140 """.strip() % prompt 141 142 for line in pre_commands.strip().splitlines(): 143 gdb.execute(line) 144 145 # This may throw an exception, see pwndbg/pwndbg#27 146 try: 147 gdb.execute("set disassembly-flavor intel") 148 except gdb.error: 149 pass 150 [end of pwndbg/__init__.py] [start of pwndbg/ui.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """ 4 A few helpers for making things print pretty-like. 5 """ 6 from __future__ import absolute_import 7 from __future__ import division 8 from __future__ import print_function 9 from __future__ import unicode_literals 10 11 import fcntl 12 import struct 13 import sys 14 import termios 15 16 import pwndbg.arch 17 import pwndbg.color.context as C 18 import pwndbg.color.theme as theme 19 import pwndbg.config as config 20 21 theme.Parameter('banner-separator', '─', 'repeated banner separator character') 22 23 def banner(title): 24 title = title.upper() 25 try: 26 _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234')) 27 except: 28 width = 80 29 width -= 2 30 return C.banner(("[{:%s^%ss}]" % (config.banner_separator, width)).format(title)) 31 32 def addrsz(address): 33 address = int(address) & pwndbg.arch.ptrmask 34 return "%{}x".format(2*pwndbg.arch.ptrsize) % address 35 [end of pwndbg/ui.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/__init__.py b/pwndbg/__init__.py
--- a/pwndbg/__init__.py
+++ b/pwndbg/__init__.py
@@ -5,6 +5,8 @@
 from __future__ import print_function
 from __future__ import unicode_literals
 
+import signal
+
 import gdb
 
 import pwndbg.android
@@ -67,6 +69,7 @@
 import pwndbg.regs
 import pwndbg.stack
 import pwndbg.typeinfo
+import pwndbg.ui
 import pwndbg.version
 import pwndbg.vmmap
 import pwndbg.wrappers
@@ -131,13 +134,13 @@
 set backtrace past-main on
 set step-mode on
 set print pretty on
-set width 0
+set width %i
 set print elements 15
 handle SIGALRM nostop print nopass
 handle SIGBUS stop print nopass
 handle SIGPIPE nostop print nopass
 handle SIGSEGV stop print nopass
-""".strip() % prompt
+""".strip() % (prompt, pwndbg.ui.get_window_size()[1])
 
 for line in pre_commands.strip().splitlines():
     gdb.execute(line)
@@ -147,3 +150,7 @@
     gdb.execute("set disassembly-flavor intel")
 except gdb.error:
     pass
+
+
+# handle resize event to align width and completion
+signal.signal(signal.SIGWINCH, lambda signum, frame: gdb.execute("set width %i" % pwndbg.ui.get_window_size()[1]))
diff --git a/pwndbg/ui.py b/pwndbg/ui.py
--- a/pwndbg/ui.py
+++ b/pwndbg/ui.py
@@ -9,6 +9,7 @@
 from __future__ import unicode_literals
 
 import fcntl
+import os
 import struct
 import sys
 import termios
@@ -22,13 +23,21 @@
 
 def banner(title):
     title = title.upper()
-    try:
-        _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))
-    except:
-        width = 80
+    _height, width = get_window_size()
     width -= 2
     return C.banner(("[{:%s^%ss}]" % (config.banner_separator, width)).format(title))
 
 def addrsz(address):
     address = int(address) & pwndbg.arch.ptrmask
     return "%{}x".format(2*pwndbg.arch.ptrsize) % address
+
+def get_window_size():
+    fallback = (int(os.environ.get('LINES', 20)), int(os.environ.get('COLUMNS', 80)))
+    if not sys.stdin.isatty:
+        return fallback
+    try:
+        # get terminal size and force ret buffer len of 4 bytes for safe unpacking by passing equally long arg
+        rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))
+    except:
+        rows, cols = fallback
+    return rows, cols
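The get_window_size helper added above pairs a TIOCGWINSZ ioctl with a LINES/COLUMNS fallback. A standalone, POSIX-only sketch of the same idea follows; it calls sys.stdin.isatty() as a function, whereas the accepted patch tests the bare isatty attribute (always truthy), so that particular early return never fires and the fallback there is only reached via the except branch:

import fcntl
import os
import struct
import sys
import termios


def get_window_size():
    fallback = (int(os.environ.get("LINES", 20)), int(os.environ.get("COLUMNS", 80)))
    if not sys.stdin.isatty():
        return fallback
    try:
        # TIOCGWINSZ fills a struct winsize; the first two shorts are rows and columns
        raw = fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, struct.pack("hh", 0, 0))
        rows, cols = struct.unpack("hh", raw)
    except OSError:
        return fallback
    return rows, cols


print(get_window_size())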
{"golden_diff": "diff --git a/pwndbg/__init__.py b/pwndbg/__init__.py\n--- a/pwndbg/__init__.py\n+++ b/pwndbg/__init__.py\n@@ -5,6 +5,8 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+import signal\n+\n import gdb\n \n import pwndbg.android\n@@ -67,6 +69,7 @@\n import pwndbg.regs\n import pwndbg.stack\n import pwndbg.typeinfo\n+import pwndbg.ui\n import pwndbg.version\n import pwndbg.vmmap\n import pwndbg.wrappers\n@@ -131,13 +134,13 @@\n set backtrace past-main on\n set step-mode on\n set print pretty on\n-set width 0\n+set width %i\n set print elements 15\n handle SIGALRM nostop print nopass\n handle SIGBUS stop print nopass\n handle SIGPIPE nostop print nopass\n handle SIGSEGV stop print nopass\n-\"\"\".strip() % prompt\n+\"\"\".strip() % (prompt, pwndbg.ui.get_window_size()[1])\n \n for line in pre_commands.strip().splitlines():\n gdb.execute(line)\n@@ -147,3 +150,7 @@\n gdb.execute(\"set disassembly-flavor intel\")\n except gdb.error:\n pass\n+\n+\n+# handle resize event to align width and completion\n+signal.signal(signal.SIGWINCH, lambda signum, frame: gdb.execute(\"set width %i\" % pwndbg.ui.get_window_size()[1]))\ndiff --git a/pwndbg/ui.py b/pwndbg/ui.py\n--- a/pwndbg/ui.py\n+++ b/pwndbg/ui.py\n@@ -9,6 +9,7 @@\n from __future__ import unicode_literals\n \n import fcntl\n+import os\n import struct\n import sys\n import termios\n@@ -22,13 +23,21 @@\n \n def banner(title):\n title = title.upper()\n- try:\n- _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n- except:\n- width = 80\n+ _height, width = get_window_size()\n width -= 2\n return C.banner((\"[{:%s^%ss}]\" % (config.banner_separator, width)).format(title))\n \n def addrsz(address):\n address = int(address) & pwndbg.arch.ptrmask\n return \"%{}x\".format(2*pwndbg.arch.ptrsize) % address\n+\n+def get_window_size():\n+ fallback = (int(os.environ.get('LINES', 20)), int(os.environ.get('COLUMNS', 80)))\n+ if not sys.stdin.isatty:\n+ return fallback\n+ try:\n+ # get terminal size and force ret buffer len of 4 bytes for safe unpacking by passing equally long arg\n+ rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n+ except:\n+ rows, cols = fallback\n+ return rows, cols\n", "issue": "Typesetting seems to be wrong\n![image](https://cloud.githubusercontent.com/assets/7897423/24843862/42e004bc-1dd9-11e7-8447-b8cf87ed6fe1.png)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gdb\n\nimport pwndbg.android\nimport pwndbg.arch\nimport pwndbg.arguments\nimport pwndbg.argv\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.commands.argv\nimport pwndbg.commands.aslr\nimport pwndbg.commands.auxv\nimport pwndbg.commands.checksec\nimport pwndbg.commands.config\nimport pwndbg.commands.context\nimport pwndbg.commands.cpsr\nimport pwndbg.commands.dt\nimport pwndbg.commands.dumpargs\nimport pwndbg.commands.elf\nimport pwndbg.commands.gdbinit\nimport pwndbg.commands.got\nimport pwndbg.commands.heap\nimport pwndbg.commands.hexdump\nimport pwndbg.commands.ida\nimport pwndbg.commands.misc\nimport pwndbg.commands.next\nimport pwndbg.commands.peda\nimport pwndbg.commands.procinfo\nimport pwndbg.commands.radare2\nimport pwndbg.commands.reload\nimport pwndbg.commands.rop\nimport pwndbg.commands.ropper\nimport 
pwndbg.commands.search\nimport pwndbg.commands.segments\nimport pwndbg.commands.shell\nimport pwndbg.commands.stack\nimport pwndbg.commands.start\nimport pwndbg.commands.telescope\nimport pwndbg.commands.theme\nimport pwndbg.commands.version\nimport pwndbg.commands.vmmap\nimport pwndbg.commands.windbg\nimport pwndbg.commands.xor\nimport pwndbg.constants\nimport pwndbg.disasm\nimport pwndbg.disasm.arm\nimport pwndbg.disasm.jump\nimport pwndbg.disasm.mips\nimport pwndbg.disasm.ppc\nimport pwndbg.disasm.sparc\nimport pwndbg.disasm.x86\nimport pwndbg.dt\nimport pwndbg.elf\nimport pwndbg.exception\nimport pwndbg.heap\nimport pwndbg.inthook\nimport pwndbg.memory\nimport pwndbg.net\nimport pwndbg.proc\nimport pwndbg.prompt\nimport pwndbg.regs\nimport pwndbg.stack\nimport pwndbg.typeinfo\nimport pwndbg.version\nimport pwndbg.vmmap\nimport pwndbg.wrappers\n\n__version__ = pwndbg.version.__version__\nversion = __version__\n\ntry:\n import unicorn\n import pwndbg.emu\nexcept:\n pass\n\n__all__ = [\n'arch',\n'auxv',\n'chain',\n'color',\n'compat',\n'disasm',\n'dt',\n'elf',\n'enhance',\n'events',\n'file',\n'function',\n'heap',\n'hexdump',\n'ida',\n'info',\n'linkmap',\n'malloc',\n'memoize',\n'memory',\n'proc',\n'regs',\n'remote',\n'search',\n'stack',\n'strings',\n'symbol',\n'typeinfo',\n'ui',\n'vmmap'\n]\n\nprompt = \"pwndbg> \"\nprompt = \"\\x02\" + prompt + \"\\x01\" # STX + prompt + SOH\nprompt = pwndbg.color.red(prompt)\nprompt = pwndbg.color.bold(prompt)\nprompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n\npre_commands = \"\"\"\nset confirm off\nset verbose off\nset prompt %s\nset pagination off\nset height 0\nset history expansion on\nset history save on\nset follow-fork-mode child\nset backtrace past-main on\nset step-mode on\nset print pretty on\nset width 0\nset print elements 15\nhandle SIGALRM nostop print nopass\nhandle SIGBUS stop print nopass\nhandle SIGPIPE nostop print nopass\nhandle SIGSEGV stop print nopass\n\"\"\".strip() % prompt\n\nfor line in pre_commands.strip().splitlines():\n gdb.execute(line)\n\n# This may throw an exception, see pwndbg/pwndbg#27\ntry:\n gdb.execute(\"set disassembly-flavor intel\")\nexcept gdb.error:\n pass\n", "path": "pwndbg/__init__.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nA few helpers for making things print pretty-like.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport fcntl\nimport struct\nimport sys\nimport termios\n\nimport pwndbg.arch\nimport pwndbg.color.context as C\nimport pwndbg.color.theme as theme\nimport pwndbg.config as config\n\ntheme.Parameter('banner-separator', '\u2500', 'repeated banner separator character')\n\ndef banner(title):\n title = title.upper()\n try:\n _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n except:\n width = 80\n width -= 2\n return C.banner((\"[{:%s^%ss}]\" % (config.banner_separator, width)).format(title))\n\ndef addrsz(address):\n address = int(address) & pwndbg.arch.ptrmask\n return \"%{}x\".format(2*pwndbg.arch.ptrsize) % address\n", "path": "pwndbg/ui.py"}]}
2,183
707
gh_patches_debug_39662
rasdani/github-patches
git_diff
jupyterhub__zero-to-jupyterhub-k8s-531
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> culler is failing and exiting when culling users and servers are slow to stop Two issues: 1. culler script seems to exit when the cull request fails. It's unclear why this happens, but we should confirm and fix this 2. the 'real' issue is that the culler is hitting 400 errors in the first place. The cause is servers that are slow to stop (DELETE /users/:name gives 400 if the user's server is running and cannot stop promptly). The previous request to stop the server will have returned 202 ACCEPTED instead of 204 DELETED in this case. If we delay deleting users if we get 202 ACCEPTED from the server deletion, we should be safe here. </issue> <code> [start of images/hub/cull_idle_servers.py] 1 #!/usr/bin/env python3 2 # Imported from https://github.com/jupyterhub/jupyterhub/blob/0.8.0rc1/examples/cull-idle/cull_idle_servers.py 3 """script to monitor and cull idle single-user servers 4 5 Caveats: 6 7 last_activity is not updated with high frequency, 8 so cull timeout should be greater than the sum of: 9 10 - single-user websocket ping interval (default: 30s) 11 - JupyterHub.last_activity_interval (default: 5 minutes) 12 13 You can run this as a service managed by JupyterHub with this in your config:: 14 15 16 c.JupyterHub.services = [ 17 { 18 'name': 'cull-idle', 19 'admin': True, 20 'command': 'python cull_idle_servers.py --timeout=3600'.split(), 21 } 22 ] 23 24 Or run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`: 25 26 export JUPYTERHUB_API_TOKEN=`jupyterhub token` 27 python cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api] 28 """ 29 30 import datetime 31 import json 32 import os 33 34 from dateutil.parser import parse as parse_date 35 36 from tornado.gen import coroutine 37 from tornado.log import app_log 38 from tornado.httpclient import AsyncHTTPClient, HTTPRequest 39 from tornado.ioloop import IOLoop, PeriodicCallback 40 from tornado.options import define, options, parse_command_line 41 42 43 @coroutine 44 def cull_idle(url, api_token, timeout, cull_users=False): 45 """Shutdown idle single-user servers 46 47 If cull_users, inactive *users* will be deleted as well. 48 """ 49 auth_header = { 50 'Authorization': 'token %s' % api_token 51 } 52 req = HTTPRequest(url=url + '/users', 53 headers=auth_header, 54 ) 55 now = datetime.datetime.utcnow() 56 cull_limit = now - datetime.timedelta(seconds=timeout) 57 client = AsyncHTTPClient() 58 resp = yield client.fetch(req) 59 users = json.loads(resp.body.decode('utf8', 'replace')) 60 futures = [] 61 62 @coroutine 63 def cull_one(user, last_activity): 64 """cull one user""" 65 66 # shutdown server first. Hub doesn't allow deleting users with running servers. 
67 if user['server']: 68 app_log.info("Culling server for %s (inactive since %s)", user['name'], last_activity) 69 req = HTTPRequest(url=url + '/users/%s/server' % user['name'], 70 method='DELETE', 71 headers=auth_header, 72 ) 73 yield client.fetch(req) 74 if cull_users: 75 app_log.info("Culling user %s (inactive since %s)", user['name'], last_activity) 76 req = HTTPRequest(url=url + '/users/%s' % user['name'], 77 method='DELETE', 78 headers=auth_header, 79 ) 80 yield client.fetch(req) 81 82 for user in users: 83 if not user['server'] and not cull_users: 84 # server not running and not culling users, nothing to do 85 continue 86 last_activity = parse_date(user['last_activity']) 87 if last_activity < cull_limit: 88 futures.append((user['name'], cull_one(user, last_activity))) 89 else: 90 app_log.debug("Not culling %s (active since %s)", user['name'], last_activity) 91 92 for (name, f) in futures: 93 yield f 94 app_log.debug("Finished culling %s", name) 95 96 97 if __name__ == '__main__': 98 define('url', default=os.environ.get('JUPYTERHUB_API_URL'), help="The JupyterHub API URL") 99 define('timeout', default=600, help="The idle timeout (in seconds)") 100 define('cull_every', default=0, help="The interval (in seconds) for checking for idle servers to cull") 101 define('cull_users', default=False, 102 help="""Cull users in addition to servers. 103 This is for use in temporary-user cases such as tmpnb.""", 104 ) 105 106 parse_command_line() 107 if not options.cull_every: 108 options.cull_every = options.timeout // 2 109 api_token = os.environ['JUPYTERHUB_API_TOKEN'] 110 111 loop = IOLoop.current() 112 cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users) 113 # run once before scheduling periodic call 114 loop.run_sync(cull) 115 # schedule periodic cull 116 pc = PeriodicCallback(cull, 1e3 * options.cull_every) 117 pc.start() 118 try: 119 loop.start() 120 except KeyboardInterrupt: 121 pass 122 [end of images/hub/cull_idle_servers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/images/hub/cull_idle_servers.py b/images/hub/cull_idle_servers.py
--- a/images/hub/cull_idle_servers.py
+++ b/images/hub/cull_idle_servers.py
@@ -70,7 +70,15 @@
                               method='DELETE',
                               headers=auth_header,
                               )
-            yield client.fetch(req)
+            resp = yield client.fetch(req)
+            if resp.code == 202:
+                msg = "Server for {} is slow to stop.".format(user['name'])
+                if cull_users:
+                    app_log.warning(msg + " Not culling user yet.")
+                    # return here so we don't continue to cull the user
+                    # which will fail if the server is still trying to shutdown
+                    return
+                app_log.warning(msg)
         if cull_users:
             app_log.info("Culling user %s (inactive since %s)", user['name'], last_activity)
             req = HTTPRequest(url=url + '/users/%s' % user['name'],
@@ -83,12 +91,19 @@
         if not user['server'] and not cull_users:
            # server not running and not culling users, nothing to do
            continue
+        if not user['last_activity']:
+            continue
         last_activity = parse_date(user['last_activity'])
         if last_activity < cull_limit:
+            # user might be in a transition (e.g. starting or stopping)
+            # don't try to cull if this is happening
+            if user['pending']:
+                app_log.warning("Not culling user %s with pending %s", user['name'], user['pending'])
+                continue
             futures.append((user['name'], cull_one(user, last_activity)))
         else:
             app_log.debug("Not culling %s (active since %s)", user['name'], last_activity)
-    
+
     for (name, f) in futures:
         yield f
         app_log.debug("Finished culling %s", name)
@@ -102,16 +117,17 @@
            help="""Cull users in addition to servers.
                    This is for use in temporary-user cases such as tmpnb.""",
            )
-    
+
     parse_command_line()
     if not options.cull_every:
         options.cull_every = options.timeout // 2
     api_token = os.environ['JUPYTERHUB_API_TOKEN']
-    
+
     loop = IOLoop.current()
     cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)
-    # run once before scheduling periodic call
-    loop.run_sync(cull)
+    # schedule first cull immediately
+    # because PeriodicCallback doesn't start until the end of the first interval
+    loop.add_callback(cull)
     # schedule periodic cull
     pc = PeriodicCallback(cull, 1e3 * options.cull_every)
     pc.start()
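The fix above leans on the JupyterHub REST semantics described in the issue: deleting a server returns 204 when it is already stopped and 202 when the stop was merely accepted, and deleting a user whose server is still running fails with 400. A small synchronous sketch of the resulting decision logic, with the HTTP calls stubbed out so it runs anywhere; the status code is the only input that matters:

def plan_cull(server_stop_status, cull_users):
    """Decide what to do after asking the hub to stop a user's server."""
    actions = ["server stop requested"]
    if server_stop_status == 202:
        # stop accepted but still in progress: deleting the user now would 400
        actions.append("warn: server slow to stop")
        if cull_users:
            actions.append("skip user deletion this round")
            return actions
    if cull_users:
        actions.append("delete user")
    return actions


print(plan_cull(204, cull_users=True))  # ['server stop requested', 'delete user']
print(plan_cull(202, cull_users=True))  # ['server stop requested', 'warn: server slow to stop', 'skip user deletion this round']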
{"golden_diff": "diff --git a/images/hub/cull_idle_servers.py b/images/hub/cull_idle_servers.py\n--- a/images/hub/cull_idle_servers.py\n+++ b/images/hub/cull_idle_servers.py\n@@ -70,7 +70,15 @@\n method='DELETE',\n headers=auth_header,\n )\n- yield client.fetch(req)\n+ resp = yield client.fetch(req)\n+ if resp.code == 202:\n+ msg = \"Server for {} is slow to stop.\".format(user['name'])\n+ if cull_users:\n+ app_log.warning(msg + \" Not culling user yet.\")\n+ # return here so we don't continue to cull the user\n+ # which will fail if the server is still trying to shutdown\n+ return\n+ app_log.warning(msg)\n if cull_users:\n app_log.info(\"Culling user %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s' % user['name'],\n@@ -83,12 +91,19 @@\n if not user['server'] and not cull_users:\n # server not running and not culling users, nothing to do\n continue\n+ if not user['last_activity']:\n+ continue\n last_activity = parse_date(user['last_activity'])\n if last_activity < cull_limit:\n+ # user might be in a transition (e.g. starting or stopping)\n+ # don't try to cull if this is happening\n+ if user['pending']:\n+ app_log.warning(\"Not culling user %s with pending %s\", user['name'], user['pending'])\n+ continue\n futures.append((user['name'], cull_one(user, last_activity)))\n else:\n app_log.debug(\"Not culling %s (active since %s)\", user['name'], last_activity)\n- \n+\n for (name, f) in futures:\n yield f\n app_log.debug(\"Finished culling %s\", name)\n@@ -102,16 +117,17 @@\n help=\"\"\"Cull users in addition to servers.\n This is for use in temporary-user cases such as tmpnb.\"\"\",\n )\n- \n+\n parse_command_line()\n if not options.cull_every:\n options.cull_every = options.timeout // 2\n api_token = os.environ['JUPYTERHUB_API_TOKEN']\n- \n+\n loop = IOLoop.current()\n cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\n- # run once before scheduling periodic call\n- loop.run_sync(cull)\n+ # schedule first cull immediately\n+ # because PeriodicCallback doesn't start until the end of the first interval\n+ loop.add_callback(cull)\n # schedule periodic cull\n pc = PeriodicCallback(cull, 1e3 * options.cull_every)\n pc.start()\n", "issue": "culler is failing and exiting when culling users and servers are slow to stop\nTwo issues:\r\n\r\n1. culler script seems to exit when the cull request fails. It's unclear why this happens, but we should confirm and fix this\r\n2. the 'real' issue is that the culler is hitting 400 errors in the first place. The cause is servers that are slow to stop (DELETE /users/:name gives 400 if the user's server is running and cannot stop promptly). The previous request to stop the server will have returned 202 ACCEPTED instead of 204 DELETED in this case. 
If we delay deleting users if we get 202 ACCEPTED from the server deletion, we should be safe here.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Imported from https://github.com/jupyterhub/jupyterhub/blob/0.8.0rc1/examples/cull-idle/cull_idle_servers.py\n\"\"\"script to monitor and cull idle single-user servers\n\nCaveats:\n\nlast_activity is not updated with high frequency,\nso cull timeout should be greater than the sum of:\n\n- single-user websocket ping interval (default: 30s)\n- JupyterHub.last_activity_interval (default: 5 minutes)\n\nYou can run this as a service managed by JupyterHub with this in your config::\n\n\n c.JupyterHub.services = [\n {\n 'name': 'cull-idle',\n 'admin': True,\n 'command': 'python cull_idle_servers.py --timeout=3600'.split(),\n }\n ]\n\nOr run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`:\n\n export JUPYTERHUB_API_TOKEN=`jupyterhub token`\n python cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]\n\"\"\"\n\nimport datetime\nimport json\nimport os\n\nfrom dateutil.parser import parse as parse_date\n\nfrom tornado.gen import coroutine\nfrom tornado.log import app_log\nfrom tornado.httpclient import AsyncHTTPClient, HTTPRequest\nfrom tornado.ioloop import IOLoop, PeriodicCallback\nfrom tornado.options import define, options, parse_command_line\n\n\n@coroutine\ndef cull_idle(url, api_token, timeout, cull_users=False):\n \"\"\"Shutdown idle single-user servers\n\n If cull_users, inactive *users* will be deleted as well.\n \"\"\"\n auth_header = {\n 'Authorization': 'token %s' % api_token\n }\n req = HTTPRequest(url=url + '/users',\n headers=auth_header,\n )\n now = datetime.datetime.utcnow()\n cull_limit = now - datetime.timedelta(seconds=timeout)\n client = AsyncHTTPClient()\n resp = yield client.fetch(req)\n users = json.loads(resp.body.decode('utf8', 'replace'))\n futures = []\n\n @coroutine\n def cull_one(user, last_activity):\n \"\"\"cull one user\"\"\"\n\n # shutdown server first. 
Hub doesn't allow deleting users with running servers.\n if user['server']:\n app_log.info(\"Culling server for %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s/server' % user['name'],\n method='DELETE',\n headers=auth_header,\n )\n yield client.fetch(req)\n if cull_users:\n app_log.info(\"Culling user %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s' % user['name'],\n method='DELETE',\n headers=auth_header,\n )\n yield client.fetch(req)\n\n for user in users:\n if not user['server'] and not cull_users:\n # server not running and not culling users, nothing to do\n continue\n last_activity = parse_date(user['last_activity'])\n if last_activity < cull_limit:\n futures.append((user['name'], cull_one(user, last_activity)))\n else:\n app_log.debug(\"Not culling %s (active since %s)\", user['name'], last_activity)\n \n for (name, f) in futures:\n yield f\n app_log.debug(\"Finished culling %s\", name)\n\n\nif __name__ == '__main__':\n define('url', default=os.environ.get('JUPYTERHUB_API_URL'), help=\"The JupyterHub API URL\")\n define('timeout', default=600, help=\"The idle timeout (in seconds)\")\n define('cull_every', default=0, help=\"The interval (in seconds) for checking for idle servers to cull\")\n define('cull_users', default=False,\n help=\"\"\"Cull users in addition to servers.\n This is for use in temporary-user cases such as tmpnb.\"\"\",\n )\n \n parse_command_line()\n if not options.cull_every:\n options.cull_every = options.timeout // 2\n api_token = os.environ['JUPYTERHUB_API_TOKEN']\n \n loop = IOLoop.current()\n cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\n # run once before scheduling periodic call\n loop.run_sync(cull)\n # schedule periodic cull\n pc = PeriodicCallback(cull, 1e3 * options.cull_every)\n pc.start()\n try:\n loop.start()\n except KeyboardInterrupt:\n pass\n", "path": "images/hub/cull_idle_servers.py"}]}
1,995
647
gh_patches_debug_13209
rasdani/github-patches
git_diff
cal-itp__benefits-38
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ensure cookies are enabled Need a basic client-side check that cookies are enabled (and UI if not) as soon as the app loads, since we require cookies to store the temporary transaction data. </issue> <code> [start of benefits/settings.py] 1 """ 2 Django settings for benefits project. 3 """ 4 import os 5 6 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 7 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 8 9 # SECURITY WARNING: keep the secret key used in production secret! 10 SECRET_KEY = os.environ["DJANGO_SECRET_KEY"] 11 12 # SECURITY WARNING: don't run with debug turned on in production! 13 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true" 14 15 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true" 16 17 ALLOWED_HOSTS = [] 18 19 if DEBUG: 20 ALLOWED_HOSTS.extend(["*"]) 21 else: 22 hosts = os.environ["DJANGO_ALLOWED_HOSTS"].split() 23 ALLOWED_HOSTS.extend(hosts) 24 25 # Application definition 26 27 INSTALLED_APPS = [ 28 "django.contrib.sessions", 29 "django.contrib.staticfiles", 30 "benefits.core", 31 "benefits.enrollment", 32 "benefits.eligibility", 33 ] 34 35 if ADMIN: 36 INSTALLED_APPS.extend( 37 [ 38 "django.contrib.admin", 39 "django.contrib.auth", 40 "django.contrib.contenttypes", 41 "django.contrib.messages", 42 ] 43 ) 44 45 MIDDLEWARE = [ 46 "django.middleware.security.SecurityMiddleware", 47 "django.contrib.sessions.middleware.SessionMiddleware", 48 "django.middleware.locale.LocaleMiddleware", 49 "django.middleware.common.CommonMiddleware", 50 "django.middleware.csrf.CsrfViewMiddleware", 51 "django.middleware.clickjacking.XFrameOptionsMiddleware", 52 "benefits.core.middleware.DebugSession", 53 "benefits.core.middleware.ChangedLanguageEvent", 54 ] 55 56 if ADMIN: 57 MIDDLEWARE.extend( 58 [ 59 "django.contrib.auth.middleware.AuthenticationMiddleware", 60 "django.contrib.messages.middleware.MessageMiddleware", 61 ] 62 ) 63 64 CSRF_COOKIE_HTTPONLY = True 65 66 SESSION_COOKIE_AGE = 3600 67 SESSION_COOKIE_SAMESITE = "Strict" 68 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies" 69 70 if not DEBUG: 71 CSRF_COOKIE_SECURE = True 72 SESSION_COOKIE_SECURE = True 73 74 ROOT_URLCONF = "benefits.urls" 75 76 template_ctx_processors = [ 77 "django.template.context_processors.request", 78 "benefits.core.context_processors.analytics", 79 ] 80 81 if DEBUG: 82 template_ctx_processors.extend( 83 [ 84 "django.template.context_processors.debug", 85 "benefits.core.context_processors.debug", 86 ] 87 ) 88 89 if ADMIN: 90 template_ctx_processors.extend( 91 [ 92 "django.contrib.auth.context_processors.auth", 93 "django.contrib.messages.context_processors.messages", 94 ] 95 ) 96 97 TEMPLATES = [ 98 { 99 "BACKEND": "django.template.backends.django.DjangoTemplates", 100 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")], 101 "APP_DIRS": True, 102 "OPTIONS": { 103 "context_processors": template_ctx_processors, 104 }, 105 }, 106 ] 107 108 WSGI_APPLICATION = "benefits.wsgi.application" 109 110 DATABASES = { 111 "default": { 112 "ENGINE": "django.db.backends.sqlite3", 113 "NAME": os.environ.get("DJANGO_DB", "django") + ".db", 114 } 115 } 116 117 # Password validation 118 119 AUTH_PASSWORD_VALIDATORS = [] 120 121 if ADMIN: 122 AUTH_PASSWORD_VALIDATORS.extend( 123 [ 124 { 125 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", 126 }, 127 { 128 "NAME": 
"django.contrib.auth.password_validation.MinimumLengthValidator", 129 }, 130 { 131 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", 132 }, 133 { 134 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", 135 }, 136 ] 137 ) 138 139 # Internationalization 140 141 LANGUAGE_CODE = "en" 142 143 LANGUAGES = [("en", "English"), ("es", "Español")] 144 145 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")] 146 147 USE_I18N = True 148 USE_L10N = True 149 150 TIME_ZONE = "UTC" 151 USE_TZ = True 152 153 # Static files (CSS, JavaScript, Images) 154 155 STATIC_URL = "/static/" 156 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")] 157 STATIC_ROOT = os.path.join(BASE_DIR, "static") 158 159 # Logging configuration 160 161 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING") 162 LOGGING = { 163 "version": 1, 164 "disable_existing_loggers": False, 165 "formatters": { 166 "default": { 167 "format": "[{asctime}] {levelname} {name}:{lineno} {message}", 168 "datefmt": "%d/%b/%Y %H:%M:%S", 169 "style": "{", 170 }, 171 }, 172 "handlers": { 173 "default": {"class": "logging.StreamHandler", "formatter": "default"}, 174 }, 175 "root": { 176 "handlers": ["default"], 177 "level": LOG_LEVEL, 178 }, 179 "loggers": {"django": {"handlers": ["default"], "propagate": False}}, 180 } 181 182 # Analytics configuration 183 184 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY") 185 [end of benefits/settings.py] [start of benefits/urls.py] 1 """ 2 benefits URL Configuration 3 4 The `urlpatterns` list routes URLs to views. For more information please see: 5 https://docs.djangoproject.com/en/3.1/topics/http/urls/ 6 """ 7 import logging 8 9 from django.urls import include, path 10 11 from benefits.settings import ADMIN 12 13 14 logger = logging.getLogger(__name__) 15 16 handler400 = "benefits.core.views.bad_request" 17 handler403 = "benefits.core.views.bad_request" 18 handler404 = "benefits.core.views.page_not_found" 19 handler500 = "benefits.core.views.server_error" 20 21 urlpatterns = [ 22 path("", include("benefits.core.urls")), 23 path("enrollment/", include("benefits.enrollment.urls")), 24 path("eligibility/", include("benefits.eligibility.urls")), 25 path("i18n/", include("django.conf.urls.i18n")), 26 ] 27 28 if ADMIN: 29 from django.contrib import admin 30 31 logger.debug("Register admin/ urls") 32 urlpatterns.append(path("admin/", admin.site.urls)) 33 else: 34 logger.debug("Skip url registrations for admin") 35 [end of benefits/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -69,6 +69,7 @@
 
 if not DEBUG:
     CSRF_COOKIE_SECURE = True
+    CSRF_FAILURE_VIEW = "benefits.core.views.bad_request"
    SESSION_COOKIE_SECURE = True
 
 ROOT_URLCONF = "benefits.urls"
diff --git a/benefits/urls.py b/benefits/urls.py
--- a/benefits/urls.py
+++ b/benefits/urls.py
@@ -20,8 +20,8 @@
 
 urlpatterns = [
     path("", include("benefits.core.urls")),
-    path("enrollment/", include("benefits.enrollment.urls")),
     path("eligibility/", include("benefits.eligibility.urls")),
+    path("enrollment/", include("benefits.enrollment.urls")),
     path("i18n/", include("django.conf.urls.i18n")),
 ]
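The settings change above uses Django's CSRF_FAILURE_VIEW hook: a visitor with cookies disabled never receives the CSRF cookie, so the CSRF check fails and Django dispatches to the configured view instead of its stock 403 page. A sketch of the shape such a view takes; the module path and message are placeholders rather than the project's real ones, and the snippet belongs inside an existing Django project rather than running on its own:

# settings.py (hypothetical project)
CSRF_FAILURE_VIEW = "myapp.views.csrf_failure"  # dotted path, resolved lazily

# myapp/views.py
from django.http import HttpResponseBadRequest


def csrf_failure(request, reason=""):
    # Django passes the failure reason, e.g. "CSRF cookie not set."
    return HttpResponseBadRequest("This site requires cookies to be enabled.")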
{"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -69,6 +69,7 @@\n \n if not DEBUG:\n CSRF_COOKIE_SECURE = True\n+ CSRF_FAILURE_VIEW = \"benefits.core.views.bad_request\"\n SESSION_COOKIE_SECURE = True\n \n ROOT_URLCONF = \"benefits.urls\"\ndiff --git a/benefits/urls.py b/benefits/urls.py\n--- a/benefits/urls.py\n+++ b/benefits/urls.py\n@@ -20,8 +20,8 @@\n \n urlpatterns = [\n path(\"\", include(\"benefits.core.urls\")),\n- path(\"enrollment/\", include(\"benefits.enrollment.urls\")),\n path(\"eligibility/\", include(\"benefits.eligibility.urls\")),\n+ path(\"enrollment/\", include(\"benefits.enrollment.urls\")),\n path(\"i18n/\", include(\"django.conf.urls.i18n\")),\n ]\n", "issue": "Ensure cookies are enabled\nNeed a basic client-side check that cookies are enabled (and UI if not) as soon as the app loads, since we require cookies to store the temporary transaction data.\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split()\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_AGE = 3600\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, 
\"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n", "path": "benefits/settings.py"}, {"content": "\"\"\"\nbenefits URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\n\"\"\"\nimport logging\n\nfrom django.urls import include, path\n\nfrom benefits.settings import ADMIN\n\n\nlogger = logging.getLogger(__name__)\n\nhandler400 = \"benefits.core.views.bad_request\"\nhandler403 = \"benefits.core.views.bad_request\"\nhandler404 = \"benefits.core.views.page_not_found\"\nhandler500 = \"benefits.core.views.server_error\"\n\nurlpatterns = [\n path(\"\", include(\"benefits.core.urls\")),\n path(\"enrollment/\", include(\"benefits.enrollment.urls\")),\n path(\"eligibility/\", include(\"benefits.eligibility.urls\")),\n path(\"i18n/\", include(\"django.conf.urls.i18n\")),\n]\n\nif ADMIN:\n from django.contrib import admin\n\n logger.debug(\"Register admin/ urls\")\n urlpatterns.append(path(\"admin/\", admin.site.urls))\nelse:\n logger.debug(\"Skip url registrations for admin\")\n", "path": "benefits/urls.py"}]}
2,404
212
gh_patches_debug_19230
rasdani/github-patches
git_diff
google__clusterfuzz-863
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fuzzers page does not work well with large number of jobs For libFuzzer, if we have a lot of existing jobs (>100) and want to add a new job and associate it. Submit button feels stuck, does not show updates, and take 1-2 min to finish. Can we show some update or something better to optimize this when only one job is updated. @oliverchang as fyi. </issue> <code> [start of src/python/fuzzing/fuzzer_selection.py] 1 # Copyright 2019 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """Helper functions to update fuzzer-job mappings, and select fuzzers to run.""" 15 16 import collections 17 18 from base import utils 19 from datastore import data_types 20 from datastore import fuzz_target_utils 21 from datastore import ndb 22 from datastore import ndb_utils 23 from metrics import logs 24 from system import environment 25 26 # Used to prepare targets to be passed to utils.random_weighted_choice. 27 WeightedTarget = collections.namedtuple('WeightedTarget', ['target', 'weight']) 28 29 30 def update_mappings_for_fuzzer(fuzzer, mappings=None): 31 """Clear existing mappings for a fuzzer, and replace them.""" 32 if mappings is None: 33 mappings = fuzzer.jobs 34 35 query = data_types.FuzzerJob.query() 36 query = query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name) 37 entities = ndb_utils.get_all_from_query(query) 38 old_mappings = {} 39 for entity in entities: 40 old_mappings[(entity.job, entity.platform)] = entity 41 42 new_mappings = [] 43 for job_name in mappings: 44 job = data_types.Job.query(data_types.Job.name == job_name).get() 45 if not job: 46 logs.log_error('An unknown job %s was selected for fuzzer %s.' 
% 47 (job_name, fuzzer.name)) 48 continue 49 50 mapping = old_mappings.pop((job_name, job.platform), None) 51 if mapping: 52 continue 53 54 mapping = data_types.FuzzerJob() 55 mapping.fuzzer = fuzzer.name 56 mapping.job = job_name 57 mapping.platform = job.platform 58 new_mappings.append(mapping) 59 60 ndb.put_multi(new_mappings) 61 ndb.delete_multi([m.key for m in list(old_mappings.values())]) 62 63 64 def update_platform_for_job(job_name, new_platform): 65 """Update platform for all mappings for a particular job.""" 66 query = data_types.FuzzerJob.query() 67 query = query.filter(data_types.FuzzerJob.job == job_name) 68 mappings = ndb_utils.get_all_from_query(query) 69 new_mappings = [] 70 for mapping in mappings: 71 mapping.platform = new_platform 72 new_mappings.append(mapping) 73 ndb.put_multi(new_mappings) 74 75 76 def get_fuzz_task_payload(platform=None): 77 """Select a fuzzer that can run on this platform.""" 78 if not platform: 79 queue_override = environment.get_value('QUEUE_OVERRIDE') 80 platform = queue_override if queue_override else environment.platform() 81 82 query = data_types.FuzzerJob.query() 83 query = query.filter(data_types.FuzzerJob.platform == platform) 84 85 mappings = list(ndb_utils.get_all_from_query(query)) 86 if not mappings: 87 return None, None 88 89 selection = utils.random_weighted_choice(mappings) 90 return selection.fuzzer, selection.job 91 92 93 def select_fuzz_target(targets, target_weights): 94 """Select a fuzz target from a list of potential targets.""" 95 assert targets 96 97 weighted_targets = [] 98 for target in targets: 99 weight = target_weights.get(target, 1.0) 100 weighted_targets.append(WeightedTarget(target, weight)) 101 102 return utils.random_weighted_choice(weighted_targets).target 103 104 105 def get_fuzz_target_weights(): 106 """Get a list of fuzz target weights based on the current fuzzer.""" 107 job_type = environment.get_value('JOB_NAME') 108 109 target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(job=job_type)) 110 fuzz_targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(target_jobs) 111 112 weights = {} 113 for fuzz_target, target_job in zip(fuzz_targets, target_jobs): 114 if not fuzz_target: 115 logs.log_error('Skipping weight assignment for fuzz target %s.' % 116 target_job.fuzz_target_name) 117 continue 118 119 weights[fuzz_target.binary] = target_job.weight 120 121 return weights 122 [end of src/python/fuzzing/fuzzer_selection.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/python/fuzzing/fuzzer_selection.py b/src/python/fuzzing/fuzzer_selection.py
--- a/src/python/fuzzing/fuzzer_selection.py
+++ b/src/python/fuzzing/fuzzer_selection.py
@@ -37,20 +37,20 @@
   entities = ndb_utils.get_all_from_query(query)
   old_mappings = {}
   for entity in entities:
-    old_mappings[(entity.job, entity.platform)] = entity
+    old_mappings[entity.job] = entity
 
   new_mappings = []
   for job_name in mappings:
+    mapping = old_mappings.pop(job_name, None)
+    if mapping:
+      continue
+
     job = data_types.Job.query(data_types.Job.name == job_name).get()
     if not job:
       logs.log_error('An unknown job %s was selected for fuzzer %s.' %
                      (job_name, fuzzer.name))
       continue
 
-    mapping = old_mappings.pop((job_name, job.platform), None)
-    if mapping:
-      continue
-
     mapping = data_types.FuzzerJob()
     mapping.fuzzer = fuzzer.name
     mapping.job = job_name
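The speed-up in the patch above comes from popping each requested job out of the existing-mapping dictionary before doing any datastore work, so an unchanged job costs one dictionary lookup instead of a Job query plus a rewrite. That bookkeeping is easy to isolate; this sketch uses plain job-name strings in place of datastore entities:

def plan_mapping_update(old_mappings, requested_jobs):
    """Mirror the pop-then-create flow: old_mappings maps job name -> entity."""
    leftovers = dict(old_mappings)
    to_create = []
    for job_name in requested_jobs:
        if leftovers.pop(job_name, None) is not None:
            continue  # mapping already exists; skip the expensive Job lookup
        to_create.append(job_name)
    return to_create, sorted(leftovers)  # leftovers are stale mappings to delete


print(plan_mapping_update({"libfuzzer_asan_proj": "entity"},
                          ["libfuzzer_asan_proj", "afl_asan_proj"]))
# (['afl_asan_proj'], [])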
{"golden_diff": "diff --git a/src/python/fuzzing/fuzzer_selection.py b/src/python/fuzzing/fuzzer_selection.py\n--- a/src/python/fuzzing/fuzzer_selection.py\n+++ b/src/python/fuzzing/fuzzer_selection.py\n@@ -37,20 +37,20 @@\n entities = ndb_utils.get_all_from_query(query)\n old_mappings = {}\n for entity in entities:\n- old_mappings[(entity.job, entity.platform)] = entity\n+ old_mappings[entity.job] = entity\n \n new_mappings = []\n for job_name in mappings:\n+ mapping = old_mappings.pop(job_name, None)\n+ if mapping:\n+ continue\n+\n job = data_types.Job.query(data_types.Job.name == job_name).get()\n if not job:\n logs.log_error('An unknown job %s was selected for fuzzer %s.' %\n (job_name, fuzzer.name))\n continue\n \n- mapping = old_mappings.pop((job_name, job.platform), None)\n- if mapping:\n- continue\n-\n mapping = data_types.FuzzerJob()\n mapping.fuzzer = fuzzer.name\n mapping.job = job_name\n", "issue": "Fuzzers page does not work well with large number of jobs\nFor libFuzzer, if we have a lot of existing jobs (>100) and want to add a new job and associate it. Submit button feels stuck, does not show updates, and take 1-2 min to finish. Can we show some update or something better to optimize this when only one job is updated.\r\n\r\n@oliverchang as fyi.\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Helper functions to update fuzzer-job mappings, and select fuzzers to run.\"\"\"\n\nimport collections\n\nfrom base import utils\nfrom datastore import data_types\nfrom datastore import fuzz_target_utils\nfrom datastore import ndb\nfrom datastore import ndb_utils\nfrom metrics import logs\nfrom system import environment\n\n# Used to prepare targets to be passed to utils.random_weighted_choice.\nWeightedTarget = collections.namedtuple('WeightedTarget', ['target', 'weight'])\n\n\ndef update_mappings_for_fuzzer(fuzzer, mappings=None):\n \"\"\"Clear existing mappings for a fuzzer, and replace them.\"\"\"\n if mappings is None:\n mappings = fuzzer.jobs\n\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name)\n entities = ndb_utils.get_all_from_query(query)\n old_mappings = {}\n for entity in entities:\n old_mappings[(entity.job, entity.platform)] = entity\n\n new_mappings = []\n for job_name in mappings:\n job = data_types.Job.query(data_types.Job.name == job_name).get()\n if not job:\n logs.log_error('An unknown job %s was selected for fuzzer %s.' 
%\n (job_name, fuzzer.name))\n continue\n\n mapping = old_mappings.pop((job_name, job.platform), None)\n if mapping:\n continue\n\n mapping = data_types.FuzzerJob()\n mapping.fuzzer = fuzzer.name\n mapping.job = job_name\n mapping.platform = job.platform\n new_mappings.append(mapping)\n\n ndb.put_multi(new_mappings)\n ndb.delete_multi([m.key for m in list(old_mappings.values())])\n\n\ndef update_platform_for_job(job_name, new_platform):\n \"\"\"Update platform for all mappings for a particular job.\"\"\"\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.job == job_name)\n mappings = ndb_utils.get_all_from_query(query)\n new_mappings = []\n for mapping in mappings:\n mapping.platform = new_platform\n new_mappings.append(mapping)\n ndb.put_multi(new_mappings)\n\n\ndef get_fuzz_task_payload(platform=None):\n \"\"\"Select a fuzzer that can run on this platform.\"\"\"\n if not platform:\n queue_override = environment.get_value('QUEUE_OVERRIDE')\n platform = queue_override if queue_override else environment.platform()\n\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.platform == platform)\n\n mappings = list(ndb_utils.get_all_from_query(query))\n if not mappings:\n return None, None\n\n selection = utils.random_weighted_choice(mappings)\n return selection.fuzzer, selection.job\n\n\ndef select_fuzz_target(targets, target_weights):\n \"\"\"Select a fuzz target from a list of potential targets.\"\"\"\n assert targets\n\n weighted_targets = []\n for target in targets:\n weight = target_weights.get(target, 1.0)\n weighted_targets.append(WeightedTarget(target, weight))\n\n return utils.random_weighted_choice(weighted_targets).target\n\n\ndef get_fuzz_target_weights():\n \"\"\"Get a list of fuzz target weights based on the current fuzzer.\"\"\"\n job_type = environment.get_value('JOB_NAME')\n\n target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(job=job_type))\n fuzz_targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(target_jobs)\n\n weights = {}\n for fuzz_target, target_job in zip(fuzz_targets, target_jobs):\n if not fuzz_target:\n logs.log_error('Skipping weight assignment for fuzz target %s.' %\n target_job.fuzz_target_name)\n continue\n\n weights[fuzz_target.binary] = target_job.weight\n\n return weights\n", "path": "src/python/fuzzing/fuzzer_selection.py"}]}
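The golden diff above resolves the slowdown by reordering the association loop: the cheap check against the already-loaded `old_mappings` dict now runs before the per-job `data_types.Job` datastore query, so jobs that are already mapped never trigger a query at all. Below is a minimal, self-contained sketch of that pattern; plain dicts and a callback stand in for the ndb entities, and every name in it is illustrative rather than part of the ClusterFuzz API.

```
def update_mappings(selected_jobs, old_mappings, fetch_job):
    """Sketch of the reordered loop: cheap membership check before the
    expensive per-job lookup.

    old_mappings: dict of job name -> existing mapping (stand-in for the
        FuzzerJob entities already fetched in a single query).
    fetch_job: expensive lookup returning job metadata, or None for an
        unknown job (stand-in for the data_types.Job datastore query).
    """
    new_mappings = []
    for job_name in selected_jobs:
        # A job that already has a mapping is popped and skipped without
        # ever paying for fetch_job().
        if old_mappings.pop(job_name, None) is not None:
            continue

        job = fetch_job(job_name)  # Only genuinely new jobs reach this call.
        if job is None:
            continue  # Unknown job name; nothing to create.

        new_mappings.append({"job": job_name, "platform": job["platform"]})

    # Whatever is still in old_mappings was deselected and should be deleted.
    return new_mappings, list(old_mappings.values())
```

With the original ordering, associating one new job against a fuzzer that already had 100 mappings paid for roughly 100 unnecessary Job queries on every submit; with the reordered loop the expensive lookup runs only for the single new job, which is what the issue asks for.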
gh_patches_debug_20320
rasdani/github-patches
git_diff
praw-dev__praw-1104
You will be provided with a partial code base and an issue statement explaining a problem to resolve.

<issue>
Replying to comments in quarantined subreddits returns an empty object

I have a bot that fetches comment ids from pushshift, then does some work and replies to the comment. If the comment is in a quarantined subreddit that I have not clicked through the quarantine warning for, I get the following error.

```
praw/models/reddit/mixins/replyable.py", line 26, in reply
    return self._reddit.post(API_PATH["comment"], data=data)[0]
IndexError: list index out of range
```

The reply call succeeds, returning a valid http code and the following json

```
{'json': {'errors': [], 'data': {'things': []}}}
```

`objector.objectify` fails to parse this and returns an empty array. Importantly, the comment is successfully created.

I'm not really sure what should happen in this case, but I do think it should be a more clear error message. Happy to put together a pull request if anyone has any ideas.
</issue>

<code>
[start of praw/models/reddit/mixins/replyable.py]
 1 """Provide the ReplyableMixin class."""
 2 from ....const import API_PATH
 3 
 4 
 5 class ReplyableMixin:
 6     """Interface for RedditBase classes that can be replied to."""
 7 
 8     def reply(self, body):
 9         """Reply to the object.
10 
11         :param body: The markdown formatted content for a comment.
12         :returns: A :class:`~.Comment` object for the newly created comment.
13 
14         Example usage:
15 
16         .. code:: python
17 
18             submission = reddit.submission(id='5or86n')
19             submission.reply('reply')
20 
21             comment = reddit.comment(id='dxolpyc')
22             comment.reply('reply')
23 
24         """
25         data = {"text": body, "thing_id": self.fullname}
26         return self._reddit.post(API_PATH["comment"], data=data)[0]
27 
[end of praw/models/reddit/mixins/replyable.py]
</code>

I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.

<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/praw/models/reddit/mixins/replyable.py b/praw/models/reddit/mixins/replyable.py
--- a/praw/models/reddit/mixins/replyable.py
+++ b/praw/models/reddit/mixins/replyable.py
@@ -9,7 +9,14 @@
         """Reply to the object.
 
         :param body: The markdown formatted content for a comment.
-        :returns: A :class:`~.Comment` object for the newly created comment.
+        :returns: A :class:`~.Comment` object for the newly created
+            comment or ``None`` if Reddit doesn't provide one.
+
+        A ``None`` value can be returned if the target is a comment or
+        submission in a quarantined subreddit and the authenticated user
+        has not opt-ed in to viewing the content. When this happens the
+        comment will be sucessfully created on Reddit and can be retried
+        by drawing the comment from the user's comment history.
 
         Example usage:
 
@@ -23,4 +30,8 @@
 
         """
         data = {"text": body, "thing_id": self.fullname}
-        return self._reddit.post(API_PATH["comment"], data=data)[0]
+        comments = self._reddit.post(API_PATH["comment"], data=data)
+        try:
+            return comments[0]
+        except IndexError:
+            return None
{"golden_diff": "diff --git a/praw/models/reddit/mixins/replyable.py b/praw/models/reddit/mixins/replyable.py\n--- a/praw/models/reddit/mixins/replyable.py\n+++ b/praw/models/reddit/mixins/replyable.py\n@@ -9,7 +9,14 @@\n \"\"\"Reply to the object.\n \n :param body: The markdown formatted content for a comment.\n- :returns: A :class:`~.Comment` object for the newly created comment.\n+ :returns: A :class:`~.Comment` object for the newly created\n+ comment or ``None`` if Reddit doesn't provide one.\n+\n+ A ``None`` value can be returned if the target is a comment or\n+ submission in a quarantined subreddit and the authenticated user\n+ has not opt-ed in to viewing the content. When this happens the\n+ comment will be sucessfully created on Reddit and can be retried\n+ by drawing the comment from the user's comment history.\n \n Example usage:\n \n@@ -23,4 +30,8 @@\n \n \"\"\"\n data = {\"text\": body, \"thing_id\": self.fullname}\n- return self._reddit.post(API_PATH[\"comment\"], data=data)[0]\n+ comments = self._reddit.post(API_PATH[\"comment\"], data=data)\n+ try:\n+ return comments[0]\n+ except IndexError:\n+ return None\n", "issue": "Replying to comments in quarantined subreddits returns an empty object\nI have a bot that fetches comment ids from pushshift, then does some work and replies to the comment. If the comment is in a quarantined subreddit that I have not clicked through the quarantine warning for, I get the following error.\r\n\r\n```\r\npraw/models/reddit/mixins/replyable.py\", line 26, in reply\r\n return self._reddit.post(API_PATH[\"comment\"], data=data)[0]\r\nIndexError: list index out of range\r\n```\r\nThe reply call succeeds, returning a valid http code and the following json\r\n```\r\n{'json': {'errors': [], 'data': {'things': []}}}\r\n```\r\n`objector.objectify` fails to parse this and returns an empty array. Importantly, the comment is successfully created.\r\n\r\nI'm not really sure what should happen in this case, but I do think it should be a more clear error message. Happy to put together a pull request if anyone has any ideas.\n", "before_files": [{"content": "\"\"\"Provide the ReplyableMixin class.\"\"\"\nfrom ....const import API_PATH\n\n\nclass ReplyableMixin:\n \"\"\"Interface for RedditBase classes that can be replied to.\"\"\"\n\n def reply(self, body):\n \"\"\"Reply to the object.\n\n :param body: The markdown formatted content for a comment.\n :returns: A :class:`~.Comment` object for the newly created comment.\n\n Example usage:\n\n .. code:: python\n\n submission = reddit.submission(id='5or86n')\n submission.reply('reply')\n\n comment = reddit.comment(id='dxolpyc')\n comment.reply('reply')\n\n \"\"\"\n data = {\"text\": body, \"thing_id\": self.fullname}\n return self._reddit.post(API_PATH[\"comment\"], data=data)[0]\n", "path": "praw/models/reddit/mixins/replyable.py"}]}