in_source_id: dotkom__onlineweb4-1281

issue:

Separate requirements in develop, testing and production

We should separate requirements for the various environments. What sparked this was requiring some postgresql magic to install all requirements (`psycopg2`), and e.g. moonshine does not want postgres just for tests to pass (or run, for that matter).

Requirements that should be removed from `requirements.txt` and added to `requirements-<environment>.txt`:

#### Production requirements [src](https://github.com/dotKom/onlineweb4/blob/develop/requirements.txt#L9):
- `psycopg2` (yes, some people use postgres in development. However, example-local.py uses sqlite, so most people will not need this package. *1)

#### Testing requirements [src](https://github.com/dotKom/onlineweb4/blob/develop/requirements.txt#L38):
- `cov-core`
- `coverage`
- `django-nose`
- `factory-boy`
- `lettuce`
- `nose` *2
- `nose-cov`
- `teamcity-messages`

#### Development requirements (these can be / should be fully optional)
- `django-debug-toolbar` (it's not in `requirements.txt` as of now, but maybe it should be? [wiki](https://github.com/dotKom/onlineweb4/wiki/Django-debug-toolbar))

*1: It also fails to install, and aborts the whole `requirements.txt` installation, if people don't have the postgresql dev headers ([ex.](https://online.ntnu.no/tc/viewLog.html?tab=buildLog&buildTypeId=Onlineweb4_Build&buildId=29))
*2: If we remove this one we have to change the default runner class in the base settings, as the current test runner is nose.
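For context, what the issue asks for is the standard pip pattern of a slim base file plus per-environment overlay files that are only installed where needed. A minimal sketch of how the split would be consumed (commands are illustrative; the file names follow the issue's naming scheme):

```console
# Local development (example-local.py defaults to sqlite, no postgres headers needed)
pip install -r requirements.txt

# CI / test runners
pip install -r requirements.txt -r requirements-testing.txt

# Production hosts (pulls in psycopg2)
pip install -r requirements.txt -r requirements-prod.txt
```

Per footnote *2, `nose` and `django-nose` can only leave the base file if `TEST_RUNNER` in `onlineweb4/settings/base.py` (currently `django_nose.NoseTestSuiteRunner`, see below) is switched to something like Django's stock `django.test.runner.DiscoverRunner`.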
before_files (path: onlineweb4/settings/base.py):

```python
# -*- coding: utf8 -*-
import os
import sys

import wiki
from django.contrib.messages import constants as messages

# Directory that contains this file.
PROJECT_SETTINGS_DIRECTORY = os.path.dirname(globals()['__file__'])
# Root directory. Contains manage.py
PROJECT_ROOT_DIRECTORY = os.path.join(PROJECT_SETTINGS_DIRECTORY, '..', '..')

TEST_RUNNER = "django_nose.NoseTestSuiteRunner"

NOSE_ARGS = ['--with-coverage', '--cover-package=apps', '--cover-html-dir=coverage', '--cover-xml', '--cover-html']

DEBUG = False

ADMINS = (
    ('dotKom', 'dotkom@online.ntnu.no'),
)
MANAGERS = ADMINS

# Email settings
DEFAULT_FROM_EMAIL = 'online@online.ntnu.no'
EMAIL_ARRKOM = 'arrkom@online.ntnu.no'
EMAIL_BEDKOM = 'bedkom@online.ntnu.no'
EMAIL_DOTKOM = 'dotkom@online.ntnu.no'
EMAIL_EKSKOM = 'ekskom@online.ntnu.no'
EMAIL_FAGKOM = 'fagkom@online.ntnu.no'
EMAIL_PROKOM = 'prokom@online.ntnu.no'
EMAIL_TRIKOM = 'trikom@online.ntnu.no'

# We will receive errors and other django messages from this email
SERVER_EMAIL = 'onlineweb4-error@online.ntnu.no'

TIME_ZONE = 'Europe/Oslo'

# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'nb'
LANGUAGES = (
    ('nb', 'Norwegian'),
    ('en_US', 'English'),
    )
LOCALE_PATHS = [
    os.path.join(PROJECT_ROOT_DIRECTORY, 'locale'),
]

SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATETIME_FORMAT = 'N j, Y, H:i'
SECRET_KEY = 'override-this-in-local.py'

# Session cookie expires after one year
SESSION_COOKIE_AGE = 31540000

# Override this in local if you need to :)
BASE_URL = 'https://online.ntnu.no'

AUTH_USER_MODEL = 'authentication.OnlineUser'
LOGIN_URL = '/auth/login/'

MEDIA_ROOT = os.path.join(PROJECT_ROOT_DIRECTORY, 'uploaded_media')  # Override this in local.py in prod.
MEDIA_URL = '/media/'

STATIC_ROOT = os.path.join(PROJECT_ROOT_DIRECTORY, 'static')
STATIC_URL = '/static/'

# Prefix for default profile picture
DEFAULT_PROFILE_PICTURE_PREFIX = os.path.join(STATIC_URL, "img", "profile_default")

# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT_DIRECTORY, 'files/static'),
)

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'compressor.finders.CompressorFinder',
)

# Including django-wiki static files so we can import the less files.
DJANGO_WIKI_STATIC = os.path.join(os.path.dirname(wiki.__file__), 'static')

COMPRESS_FILES = True
COMPRESS_OUTPUT_DIR = 'cache'
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc --include-path=%s {infile} {outfile}' % DJANGO_WIKI_STATIC),
)

COMPRESS_CSS_FILTERS = [
    'compressor.filters.css_default.CssAbsoluteFilter',
    # We want this later on, but it breaks production so disabling for now.
    #'compressor-filters.cssmin.CSSMinFilter',
]
COMPRESS_JS_FILTERS = [
    'compressor.filters.jsmin.JSMinFilter',
]

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': [
            os.path.join(PROJECT_ROOT_DIRECTORY, 'templates/')
        ],
        'OPTIONS': {
            'context_processors': [
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.request",
                "django.template.context_processors.static",
                "django.template.context_processors.tz",
                "django.contrib.messages.context_processors.messages",
                "sekizai.context_processors.sekizai",  # Wiki
                "onlineweb4.context_processors.analytics",
            ],
            'debug': DEBUG,
        }
    }
]

MIDDLEWARE_CLASSES = (
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'middleware.http.Http403Middleware',
    'reversion.middleware.RevisionMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',  # this is default
    'guardian.backends.ObjectPermissionBackend',
)

ROOT_URLCONF = 'onlineweb4.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'onlineweb4.wsgi.application'

# Pizzasystem settings
PIZZA_GROUP = 'dotkom'
PIZZA_ADMIN_GROUP = 'pizzaadmin'

# Grappelli settings
GRAPPELLI_ADMIN_TITLE = 'Onlineweb'

# Guardian settings
ANONYMOUS_USER_ID = -1
GUARDIAN_RENDER_403 = True

# Django-Taggit settings
TAGGIT_CASE_INSENSITIVE = True

# List of usergroups that should be listed under "Finn brukere" in user profile
USER_SEARCH_GROUPS = [
    16,  # appKom
    1,   # arrKom
    2,   # banKom
    3,   # bedKom
    4,   # dotKom
    5,   # eksKom
    14,  # Eldsteradet
    6,   # fagKom
    11,  # Hovedstyret
    19,  # jubKom
    10,  # pangKom
    7,   # proKom
    18,  # seniorKom
    8,   # triKom
    9,   # velKom
    24,  # itex
]

#List of mailing lists, used in update_sympa_memcache_from_sql.py
PUBLIC_LISTS = [
    "foreninger",
    "linjeforeninger",
    "gloshaugen",
    "dragvoll",
    "masterforeninger",
    "kjellere",
    "linjeledere",
    "linjeredaksjoner",
    "glosfaddere",
    "sr-samarbeid",
    "ivt-samarbeid",
    "linjekor",
    "studentdemokratiet"
]

INSTALLED_APPS = (
    # Third party dependencies
    'django.contrib.humanize',
    'django_nose',
    'django_nyt',  # Wiki
    'mptt',  # Wiki
    'sekizai',  # Wiki
    'sorl.thumbnail',  # Wiki
    'grappelli',
    'filebrowser',
    'chunks',
    'crispy_forms',
    'django_extensions',
    'django_dynamic_fixture',
    'oauth2_provider',
    'captcha',
    'compressor',
    'pdfdocument',
    'watson',
    'gunicorn',
    'markdown_deux',
    'djangoformsetjs',
    'reversion',
    'guardian',
    'stripe',
    'rest_framework',
    'django_filters',
    'taggit',
    'taggit_serializer',
    'corsheaders',
    'datetimewidget',

    # Django apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.staticfiles',

    # Onlineweb 4 apps
    'apps.api',
    'apps.approval',
    'apps.article',
    'apps.authentication',
    'apps.autoconfig',
    'apps.careeropportunity',
    'apps.companyprofile',
    'apps.dashboard',
    'apps.gallery',
    'apps.events',
    'apps.marks',
    'apps.offline',
    'apps.feedback',
    'apps.mommy',
    'apps.profiles',
    'apps.genfors',
    'apps.resourcecenter',
    'apps.mailinglists',
    'apps.inventory',
    'apps.payment',
    'apps.posters',
    'apps.sso',
    'apps.splash',
    'apps.shop',
    'apps.webshop',
    'scripts',

    #External apps
    'feedme',
    'redwine',

    #Wiki
    'wiki',
    'wiki.plugins.attachments',
    'wiki.plugins.images',
    'wiki.plugins.macros',
    'wiki.plugins.help',
    'wiki.plugins.links',

)


# SSO / OAuth2 settings
if 'apps.sso' in INSTALLED_APPS:
    from apps.sso.settings import OAUTH2_SCOPES
    OAUTH2_PROVIDER = {
        'SCOPES': OAUTH2_SCOPES,
        'ACCESS_TOKEN_EXPIRE_SECONDS': 3600,
        'AUTHORIZATION_CODE_EXPIRE_SECONDS': 60,
    }
    OAUTH2_PROVIDER_APPLICATION_MODEL = 'sso.Client'

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'standard'
        }
    },
    'loggers': {
        'django.security.DisallowedHost': {
            'handlers': ['null'],
            'propagate': False,
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'feedback': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'syncer': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        '': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}

# crispy forms settings
CRISPY_TEMPLATE_PACK = 'bootstrap3'

# bootstrap messages classes
MESSAGE_TAGS = {messages.DEBUG: 'alert-debug',
                messages.INFO: 'alert-info',
                messages.SUCCESS: 'alert-success',
                messages.WARNING: 'alert-warning',
                messages.ERROR: 'alert-error'}


# Not really sure what this does.
# Has something to do with django-dynamic-fixture bumped from 1.6.4 to 1.6.5 in order to run a syncdb with mysql/postgres (OptimusCrime)
IMPORT_DDF_MODELS = False

# Django REST framework
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',  # Allows users to be logged in to browsable API
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'rest_framework.filters.DjangoFilterBackend',
        'rest_framework.filters.OrderingFilter',
    ),
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
        'rest_framework.parsers.FormParser',
        'rest_framework.parsers.MultiPartParser',
        'rest_framework.parsers.FileUploadParser',
    ),
    'DEFAULT_RENDERER_CLASSES': [
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework.renderers.AdminRenderer',
    ],
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10
}

CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/api/v1/.*$'  # Enables CORS on /api/v1/ endpoints only

# Remember to keep 'local' last, so it can override any setting.
for settings_module in ['filebrowser', 'django_wiki', 'local']:  # local last
    if not os.path.exists(os.path.join(PROJECT_SETTINGS_DIRECTORY,
                                       settings_module + ".py")):
        sys.stderr.write("Could not find settings module '%s'.\n" %
                         settings_module)
        if settings_module == 'local':
            sys.stderr.write("You need to copy the settings file "
                             "'onlineweb4/settings/example-local.py' to "
                             "'onlineweb4/settings/local.py'.\n")
        sys.exit(1)
    try:
        exec('from .%s import *' % settings_module)
    except ImportError as e:
        print("Could not import settings for '%s' : %s" % (settings_module,
                                                           str(e)))
```
\\\"django.template.context_processors.i18n\\\",\\n \\\"django.template.context_processors.media\\\",\\n \\\"django.template.context_processors.request\\\",\\n \\\"django.template.context_processors.static\\\",\\n \\\"django.template.context_processors.tz\\\",\\n \\\"django.contrib.messages.context_processors.messages\\\",\\n \\\"sekizai.context_processors.sekizai\\\", # Wiki\\n \\\"onlineweb4.context_processors.analytics\\\",\\n ],\\n 'debug': DEBUG,\\n }\\n }\\n]\\n\\nMIDDLEWARE_CLASSES = (\\n 'corsheaders.middleware.CorsMiddleware',\\n 'django.middleware.common.CommonMiddleware',\\n 'django.contrib.sessions.middleware.SessionMiddleware',\\n 'django.middleware.csrf.CsrfViewMiddleware',\\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\\n 'django.contrib.messages.middleware.MessageMiddleware',\\n 'middleware.http.Http403Middleware',\\n 'reversion.middleware.RevisionMiddleware',\\n # Uncomment the next line for simple clickjacking protection:\\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\\n)\\n\\nAUTHENTICATION_BACKENDS = (\\n 'django.contrib.auth.backends.ModelBackend', # this is default\\n 'guardian.backends.ObjectPermissionBackend',\\n)\\n\\nROOT_URLCONF = 'onlineweb4.urls'\\n\\n# Python dotted path to the WSGI application used by Django's runserver.\\nWSGI_APPLICATION = 'onlineweb4.wsgi.application'\\n\\n# Pizzasystem settings\\nPIZZA_GROUP = 'dotkom'\\nPIZZA_ADMIN_GROUP = 'pizzaadmin'\\n\\n# Grappelli settings\\nGRAPPELLI_ADMIN_TITLE = 'Onlineweb'\\n\\n# Guardian settings\\nANONYMOUS_USER_ID = -1\\nGUARDIAN_RENDER_403 = True\\n\\n# Django-Taggit settings\\nTAGGIT_CASE_INSENSITIVE = True\\n\\n# List of usergroups that should be listed under \\\"Finn brukere\\\" in user profile\\nUSER_SEARCH_GROUPS = [\\n 16, # appKom\\n 1, # arrKom\\n 2, # banKom\\n 3, # bedKom\\n 4, # dotKom\\n 5, # eksKom\\n 14, # Eldsteradet\\n 6, # fagKom\\n 11, # Hovedstyret\\n 19, # jubKom\\n 10, # pangKom\\n 7, # proKom\\n 18, # seniorKom\\n 8, # triKom\\n 9, # velKom\\n 24, # itex\\n]\\n\\n#List of mailing lists, used in update_sympa_memcache_from_sql.py\\nPUBLIC_LISTS = [\\n \\\"foreninger\\\",\\n \\\"linjeforeninger\\\",\\n \\\"gloshaugen\\\",\\n \\\"dragvoll\\\",\\n \\\"masterforeninger\\\",\\n \\\"kjellere\\\",\\n \\\"linjeledere\\\",\\n \\\"linjeredaksjoner\\\",\\n \\\"glosfaddere\\\",\\n \\\"sr-samarbeid\\\",\\n \\\"ivt-samarbeid\\\",\\n \\\"linjekor\\\",\\n \\\"studentdemokratiet\\\"\\n]\\n\\nINSTALLED_APPS = (\\n # Third party dependencies\\n 'django.contrib.humanize',\\n 'django_nose',\\n 'django_nyt', # Wiki\\n 'mptt', # Wiki\\n 'sekizai', # Wiki\\n 'sorl.thumbnail', # Wiki\\n 'grappelli',\\n 'filebrowser',\\n 'chunks',\\n 'crispy_forms',\\n 'django_extensions',\\n 'django_dynamic_fixture',\\n 'oauth2_provider',\\n 'captcha',\\n 'compressor',\\n 'pdfdocument',\\n 'watson',\\n 'gunicorn',\\n 'markdown_deux',\\n 'djangoformsetjs',\\n 'reversion',\\n 'guardian',\\n 'stripe',\\n 'rest_framework',\\n 'django_filters',\\n 'taggit',\\n 'taggit_serializer',\\n 'corsheaders',\\n 'datetimewidget',\\n\\n # Django apps\\n 'django.contrib.admin',\\n 'django.contrib.auth',\\n 'django.contrib.contenttypes',\\n 'django.contrib.messages',\\n 'django.contrib.sessions',\\n 'django.contrib.sites',\\n 'django.contrib.staticfiles',\\n\\n # Onlineweb 4 apps\\n 'apps.api',\\n 'apps.approval',\\n 'apps.article',\\n 'apps.authentication',\\n 'apps.autoconfig',\\n 'apps.careeropportunity',\\n 'apps.companyprofile',\\n 'apps.dashboard',\\n 'apps.gallery',\\n 'apps.events',\\n 'apps.marks',\\n 
'apps.offline',\\n 'apps.feedback',\\n 'apps.mommy',\\n 'apps.profiles',\\n 'apps.genfors',\\n 'apps.resourcecenter',\\n 'apps.mailinglists',\\n 'apps.inventory',\\n 'apps.payment',\\n 'apps.posters',\\n 'apps.sso',\\n 'apps.splash',\\n 'apps.shop',\\n 'apps.webshop',\\n 'scripts',\\n\\n #External apps\\n 'feedme',\\n 'redwine',\\n\\n #Wiki\\n 'wiki',\\n 'wiki.plugins.attachments',\\n 'wiki.plugins.images',\\n 'wiki.plugins.macros',\\n 'wiki.plugins.help',\\n 'wiki.plugins.links',\\n\\n)\\n\\n\\n# SSO / OAuth2 settings\\nif 'apps.sso' in INSTALLED_APPS:\\n from apps.sso.settings import OAUTH2_SCOPES\\n OAUTH2_PROVIDER = {\\n 'SCOPES': OAUTH2_SCOPES,\\n 'ACCESS_TOKEN_EXPIRE_SECONDS': 3600,\\n 'AUTHORIZATION_CODE_EXPIRE_SECONDS': 60,\\n }\\n OAUTH2_PROVIDER_APPLICATION_MODEL = 'sso.Client'\\n\\n# A sample logging configuration. The only tangible logging\\n# performed by this configuration is to send an email to\\n# the site admins on every HTTP 500 error when DEBUG=False.\\n# See http://docs.djangoproject.com/en/dev/topics/logging for\\n# more details on how to customize your logging configuration.\\nLOGGING = {\\n 'version': 1,\\n 'disable_existing_loggers': False,\\n 'filters': {\\n 'require_debug_false': {\\n '()': 'django.utils.log.RequireDebugFalse'\\n }\\n },\\n 'formatters': {\\n 'standard': {\\n 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\\n },\\n },\\n 'handlers': {\\n 'null': {\\n 'level': 'DEBUG',\\n 'class': 'logging.NullHandler',\\n },\\n 'mail_admins': {\\n 'level': 'ERROR',\\n 'filters': ['require_debug_false'],\\n 'class': 'django.utils.log.AdminEmailHandler'\\n },\\n 'console':{\\n 'level': 'DEBUG',\\n 'class': 'logging.StreamHandler',\\n 'formatter': 'standard'\\n }\\n },\\n 'loggers': {\\n 'django.security.DisallowedHost': {\\n 'handlers': ['null'],\\n 'propagate': False,\\n },\\n 'django.request': {\\n 'handlers': ['mail_admins'],\\n 'level': 'ERROR',\\n 'propagate': True,\\n },\\n 'feedback': {\\n 'handlers': ['console'],\\n 'level': 'DEBUG',\\n 'propagate': True,\\n },\\n 'syncer': {\\n 'handlers': ['console'],\\n 'level': 'DEBUG',\\n 'propagate': True,\\n },\\n '': {\\n 'handlers': ['console'],\\n 'level': 'DEBUG',\\n 'propagate': True,\\n },\\n }\\n}\\n\\n# crispy forms settings\\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\\n\\n# bootstrap messages classes\\nMESSAGE_TAGS = {messages.DEBUG: 'alert-debug',\\n messages.INFO: 'alert-info',\\n messages.SUCCESS: 'alert-success',\\n messages.WARNING: 'alert-warning',\\n messages.ERROR: 'alert-error'}\\n\\n\\n# Not really sure what this does.\\n# Has something to do with django-dynamic-fixture bumped from 1.6.4 to 1.6.5 in order to run a syncdb with mysql/postgres (OptimusCrime)\\nIMPORT_DDF_MODELS = False\\n\\n# Django REST framework\\nREST_FRAMEWORK = {\\n 'DEFAULT_AUTHENTICATION_CLASSES': (\\n 'rest_framework.authentication.SessionAuthentication', # Allows users to be logged in to browsable API\\n ),\\n 'DEFAULT_FILTER_BACKENDS': (\\n 'rest_framework.filters.DjangoFilterBackend',\\n 'rest_framework.filters.OrderingFilter',\\n ),\\n 'DEFAULT_PARSER_CLASSES': (\\n 'rest_framework.parsers.JSONParser',\\n 'rest_framework.parsers.FormParser',\\n 'rest_framework.parsers.MultiPartParser',\\n 'rest_framework.parsers.FileUploadParser',\\n ),\\n 'DEFAULT_RENDERER_CLASSES': [\\n 'rest_framework.renderers.JSONRenderer',\\n 'rest_framework.renderers.BrowsableAPIRenderer',\\n 'rest_framework.renderers.AdminRenderer',\\n ],\\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\\n 'PAGE_SIZE': 

after_files (path: onlineweb4/settings/base.py):

```python
# -*- coding: utf8 -*-
import os
import sys

import wiki
from django.contrib.messages import constants as messages

# Directory that contains this file.
PROJECT_SETTINGS_DIRECTORY = os.path.dirname(globals()['__file__'])
# Root directory. Contains manage.py
PROJECT_ROOT_DIRECTORY = os.path.join(PROJECT_SETTINGS_DIRECTORY, '..', '..')

TEST_RUNNER = "django_nose.NoseTestSuiteRunner"

NOSE_ARGS = ['--with-coverage', '--cover-package=apps', '--cover-html-dir=coverage', '--cover-xml', '--cover-html']

DEBUG = False

ADMINS = (
    ('dotKom', 'dotkom@online.ntnu.no'),
)
MANAGERS = ADMINS

# Email settings
DEFAULT_FROM_EMAIL = 'online@online.ntnu.no'
EMAIL_ARRKOM = 'arrkom@online.ntnu.no'
EMAIL_BEDKOM = 'bedkom@online.ntnu.no'
EMAIL_DOTKOM = 'dotkom@online.ntnu.no'
EMAIL_EKSKOM = 'ekskom@online.ntnu.no'
EMAIL_FAGKOM = 'fagkom@online.ntnu.no'
EMAIL_PROKOM = 'prokom@online.ntnu.no'
EMAIL_TRIKOM = 'trikom@online.ntnu.no'

# We will receive errors and other django messages from this email
SERVER_EMAIL = 'onlineweb4-error@online.ntnu.no'

TIME_ZONE = 'Europe/Oslo'

# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'nb'
LANGUAGES = (
    ('nb', 'Norwegian'),
    ('en_US', 'English'),
    )
LOCALE_PATHS = [
    os.path.join(PROJECT_ROOT_DIRECTORY, 'locale'),
]

SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATETIME_FORMAT = 'N j, Y, H:i'
SECRET_KEY = 'override-this-in-local.py'

# Session cookie expires after one year
SESSION_COOKIE_AGE = 31540000

# Override this in local if you need to :)
BASE_URL = 'https://online.ntnu.no'

AUTH_USER_MODEL = 'authentication.OnlineUser'
LOGIN_URL = '/auth/login/'

MEDIA_ROOT = os.path.join(PROJECT_ROOT_DIRECTORY, 'uploaded_media')  # Override this in local.py in prod.
MEDIA_URL = '/media/'

STATIC_ROOT = os.path.join(PROJECT_ROOT_DIRECTORY, 'static')
STATIC_URL = '/static/'

# Prefix for default profile picture
DEFAULT_PROFILE_PICTURE_PREFIX = os.path.join(STATIC_URL, "img", "profile_default")

# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT_DIRECTORY, 'files/static'),
)

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'compressor.finders.CompressorFinder',
)

# Including django-wiki static files so we can import the less files.
DJANGO_WIKI_STATIC = os.path.join(os.path.dirname(wiki.__file__), 'static')

COMPRESS_FILES = True
COMPRESS_OUTPUT_DIR = 'cache'
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc --include-path=%s {infile} {outfile}' % DJANGO_WIKI_STATIC),
)

COMPRESS_CSS_FILTERS = [
    'compressor.filters.css_default.CssAbsoluteFilter',
    # We want this later on, but it breaks production so disabling for now.
    #'compressor-filters.cssmin.CSSMinFilter',
]
COMPRESS_JS_FILTERS = [
    'compressor.filters.jsmin.JSMinFilter',
]

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': [
            os.path.join(PROJECT_ROOT_DIRECTORY, 'templates/')
        ],
        'OPTIONS': {
            'context_processors': [
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.request",
                "django.template.context_processors.static",
                "django.template.context_processors.tz",
                "django.contrib.messages.context_processors.messages",
                "sekizai.context_processors.sekizai",  # Wiki
                "onlineweb4.context_processors.analytics",
            ],
            'debug': DEBUG,
        }
    }
]

MIDDLEWARE_CLASSES = (
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'middleware.http.Http403Middleware',
    'reversion.middleware.RevisionMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',  # this is default
    'guardian.backends.ObjectPermissionBackend',
)

ROOT_URLCONF = 'onlineweb4.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'onlineweb4.wsgi.application'

# Pizzasystem settings
PIZZA_GROUP = 'dotkom'
PIZZA_ADMIN_GROUP = 'pizzaadmin'

# Grappelli settings
GRAPPELLI_ADMIN_TITLE = 'Onlineweb'

# Guardian settings
ANONYMOUS_USER_ID = -1
GUARDIAN_RENDER_403 = True

# Django-Taggit settings
TAGGIT_CASE_INSENSITIVE = True

# List of usergroups that should be listed under "Finn brukere" in user profile
USER_SEARCH_GROUPS = [
    16,  # appKom
    1,   # arrKom
    2,   # banKom
    3,   # bedKom
    4,   # dotKom
    5,   # eksKom
    14,  # Eldsteradet
    6,   # fagKom
    11,  # Hovedstyret
    19,  # jubKom
    10,  # pangKom
    7,   # proKom
    18,  # seniorKom
    8,   # triKom
    9,   # velKom
    24,  # itex
]

#List of mailing lists, used in update_sympa_memcache_from_sql.py
PUBLIC_LISTS = [
    "foreninger",
    "linjeforeninger",
    "gloshaugen",
    "dragvoll",
    "masterforeninger",
    "kjellere",
    "linjeledere",
    "linjeredaksjoner",
    "glosfaddere",
    "sr-samarbeid",
    "ivt-samarbeid",
    "linjekor",
    "studentdemokratiet"
]

INSTALLED_APPS = (
    # Third party dependencies
    'django.contrib.humanize',
    'django_nose',
    'django_nyt',  # Wiki
    'mptt',  # Wiki
    'sekizai',  # Wiki
    'sorl.thumbnail',  # Wiki
    'grappelli',
    'filebrowser',
    'chunks',
    'crispy_forms',
    'django_extensions',
    'django_dynamic_fixture',
    'oauth2_provider',
    'captcha',
    'compressor',
    'pdfdocument',
    'watson',
    'markdown_deux',
    'djangoformsetjs',
    'reversion',
    'guardian',
    'stripe',
    'rest_framework',
    'django_filters',
    'taggit',
    'taggit_serializer',
    'corsheaders',
    'datetimewidget',

    # Django apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.staticfiles',

    # Onlineweb 4 apps
    'apps.api',
    'apps.approval',
    'apps.article',
    'apps.authentication',
    'apps.autoconfig',
    'apps.careeropportunity',
    'apps.companyprofile',
    'apps.dashboard',
    'apps.gallery',
    'apps.events',
    'apps.marks',
    'apps.offline',
    'apps.feedback',
    'apps.mommy',
    'apps.profiles',
    'apps.genfors',
    'apps.resourcecenter',
    'apps.mailinglists',
    'apps.inventory',
    'apps.payment',
    'apps.posters',
    'apps.sso',
    'apps.splash',
    'apps.shop',
    'apps.webshop',
    'scripts',

    #External apps
    'feedme',
    'redwine',

    #Wiki
    'wiki',
    'wiki.plugins.attachments',
    'wiki.plugins.images',
    'wiki.plugins.macros',
    'wiki.plugins.help',
    'wiki.plugins.links',

)


# SSO / OAuth2 settings
if 'apps.sso' in INSTALLED_APPS:
    from apps.sso.settings import OAUTH2_SCOPES
    OAUTH2_PROVIDER = {
        'SCOPES': OAUTH2_SCOPES,
        'ACCESS_TOKEN_EXPIRE_SECONDS': 3600,
        'AUTHORIZATION_CODE_EXPIRE_SECONDS': 60,
    }
    OAUTH2_PROVIDER_APPLICATION_MODEL = 'sso.Client'

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'standard'
        }
    },
    'loggers': {
        'django.security.DisallowedHost': {
            'handlers': ['null'],
            'propagate': False,
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'feedback': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'syncer': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        '': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}

# crispy forms settings
CRISPY_TEMPLATE_PACK = 'bootstrap3'

# bootstrap messages classes
MESSAGE_TAGS = {messages.DEBUG: 'alert-debug',
                messages.INFO: 'alert-info',
                messages.SUCCESS: 'alert-success',
                messages.WARNING: 'alert-warning',
                messages.ERROR: 'alert-error'}


# Not really sure what this does.
# Has something to do with django-dynamic-fixture bumped from 1.6.4 to 1.6.5 in order to run a syncdb with mysql/postgres (OptimusCrime)
IMPORT_DDF_MODELS = False

# Django REST framework
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',  # Allows users to be logged in to browsable API
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'rest_framework.filters.DjangoFilterBackend',
        'rest_framework.filters.OrderingFilter',
    ),
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
        'rest_framework.parsers.FormParser',
        'rest_framework.parsers.MultiPartParser',
        'rest_framework.parsers.FileUploadParser',
    ),
    'DEFAULT_RENDERER_CLASSES': [
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework.renderers.AdminRenderer',
    ],
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10
}

CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/api/v1/.*$'  # Enables CORS on /api/v1/ endpoints only

# Remember to keep 'local' last, so it can override any setting.
for settings_module in ['filebrowser', 'django_wiki', 'local']:  # local last
    if not os.path.exists(os.path.join(PROJECT_SETTINGS_DIRECTORY,
                                       settings_module + ".py")):
        sys.stderr.write("Could not find settings module '%s'.\n" %
                         settings_module)
        if settings_module == 'local':
            sys.stderr.write("You need to copy the settings file "
                             "'onlineweb4/settings/example-local.py' to "
                             "'onlineweb4/settings/local.py'.\n")
        sys.exit(1)
    try:
        exec('from .%s import *' % settings_module)
    except ImportError as e:
        print("Could not import settings for '%s' : %s" % (settings_module,
                                                           str(e)))
```

pr_diff:

```diff
diff --git a/onlineweb4/settings/base.py b/onlineweb4/settings/base.py
index 565de0a3c..f7b3961b5 100644
--- a/onlineweb4/settings/base.py
+++ b/onlineweb4/settings/base.py
@@ -219,7 +219,6 @@
     'compressor',
     'pdfdocument',
     'watson',
-    'gunicorn',
     'markdown_deux',
     'djangoformsetjs',
     'reversion',
diff --git a/requirements-prod.txt b/requirements-prod.txt
new file mode 100644
index 000000000..e116f5559
--- /dev/null
+++ b/requirements-prod.txt
@@ -0,0 +1,2 @@
+gunicorn==19.0 # Used as a http server in production
+psycopg2 # Postgres adapter
diff --git a/requirements-testing.txt b/requirements-testing.txt
new file mode 100644
index 000000000..18c7c3290
--- /dev/null
+++ b/requirements-testing.txt
@@ -0,0 +1,8 @@
+cov-core>=1.7
+coverage>=4.0.1
+factory-boy>=1.1.3
+lettuce>=0.2.20
+nose-cov>=1.6
+teamcity-messages>=1.16
+flake8>=2.4.1
+pepper8>=1.0.4 # Transforms flake8 output to HTML report + TC messages
diff --git a/requirements.txt b/requirements.txt
index 8195d3af7..4c25ac940 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,9 +7,7 @@ django-filter==0.11.0 # Filtering for DRF
 # Upstream is missing Python 3 patches
 git+https://github.com/JelteF/python-memcached@patch-1#egg=python-memcached==1.58
 markdown2==2.3.0 # textarea text formatting
-psycopg2 # Postgres adapter
-pytz # Timezone lib. Obsolete?
-gunicorn==19.0
+pytz # Timezone lib
 icalendar==3.8.4
 stripe==1.20.2
 
@@ -39,21 +37,12 @@ pdfdocument==3.1
 Unidecode==0.4.17 # Translates every unicode symbol to the closest ascii. online_mail_usernames
 django-markdown-deux==1.0.5
 django-formset-js==0.4.3
-werkzeug==0.10.4
 django-cors-headers==1.1.0 # Enable CORS for Nibble
 django-datetime-widget==0.9.3 # Datetime widget for forms
 
 #test tools
-cov-core==1.7
-coverage==4.0.1
-django-nose>=1.4,<1.5
-factory-boy==1.1.3
-lettuce==0.2.20
-nose==1.3.4
-nose-cov==1.6
-teamcity-messages==1.16
-flake8==2.4.1
-pepper8>=1.0.4 # Transforms flake8 output to HTML report + TC messages
+django-nose>=1.4,<1.5 # We use this test runner locally
+nose==1.3.4 # We use this test runner locally
 
 # Frigg
 requests[security]==2.8.0
@@ -71,4 +60,3 @@ django-sekizai>=0.9
 # imagemagick # From package repository. Tested with version == 8:6.7.7.10.
 # libjpeg-dev     # From package repository.
 # libpq-dev       # From package repository.
-
diff --git a/tox.ini b/tox.ini
index bf7efaf24..ba0dc21bf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -15,7 +15,9 @@ commands =
     coverage: coverage xml
 
 [testenv:tests]
-deps = -r{toxinidir}/requirements.txt
+deps = 
+    -r{toxinidir}/requirements.txt
+    -r{toxinidir}/requirements-testing.txt
 whitelist_externals =
     /bin/cp
 setenv =
```
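Note how the `tox.ini` hunk above wires the split in: the `tests` environment now layers `requirements-testing.txt` on top of the base file, so the test-only packages never have to live in `requirements.txt`. Roughly (the invocation is assumed from the `[testenv:tests]` section shown in the diff):

```console
tox -e tests   # tox installs requirements.txt + requirements-testing.txt, then runs the suite
```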
But once this is fixed (in either 1.0.2 or the upcoming 2.0.0 version of the library), we will restore it. \n\n### Apache Airflow version\n\nmain (development)\n\n### Operating System\n\nAll\n\n### Deployment\n\nOther\n\n### Deployment details\n\nJust Breeze with Python 3.10\n\n### What happened\n\nThe tests are failing:\r\n\r\n```\r\n self = \r\n item = ['file1', 'file2', 'file3']\r\n \r\n def escape_item(self, item):\r\n if item is None:\r\n return 'NULL'\r\n elif isinstance(item, (int, float)):\r\n return self.escape_number(item)\r\n elif isinstance(item, basestring):\r\n return self.escape_string(item)\r\n > elif isinstance(item, collections.Iterable):\r\n E AttributeError: module 'collections' has no attribute 'Iterable'\r\n ```\r\n\r\nhttps://github.com/apache/airflow/runs/5523057543?check_suite_focus=true#step:8:16781 \r\n\n\n### What you expected to happen\n\nTests succeed :)\n\n### How to reproduce\n\nRun `TestDatabricksSqlCopyIntoOperator` in a Python 3.10 environment.\n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n"},"before_files":{"kind":"list like","value":[{"content":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Setup.py for the Airflow project.\"\"\"\nimport glob\nimport logging\nimport os\nimport subprocess\nimport sys\nimport unittest\nfrom copy import deepcopy\nfrom os.path import dirname, relpath\nfrom textwrap import wrap\nfrom typing import Dict, List\n\nfrom setuptools import Command, Distribution, find_namespace_packages, setup\nfrom setuptools.command.develop import develop as develop_orig\nfrom setuptools.command.install import install as install_orig\n\n# Setuptools patches this import to point to a vendored copy instead of the\n# stdlib, which is deprecated in Python 3.10 and will be removed in 3.12.\nfrom distutils import log # isort: skip\n\n# Controls whether providers are installed from packages or directly from sources\n# It is turned on by default in case of development environments such as Breeze\n# And it is particularly useful when you add a new provider and there is no\n# PyPI version to install the provider package from\nINSTALL_PROVIDERS_FROM_SOURCES = 'INSTALL_PROVIDERS_FROM_SOURCES'\nPY39 = sys.version_info >= (3, 9)\n\nlogger = logging.getLogger(__name__)\n\nversion = '2.3.0.dev0'\n\nmy_dir = dirname(__file__)\n\n\ndef airflow_test_suite() -> unittest.TestSuite:\n \"\"\"Test suite for Airflow tests\"\"\"\n test_loader = unittest.TestLoader()\n test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')\n return test_suite\n\n\nclass CleanCommand(Command):\n \"\"\"\n Command to tidy up the project root.\n Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.\n \"\"\"\n\n description = \"Tidy up the project root\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n @staticmethod\n def rm_all_files(files: List[str]) -> None:\n \"\"\"Remove all files from the list\"\"\"\n for file in files:\n try:\n os.remove(file)\n except Exception as e:\n logger.warning(\"Error when removing %s: %s\", file, e)\n\n def run(self) -> None:\n \"\"\"Remove temporary files and directories.\"\"\"\n os.chdir(my_dir)\n self.rm_all_files(glob.glob('./build/*'))\n self.rm_all_files(glob.glob('./**/__pycache__/*', recursive=True))\n self.rm_all_files(glob.glob('./**/*.pyc', recursive=True))\n self.rm_all_files(glob.glob('./dist/*'))\n self.rm_all_files(glob.glob('./*.egg-info'))\n self.rm_all_files(glob.glob('./docker-context-files/*.whl'))\n self.rm_all_files(glob.glob('./docker-context-files/*.tgz'))\n\n\nclass CompileAssets(Command):\n \"\"\"\n Compile and build the frontend assets using yarn and webpack.\n Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.\n \"\"\"\n\n description = \"Compile and build the frontend assets\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"Run a command to compile and build assets.\"\"\"\n subprocess.check_call('./airflow/www/compile_assets.sh')\n\n\nclass ListExtras(Command):\n \"\"\"\n List all available extras\n Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.\n \"\"\"\n\n description = \"List available extras\"\n user_options: List[str] = []\n\n def 
initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"List extras.\"\"\"\n print(\"\\n\".join(wrap(\", \".join(EXTRAS_REQUIREMENTS.keys()), 100)))\n\n\ndef git_version(version_: str) -> str:\n \"\"\"\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a 'release:{version}' prefix\n and the latter with a '.dev0' suffix. Following the prefix will be a sha of the current\n branch head. Finally, a \"dirty\" suffix is appended to indicate that uncommitted\n changes are present.\n\n :param str version_: Semver version\n :return: Found Airflow version in Git repo\n :rtype: str\n \"\"\"\n try:\n import git\n\n try:\n repo = git.Repo(os.path.join(*[my_dir, '.git']))\n except git.NoSuchPathError:\n logger.warning('.git directory not found: Cannot compute the git version')\n return ''\n except git.InvalidGitRepositoryError:\n logger.warning('Invalid .git directory not found: Cannot compute the git version')\n return ''\n except ImportError:\n logger.warning('gitpython not found: Cannot compute the git version.')\n return ''\n if repo:\n sha = repo.head.commit.hexsha\n if repo.is_dirty():\n return f'.dev0+{sha}.dirty'\n # commit is clean\n return f'.release:{version_}+{sha}'\n return 'no_git_version'\n\n\ndef write_version(filename: str = os.path.join(*[my_dir, \"airflow\", \"git_version\"])) -> None:\n \"\"\"\n Write the Semver version + git hash to file, e.g. \".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65\".\n\n :param str filename: Destination file to write\n \"\"\"\n text = f\"{git_version(version)}\"\n with open(filename, 'w') as file:\n file.write(text)\n\n\npandas_requirement = 'pandas>=0.17.1'\n\n# 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py\n# If you change this mark you should also change ./scripts/ci/check_order_setup.py\n# Start dependencies group\nalibaba = [\n 'oss2>=2.14.0',\n]\namazon = [\n 'boto3>=1.15.0',\n # watchtower 3 has been released end Jan and introduced breaking change across the board that might\n # change logging behaviour:\n # https://github.com/kislyuk/watchtower/blob/develop/Changes.rst#changes-for-v300-2022-01-26\n # TODO: update to watchtower >3\n 'watchtower~=2.0.1',\n 'jsonpath_ng>=1.5.3',\n 'redshift_connector>=2.0.888',\n 'sqlalchemy_redshift>=0.8.6',\n pandas_requirement,\n 'mypy-boto3-rds>=1.21.0',\n 'mypy-boto3-redshift-data>=1.21.0',\n]\napache_beam = [\n 'apache-beam>=2.33.0',\n]\narangodb = ['python-arango>=7.3.2']\nasana = ['asana>=0.10']\nasync_packages = [\n 'eventlet>=0.9.7',\n 'gevent>=0.13',\n 'greenlet>=0.4.9',\n]\natlas = [\n 'atlasclient>=0.1.2',\n]\nazure = [\n 'azure-batch>=8.0.0',\n 'azure-cosmos>=4.0.0',\n 'azure-datalake-store>=0.0.45',\n 'azure-identity>=1.3.1',\n 'azure-keyvault-secrets>=4.1.0,<5.0',\n 'azure-kusto-data>=0.0.43,<0.1',\n # Azure integration uses old librarires and the limits below reflect that\n # TODO: upgrade to newer versions of all the below libraries\n 'azure-mgmt-containerinstance>=1.5.0,<2.0',\n 'azure-mgmt-datafactory>=1.0.0,<2.0',\n 'azure-mgmt-datalake-store>=0.5.0',\n 'azure-mgmt-resource>=2.2.0',\n # limited due to https://github.com/Azure/azure-sdk-for-python/pull/18801 implementation released in 12.9\n 
'azure-storage-blob>=12.7.0,<12.9.0',\n 'azure-storage-common>=2.1.0',\n 'azure-storage-file>=2.1.0',\n]\ncassandra = [\n 'cassandra-driver>=3.13.0',\n]\ncelery = [\n # The Celery is known to introduce problems when upgraded to a MAJOR version. Airflow Core\n # Uses Celery for CeleryExecutor, and we also know that Kubernetes Python client follows SemVer\n # (https://docs.celeryq.dev/en/stable/contributing.html?highlight=semver#versions).\n # This is a crucial component of Airflow, so we should limit it to the next MAJOR version and only\n # deliberately bump the version when we tested it, and we know it can be bumped.\n # Bumping this version should also be connected with\n # limiting minimum airflow version supported in cncf.kubernetes provider, due to the\n # potential breaking changes in Airflow Core as well (celery is added as extra, so Airflow\n # core is not hard-limited via install-requirements, only by extra).\n 'celery>=5.2.3,<6',\n 'flower>=1.0.0',\n]\ncgroups = [\n # Cgroupspy 0.2.2 added Python 3.10 compatibility\n 'cgroupspy>=0.2.2',\n]\ncloudant = [\n 'cloudant>=2.0',\n]\ndask = [\n # Dask support is limited, we need Dask team to upgrade support for dask if we were to continue\n # Supporting it in the future\n 'cloudpickle>=1.4.1',\n 'dask>=2.9.0',\n 'distributed>=2.11.1',\n]\ndatabricks = [\n 'requests>=2.26.0, <3',\n 'databricks-sql-connector>=1.0.0, <2.0.0',\n]\ndatadog = [\n 'datadog>=0.14.0',\n]\ndeprecated_api = [\n 'requests>=2.26.0',\n]\ndoc = [\n 'click>=8.0',\n 'sphinx>=4.4.0',\n # Docutils 0.17.0 converts generated
<section> into <div> and breaks our doc formatting\n # By adding a lot of whitespace separation. This limit can be lifted when we update our doc to handle\n # <section>
tags for sections\n 'docutils<0.17.0',\n # Without this, Sphinx goes in to a _very_ large backtrack on Python 3.7,\n # even though Sphinx 4.4.0 has this but with python_version<3.10.\n 'importlib-metadata>=4.4; python_version < \"3.8\"',\n 'sphinx-airflow-theme',\n 'sphinx-argparse>=0.1.13',\n 'sphinx-autoapi>=1.8.0',\n 'sphinx-copybutton',\n 'sphinx-jinja>=2.0',\n 'sphinx-rtd-theme>=0.1.6',\n 'sphinxcontrib-httpdomain>=1.7.0',\n 'sphinxcontrib-redoc>=1.6.0',\n 'sphinxcontrib-spelling>=7.3',\n]\ndocker = [\n 'docker>=5.0.3',\n]\ndrill = ['sqlalchemy-drill>=1.1.0', 'sqlparse>=0.4.1']\ndruid = [\n 'pydruid>=0.4.1',\n]\nelasticsearch = [\n 'elasticsearch>7',\n 'elasticsearch-dbapi',\n 'elasticsearch-dsl>=5.0.0',\n]\nexasol = ['pyexasol>=0.5.1', pandas_requirement]\nfacebook = [\n 'facebook-business>=6.0.2',\n]\nflask_appbuilder_authlib = [\n 'authlib',\n]\ngithub = [\n 'pygithub',\n]\ngoogle = [\n # Google has very clear rules on what dependencies should be used. All the limits below\n # follow strict guidelines of Google Libraries as quoted here:\n # While this issue is open, dependents of google-api-core, google-cloud-core. and google-auth\n # should preserve >1, <3 pins on these packages.\n # https://github.com/googleapis/google-cloud-python/issues/10566\n # Some of Google Packages are limited to <2.0.0 because 2.0.0 releases of the libraries\n # Introduced breaking changes across the board. Those libraries should be upgraded soon\n # TODO: Upgrade all Google libraries that are limited to <2.0.0\n 'PyOpenSSL',\n # The Google Ads 14.0.1 breaks PIP and eager upgrade as it requires\n # google-api-core>=2.0.0 which cannot be used yet (see below comment)\n # and https://github.com/apache/airflow/issues/18705#issuecomment-933746150\n 'google-ads>=12.0.0,<14.0.1',\n 'google-api-core>=2.7.0,<3.0.0',\n 'google-api-python-client>=1.6.0,<2.0.0',\n 'google-auth>=1.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'google-cloud-aiplatform>=1.7.1,<2.0.0',\n 'google-cloud-automl>=2.1.0',\n 'google-cloud-bigquery-datatransfer>=3.0.0',\n 'google-cloud-bigtable>=1.0.0,<2.0.0',\n 'google-cloud-build>=3.0.0',\n 'google-cloud-container>=0.1.1,<2.0.0',\n 'google-cloud-datacatalog>=3.0.0',\n 'google-cloud-dataplex>=0.1.0',\n 'google-cloud-dataproc>=3.1.0',\n 'google-cloud-dataproc-metastore>=1.2.0,<2.0.0',\n 'google-cloud-dlp>=0.11.0,<2.0.0',\n 'google-cloud-kms>=2.0.0',\n 'google-cloud-language>=1.1.1,<2.0.0',\n 'google-cloud-logging>=2.1.1',\n 'google-cloud-memcache>=0.2.0',\n 'google-cloud-monitoring>=2.0.0',\n 'google-cloud-os-login>=2.0.0',\n 'google-cloud-orchestration-airflow>=1.0.0,<2.0.0',\n 'google-cloud-pubsub>=2.0.0',\n 'google-cloud-redis>=2.0.0',\n 'google-cloud-secret-manager>=0.2.0,<2.0.0',\n 'google-cloud-spanner>=1.10.0,<2.0.0',\n 'google-cloud-speech>=0.36.3,<2.0.0',\n 'google-cloud-storage>=1.30,<2.0.0',\n 'google-cloud-tasks>=2.0.0',\n 'google-cloud-texttospeech>=0.4.0,<2.0.0',\n 'google-cloud-translate>=1.5.0,<2.0.0',\n 'google-cloud-videointelligence>=1.7.0,<2.0.0',\n 'google-cloud-vision>=0.35.2,<2.0.0',\n 'google-cloud-workflows>=0.1.0,<2.0.0',\n 'grpcio-gcp>=0.2.2',\n 'httpx',\n 'json-merge-patch>=0.2',\n 'looker-sdk>=22.2.0',\n 'pandas-gbq',\n pandas_requirement,\n 'sqlalchemy-bigquery>=1.2.1',\n]\ngrpc = [\n # Google has very clear rules on what dependencies should be used. All the limits below\n # follow strict guidelines of Google Libraries as quoted here:\n # While this issue is open, dependents of google-api-core, google-cloud-core. 
and google-auth\n # should preserve >1, <3 pins on these packages.\n # https://github.com/googleapis/google-cloud-python/issues/10566\n 'google-auth>=1.0.0, <3.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'grpcio>=1.15.0',\n]\nhashicorp = [\n 'hvac>=0.10',\n]\nhdfs = [\n 'snakebite-py3',\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nhive = [\n 'hmsclient>=0.1.0',\n 'pyhive[hive]>=0.6.0',\n # in case of Python 3.9 sasl library needs to be installed with version higher or equal than\n # 0.3.1 because only that version supports Python 3.9. For other Python version pyhive[hive] pulls\n # the sasl library anyway (and there sasl library version is not relevant)\n 'sasl>=0.3.1; python_version>=\"3.9\"',\n 'thrift>=0.9.2',\n pandas_requirement,\n]\nhttp = [\n # The 2.26.0 release of requests got rid of the chardet LGPL mandatory dependency, allowing us to\n # release it as a requirement for airflow\n 'requests>=2.26.0',\n]\nhttp_provider = [\n 'apache-airflow-providers-http',\n]\ninfluxdb = [\n 'influxdb-client>=1.19.0',\n pandas_requirement,\n]\njdbc = [\n 'jaydebeapi>=1.1.1',\n]\njenkins = [\n 'python-jenkins>=1.0.0',\n]\njira = [\n 'JIRA>1.0.7',\n]\nkerberos = [\n 'pykerberos>=1.1.13',\n 'requests_kerberos>=0.10.0',\n 'thrift_sasl>=0.2.0',\n]\nkubernetes = [\n 'cryptography>=2.0.0',\n # The Kubernetes API is known to introduce problems when upgraded to a MAJOR version. Airflow Core\n # Uses Kubernetes for Kubernetes executor, and we also know that Kubernetes Python client follows SemVer\n # (https://github.com/kubernetes-client/python#compatibility). This is a crucial component of Airflow\n # So we should limit it to the next MAJOR version and only deliberately bump the version when we\n # tested it, and we know it can be bumped. Bumping this version should also be connected with\n # limiting minimum airflow version supported in cncf.kubernetes provider, due to the\n # potential breaking changes in Airflow Core as well (kubernetes is added as extra, so Airflow\n # core is not hard-limited via install-requirements, only by extra).\n 'kubernetes>=21.7.0,<24',\n]\nkylin = ['kylinpy>=2.6']\nldap = [\n 'ldap3>=2.5.1',\n 'python-ldap',\n]\nleveldb = ['plyvel; platform_machine != \"aarch64\"']\nmongo = [\n 'dnspython>=1.13.0',\n # pymongo 4.0.0 removes connection option `ssl_cert_reqs` which is used in providers-mongo/2.2.0\n # TODO: Upgrade to pymongo 4.0.0+\n 'pymongo>=3.6.0,<4.0.0',\n]\nmssql = [\n 'pymssql>=2.1.5; platform_machine != \"aarch64\"',\n]\nmysql = [\n 'mysql-connector-python>=8.0.11; platform_machine != \"aarch64\"',\n 'mysqlclient>=1.3.6; platform_machine != \"aarch64\"',\n]\nneo4j = ['neo4j>=4.2.1']\nodbc = [\n 'pyodbc',\n]\nopsgenie = [\n 'opsgenie-sdk>=2.1.5',\n]\noracle = [\n 'cx_Oracle>=5.1.2',\n]\npagerduty = [\n 'pdpyras>=4.1.2',\n]\npandas = [\n pandas_requirement,\n]\npapermill = [\n 'papermill[all]>=1.2.1',\n 'scrapbook[all]',\n]\npassword = [\n 'bcrypt>=2.0.0',\n 'flask-bcrypt>=0.7.1',\n]\npinot = [\n # pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it\n # causes a problem with newer versions.\n 'pinotdb>0.1.2',\n]\nplexus = [\n 'arrow>=0.16.0',\n]\npostgres = [\n 'psycopg2-binary>=2.7.4',\n]\npresto = [\n # The limit to Presto 0.8 for unknown reason\n # TODO: Remove the limit\n 'presto-python-client>=0.7.0,<0.8',\n pandas_requirement,\n]\npsrp = [\n 'pypsrp>=0.8',\n]\nqubole = [\n 'qds-sdk>=1.10.4',\n]\nrabbitmq = [\n 'amqp',\n]\nredis = [\n # Redis 4 introduced a number of changes that likely need testing including mixins in redis 
commands\n # as well as unquoting URLS with `urllib.parse.unquote`:\n # https://github.com/redis/redis-py/blob/master/CHANGES\n # TODO: upgrade to support redis package >=4\n 'redis~=3.2',\n]\nsalesforce = ['simple-salesforce>=1.0.0', 'tableauserverclient', pandas_requirement]\nsamba = [\n 'smbprotocol>=1.5.0',\n]\nsegment = [\n 'analytics-python>=1.2.9',\n]\nsendgrid = [\n 'sendgrid>=6.0.0',\n]\nsentry = [\n 'blinker>=1.1',\n 'sentry-sdk>=0.8.0',\n]\nsingularity = ['spython>=0.0.56']\nslack = [\n 'slack_sdk>=3.0.0',\n]\nsnowflake = [\n 'snowflake-connector-python>=2.4.1',\n 'snowflake-sqlalchemy>=1.1.0',\n]\nspark = [\n 'pyspark',\n]\nssh = [\n 'paramiko>=2.6.0',\n 'pysftp>=0.2.9',\n 'sshtunnel>=0.3.2',\n]\nstatsd = [\n 'statsd>=3.3.0',\n]\ntableau = [\n 'tableauserverclient',\n]\ntelegram = [\n 'python-telegram-bot>=13.0',\n]\ntrino = [\n 'trino>=0.301.0',\n pandas_requirement,\n]\nvertica = [\n 'vertica-python>=0.5.1',\n]\nvirtualenv = [\n 'virtualenv',\n]\nwebhdfs = [\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nwinrm = [\n 'pywinrm>=0.4',\n]\nyandex = [\n 'yandexcloud>=0.146.0',\n]\nzendesk = [\n 'zenpy>=2.0.24',\n]\n# End dependencies group\n\n# Mypy 0.900 and above ships only with stubs from stdlib so if we need other stubs, we need to install them\n# manually as `types-*`. See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports\n# for details. Wy want to install them explicitly because we want to eventually move to\n# mypyd which does not support installing the types dynamically with --install-types\nmypy_dependencies = [\n # TODO: upgrade to newer versions of MyPy continuously as they are released\n 'mypy==0.910',\n 'types-boto',\n 'types-certifi',\n 'types-croniter',\n 'types-Deprecated',\n 'types-docutils',\n 'types-freezegun',\n 'types-paramiko',\n 'types-protobuf',\n 'types-python-dateutil',\n 'types-python-slugify',\n 'types-pytz',\n 'types-redis',\n 'types-requests',\n 'types-setuptools',\n 'types-termcolor',\n 'types-tabulate',\n 'types-toml',\n 'types-Markdown',\n 'types-PyMySQL',\n 'types-PyYAML',\n]\n\n# Dependencies needed for development only\ndevel_only = [\n 'aws_xray_sdk',\n 'beautifulsoup4>=4.7.1',\n 'black',\n 'blinker',\n 'bowler',\n 'click>=8.0',\n 'coverage',\n 'filelock',\n 'flake8>=3.6.0',\n 'flake8-colors',\n 'flaky',\n 'freezegun',\n # Github3 version 3.1.2 requires PyJWT>=2.3.0 which clashes with Flask App Builder where PyJWT is <2.0.0\n # Actually GitHub3.1.0 already introduced PyJWT>=2.3.0 but so far `pip` was able to resolve it without\n # getting into a long backtracking loop and figure out that github3 3.0.0 version is the right version\n # similarly limiting it to 3.1.2 causes pip not to enter the backtracking loop. Apparently when there\n # are 3 versions with PyJWT>=2.3.0 (3.1.0, 3.1.1 an 3.1.2) pip enters into backtrack loop and fails\n # to resolve that github3 3.0.0 is the right version to use.\n # This limitation could be removed if PyJWT limitation < 2.0.0 is dropped from FAB or when\n # pip resolution is improved to handle the case. 
The issue which describes this PIP behaviour\n # and hopefully allowing to improve it is tracked in https://github.com/pypa/pip/issues/10924\n 'github3.py<3.1.0',\n 'gitpython',\n 'ipdb',\n 'jira',\n 'jsondiff',\n 'mongomock',\n 'moto>=3.1.0',\n 'parameterized',\n 'paramiko',\n 'pipdeptree',\n 'pre-commit',\n 'pypsrp',\n 'pygithub',\n 'pysftp',\n # Pytest 7 has been released in February 2022 and we should attempt to upgrade and remove the limit\n # It contains a number of potential breaking changes but none of them looks breaking our use\n # https://docs.pytest.org/en/latest/changelog.html#pytest-7-0-0-2022-02-03\n # TODO: upgrade it and remove the limit\n 'pytest~=6.0',\n 'pytest-asyncio',\n 'pytest-cov',\n 'pytest-instafail',\n # We should attempt to remove the limit when we upgrade Pytest\n # TODO: remove the limit when we upgrade pytest\n 'pytest-rerunfailures~=9.1',\n 'pytest-timeouts',\n 'pytest-xdist',\n 'python-jose',\n 'pywinrm',\n 'qds-sdk>=1.9.6',\n 'pytest-httpx',\n 'requests_mock',\n 'rich_click',\n 'semver',\n 'twine',\n 'wheel',\n 'yamllint',\n]\n\ndevel = cgroups + devel_only + doc + kubernetes + mypy_dependencies + mysql + pandas + password\ndevel_hadoop = devel + hdfs + hive + kerberos + presto + webhdfs\n\n# Dict of all providers which are part of the Apache Airflow repository together with their requirements\nPROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {\n 'airbyte': http_provider,\n 'alibaba': alibaba,\n 'amazon': amazon,\n 'apache.beam': apache_beam,\n 'apache.cassandra': cassandra,\n 'apache.drill': drill,\n 'apache.druid': druid,\n 'apache.hdfs': hdfs,\n 'apache.hive': hive,\n 'apache.kylin': kylin,\n 'apache.livy': http_provider,\n 'apache.pig': [],\n 'apache.pinot': pinot,\n 'apache.spark': spark,\n 'apache.sqoop': [],\n 'arangodb': arangodb,\n 'asana': asana,\n 'celery': celery,\n 'cloudant': cloudant,\n 'cncf.kubernetes': kubernetes,\n 'databricks': databricks,\n 'datadog': datadog,\n 'dbt.cloud': http_provider,\n 'dingding': [],\n 'discord': [],\n 'docker': docker,\n 'elasticsearch': elasticsearch,\n 'exasol': exasol,\n 'facebook': facebook,\n 'ftp': [],\n 'github': github,\n 'google': google,\n 'grpc': grpc,\n 'hashicorp': hashicorp,\n 'http': http,\n 'imap': [],\n 'influxdb': influxdb,\n 'jdbc': jdbc,\n 'jenkins': jenkins,\n 'jira': jira,\n 'microsoft.azure': azure,\n 'microsoft.mssql': mssql,\n 'microsoft.psrp': psrp,\n 'microsoft.winrm': winrm,\n 'mongo': mongo,\n 'mysql': mysql,\n 'neo4j': neo4j,\n 'odbc': odbc,\n 'openfaas': [],\n 'opsgenie': opsgenie,\n 'oracle': oracle,\n 'pagerduty': pagerduty,\n 'papermill': papermill,\n 'plexus': plexus,\n 'postgres': postgres,\n 'presto': presto,\n 'qubole': qubole,\n 'redis': redis,\n 'salesforce': salesforce,\n 'samba': samba,\n 'segment': segment,\n 'sendgrid': sendgrid,\n 'sftp': ssh,\n 'singularity': singularity,\n 'slack': slack,\n 'snowflake': snowflake,\n 'sqlite': [],\n 'ssh': ssh,\n 'tableau': tableau,\n 'telegram': telegram,\n 'trino': trino,\n 'vertica': vertica,\n 'yandex': yandex,\n 'zendesk': zendesk,\n}\n\n# Those are all additional extras which do not have their own 'providers'\n# The 'apache.atlas' and 'apache.webhdfs' are extras that provide additional libraries\n# but they do not have separate providers (yet?), they are merely there to add extra libraries\n# That can be used in custom python/bash operators.\nADDITIONAL_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'apache.atlas': atlas,\n 'apache.webhdfs': webhdfs,\n}\n\n\n# Those are extras that are extensions of the 'core' Airflow. 
They provide additional features\n# To airflow core. They do not have separate providers because they do not have any operators/hooks etc.\nCORE_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'async': async_packages,\n 'celery': celery, # also has provider, but it extends the core with the CeleryExecutor\n 'cgroups': cgroups,\n 'cncf.kubernetes': kubernetes, # also has provider, but it extends the core with the KubernetesExecutor\n 'dask': dask,\n 'deprecated_api': deprecated_api,\n 'github_enterprise': flask_appbuilder_authlib,\n 'google_auth': flask_appbuilder_authlib,\n 'kerberos': kerberos,\n 'ldap': ldap,\n 'leveldb': leveldb,\n 'pandas': pandas,\n 'password': password,\n 'rabbitmq': rabbitmq,\n 'sentry': sentry,\n 'statsd': statsd,\n 'virtualenv': virtualenv,\n}\n\nEXTRAS_REQUIREMENTS: Dict[str, List[str]] = deepcopy(CORE_EXTRAS_REQUIREMENTS)\n\n\ndef add_extras_for_all_providers() -> None:\n \"\"\"\n Adds extras for all providers.\n By default all providers have the same extra name as provider id, for example\n 'apache.hive' extra has 'apache.hive' provider requirement.\n \"\"\"\n for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[provider_name] = provider_requirement\n\n\ndef add_additional_extras() -> None:\n \"\"\"Adds extras for all additional extras.\"\"\"\n for extra_name, extra_requirement in ADDITIONAL_EXTRAS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[extra_name] = extra_requirement\n\n\nadd_extras_for_all_providers()\nadd_additional_extras()\n\n#############################################################################################################\n# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series\n#############################################################################################################\n\n# Dictionary of aliases from 1.10 - deprecated in Airflow 2.*\nEXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {\n 'atlas': 'apache.atlas',\n 'aws': 'amazon',\n 'azure': 'microsoft.azure',\n 'cassandra': 'apache.cassandra',\n 'crypto': '', # All crypto requirements are installation requirements of core Airflow\n 'druid': 'apache.druid',\n 'gcp': 'google',\n 'gcp_api': 'google',\n 'hdfs': 'apache.hdfs',\n 'hive': 'apache.hive',\n 'kubernetes': 'cncf.kubernetes',\n 'mssql': 'microsoft.mssql',\n 'pinot': 'apache.pinot',\n 'qds': 'qubole',\n 's3': 'amazon',\n 'spark': 'apache.spark',\n 'webhdfs': 'apache.webhdfs',\n 'winrm': 'microsoft.winrm',\n}\n\nEXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS: List[str] = [\n \"crypto\",\n \"webhdfs\",\n]\n\n\ndef add_extras_for_all_deprecated_aliases() -> None:\n \"\"\"\n Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same\n as the extras they are replaced with.\n The requirements are not copies - those are the same lists as for the new extras. This is intended.\n Thanks to that if the original extras are later extended with providers, aliases are extended as well.\n \"\"\"\n for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():\n requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []\n if requirements is None:\n raise Exception(f\"The extra {extra} is missing for deprecated alias {alias}\")\n EXTRAS_REQUIREMENTS[alias] = requirements\n\n\ndef add_all_deprecated_provider_packages() -> None:\n \"\"\"\n For deprecated aliases that are providers, we will swap the providers requirements to instead\n be the provider itself.\n\n e.g. 
{\"kubernetes\": [\"kubernetes>=3.0.0, <12.0.0\", ...]} becomes\n {\"kubernetes\": [\"apache-airflow-provider-cncf-kubernetes\"]}\n \"\"\"\n for alias, provider in EXTRAS_DEPRECATED_ALIASES.items():\n if alias in EXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS:\n continue\n replace_extra_requirement_with_provider_packages(alias, [provider])\n\n\nadd_extras_for_all_deprecated_aliases()\n\n#############################################################################################################\n# End of deprecated section\n#############################################################################################################\n\n# This is list of all providers. It's a shortcut for anyone who would like to easily get list of\n# All providers. It is used by pre-commits.\nALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())\n\nALL_DB_PROVIDERS = [\n 'apache.cassandra',\n 'apache.drill',\n 'apache.druid',\n 'apache.hdfs',\n 'apache.hive',\n 'apache.pinot',\n 'arangodb',\n 'cloudant',\n 'databricks',\n 'exasol',\n 'influxdb',\n 'microsoft.mssql',\n 'mongo',\n 'mysql',\n 'neo4j',\n 'postgres',\n 'presto',\n 'trino',\n 'vertica',\n]\n\n# Special requirements for all database-related providers. They are de-duplicated.\nall_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})\n\n# Requirements for all \"user\" extras (no devel). They are de-duplicated. Note that we do not need\n# to separately add providers requirements - they have been already added as 'providers' extras above\n_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})\n\n# All user extras here\nEXTRAS_REQUIREMENTS[\"all\"] = _all_requirements\n\n# All db user extras here\nEXTRAS_REQUIREMENTS[\"all_dbs\"] = all_dbs + pandas\n\n# This can be simplified to devel_hadoop + _all_requirements due to inclusions\n# but we keep it for explicit sake. 
We are de-duplicating it anyway.\ndevel_all = list(set(_all_requirements + doc + devel + devel_hadoop))\n\n# Those are packages excluded for \"all\" dependencies\nPACKAGES_EXCLUDED_FOR_ALL = []\nPACKAGES_EXCLUDED_FOR_ALL.extend(\n [\n 'snakebite',\n ]\n)\n\n\ndef is_package_excluded(package: str, exclusion_list: List[str]) -> bool:\n \"\"\"\n Checks if package should be excluded.\n\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n \"\"\"\n return any(package.startswith(excluded_package) for excluded_package in exclusion_list)\n\n\ndevel_all = [\n package\n for package in devel_all\n if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)\n]\n\ndevel_ci = devel_all\n\n\n# Those are extras that we have to add for development purposes\n# They can be use to install some predefined set of dependencies.\nEXTRAS_REQUIREMENTS[\"doc\"] = doc\nEXTRAS_REQUIREMENTS[\"devel\"] = devel # devel already includes doc\nEXTRAS_REQUIREMENTS[\"devel_hadoop\"] = devel_hadoop # devel_hadoop already includes devel\nEXTRAS_REQUIREMENTS[\"devel_all\"] = devel_all\nEXTRAS_REQUIREMENTS[\"devel_ci\"] = devel_ci\n\n\ndef sort_extras_requirements() -> Dict[str, List[str]]:\n \"\"\"\n The dictionary order remains when keys() are retrieved.\n Sort both: extras and list of dependencies to make it easier to analyse problems\n external packages will be first, then if providers are added they are added at the end of the lists.\n \"\"\"\n sorted_requirements = dict(sorted(EXTRAS_REQUIREMENTS.items()))\n for extra_list in sorted_requirements.values():\n extra_list.sort()\n return sorted_requirements\n\n\nEXTRAS_REQUIREMENTS = sort_extras_requirements()\n\n# Those providers are pre-installed always when airflow is installed.\n# Those providers do not have dependency on airflow2.0 because that would lead to circular dependencies.\n# This is not a problem for PIP but some tools (pipdeptree) show those as a warning.\nPREINSTALLED_PROVIDERS = [\n 'ftp',\n 'http',\n 'imap',\n 'sqlite',\n]\n\n\ndef get_provider_package_from_package_id(package_id: str) -> str:\n \"\"\"\n Builds the name of provider package out of the package id provided/\n\n :param package_id: id of the package (like amazon or microsoft.azure)\n :return: full name of package in PyPI\n \"\"\"\n package_suffix = package_id.replace(\".\", \"-\")\n return f\"apache-airflow-providers-{package_suffix}\"\n\n\ndef get_excluded_providers() -> List[str]:\n \"\"\"Returns packages excluded for the current python version.\"\"\"\n return []\n\n\ndef get_all_provider_packages() -> str:\n \"\"\"Returns all provider packages configured in setup.py\"\"\"\n excluded_providers = get_excluded_providers()\n return \" \".join(\n get_provider_package_from_package_id(package)\n for package in PROVIDERS_REQUIREMENTS\n if package not in excluded_providers\n )\n\n\nclass AirflowDistribution(Distribution):\n \"\"\"The setuptools.Distribution subclass with Airflow specific behaviour\"\"\"\n\n def __init__(self, attrs=None):\n super().__init__(attrs)\n self.install_requires = None\n\n def parse_config_files(self, *args, **kwargs) -> None:\n \"\"\"\n Ensure that when we have been asked to install providers from sources\n that we don't *also* try to install those providers from PyPI.\n Also we should make sure that in this case we copy provider.yaml files so that\n Providers manager can find package information.\n \"\"\"\n super().parse_config_files(*args, **kwargs)\n if 
os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n self.install_requires = [\n req for req in self.install_requires if not req.startswith('apache-airflow-providers-')\n ]\n provider_yaml_files = glob.glob(\"airflow/providers/**/provider.yaml\", recursive=True)\n for provider_yaml_file in provider_yaml_files:\n provider_relative_path = relpath(provider_yaml_file, os.path.join(my_dir, \"airflow\"))\n self.package_data['airflow'].append(provider_relative_path)\n else:\n self.install_requires.extend(\n [get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS]\n )\n\n\ndef replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:\n \"\"\"\n Replaces extra requirement with provider package. The intention here is that when\n the provider is added as dependency of extra, there is no need to add the dependencies\n separately. This is not needed and even harmful, because in case of future versions of\n the provider, the requirements might change, so hard-coding requirements from the version\n that was available at the release time might cause dependency conflicts in the future.\n\n Say for example that you have salesforce provider with those deps:\n\n { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }\n\n Initially ['salesforce'] extra has those requirements and it works like that when you install\n it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when\n the production installation is used, The dependencies are changed:\n\n { 'salesforce': ['apache-airflow-providers-salesforce'] }\n\n And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:\n ['simple-salesforce>=1.0.0', 'tableauserverclient']\n\n So transitively 'salesforce' extra has all the requirements it needs and in case the provider\n changes its dependencies, they will transitively change as well.\n\n In the constraint mechanism we save both - provider versions and it's dependencies\n version, which means that installation using constraints is repeatable.\n\n For K8s and Celery which are both \"Core executors\" and \"Providers\" we have to\n add the base dependencies to core as well, in order to mitigate problems where\n newer version of provider will have less strict limits. This should be done for both\n extras and their deprecated aliases. This is not a full protection however, the way\n extras work, this will not add \"hard\" limits for Airflow and the user who does not use\n constraints.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n if extra in ['cncf.kubernetes', 'kubernetes', 'celery']:\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n else:\n EXTRAS_REQUIREMENTS[extra] = [\n get_provider_package_from_package_id(package_name) for package_name in providers\n ]\n\n\ndef add_provider_packages_to_extra_requirements(extra: str, providers: List[str]) -> None:\n \"\"\"\n Adds provider packages as requirements to extra. This is used to add provider packages as requirements\n to the \"bulk\" kind of extras. 
Those bulk extras do not have the detailed 'extra' requirements as\n initial values, so instead of replacing them (see previous function) we can extend them.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n\n\ndef add_all_provider_packages() -> None:\n \"\"\"\n In case of regular installation (providers installed from packages), we should add extra dependencies to\n Airflow - to get the providers automatically installed when those extras are installed.\n\n For providers installed from sources we skip that step. That helps to test and install airflow with\n all packages in CI - for example when new providers are added, otherwise the installation would fail\n as the new provider is not yet in PyPI.\n\n \"\"\"\n for provider in ALL_PROVIDERS:\n replace_extra_requirement_with_provider_packages(provider, [provider])\n add_provider_packages_to_extra_requirements(\"all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_ci\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"all_dbs\", ALL_DB_PROVIDERS)\n add_provider_packages_to_extra_requirements(\n \"devel_hadoop\", [\"apache.hdfs\", \"apache.hive\", \"presto\", \"trino\"]\n )\n add_all_deprecated_provider_packages()\n\n\nclass Develop(develop_orig):\n \"\"\"Forces removal of providers in editable mode.\"\"\"\n\n def run(self) -> None: # type: ignore\n self.announce('Installing in editable mode. Uninstalling provider packages!', level=log.INFO)\n # We need to run \"python3 -m pip\" because it might be that older PIP binary is in the path\n # And it results with an error when running pip directly (cannot import pip module)\n # also PIP does not have a stable API so we have to run subprocesses ¯\\_(ツ)_/¯\n try:\n installed_packages = (\n subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"]).decode().splitlines()\n )\n airflow_provider_packages = [\n package_line.split(\"=\")[0]\n for package_line in installed_packages\n if package_line.startswith(\"apache-airflow-providers\")\n ]\n self.announce(f'Uninstalling ${airflow_provider_packages}!', level=log.INFO)\n subprocess.check_call([\"python3\", \"-m\", \"pip\", \"uninstall\", \"--yes\", *airflow_provider_packages])\n except subprocess.CalledProcessError as e:\n self.announce(f'Error when uninstalling airflow provider packages: {e}!', level=log.WARN)\n super().run()\n\n\nclass Install(install_orig):\n \"\"\"Forces installation of providers from sources in editable mode.\"\"\"\n\n def run(self) -> None:\n self.announce('Standard installation. Providers are installed from packages', level=log.INFO)\n super().run()\n\n\ndef do_setup() -> None:\n \"\"\"\n Perform the Airflow package setup.\n\n Most values come from setup.cfg, only the dynamically calculated ones are passed to setup\n function call. 
See https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html\n \"\"\"\n setup_kwargs = {}\n\n def include_provider_namespace_packages_when_installing_from_sources() -> None:\n \"\"\"\n When installing providers from sources we install all namespace packages found below airflow,\n including airflow and provider packages, otherwise defaults from setup.cfg control this.\n The kwargs in setup() call override those that are specified in setup.cfg.\n \"\"\"\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])\n\n include_provider_namespace_packages_when_installing_from_sources()\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n print(\"Installing providers from sources. Skip adding providers as dependencies\")\n else:\n add_all_provider_packages()\n\n write_version()\n setup(\n distclass=AirflowDistribution,\n version=version,\n extras_require=EXTRAS_REQUIREMENTS,\n download_url=('https://archive.apache.org/dist/airflow/' + version),\n cmdclass={\n 'extra_clean': CleanCommand,\n 'compile_assets': CompileAssets,\n 'list_extras': ListExtras,\n 'install': Install, # type: ignore\n 'develop': Develop,\n },\n test_suite='setup.airflow_test_suite',\n **setup_kwargs, # type: ignore\n )\n\n\nif __name__ == \"__main__\":\n do_setup() # comment\n","path":"setup.py"}]}
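
For context on the `AttributeError` in the issue's traceback: Python 3.10 removed the ABC aliases (`Iterable`, `Mapping`, etc.) from the top-level `collections` module; they had been deprecated since Python 3.3 in favour of `collections.abc`. Below is a minimal sketch of the failing pattern and the forward-compatible rewrite — the function body is illustrative only, not the actual `databricks-sql-connector` source:

```python
import collections.abc


def escape_item(item):
    """Quote a value for interpolation into SQL (illustrative sketch)."""
    if item is None:
        return 'NULL'
    if isinstance(item, (int, float)):
        return str(item)
    # Strings are iterable too, so this check must come before the Iterable one.
    if isinstance(item, str):
        return f"'{item}'"
    # Pre-3.10 code wrote `isinstance(item, collections.Iterable)`, which now
    # raises AttributeError; `collections.abc.Iterable` works on Python 3.3+.
    if isinstance(item, collections.abc.Iterable):
        return '(' + ','.join(escape_item(i) for i in item) + ')'
    raise TypeError(f'Unsupported type: {type(item)}')


print(escape_item(['file1', 'file2', 'file3']))  # -> ('file1','file2','file3')
```

Until a fixed connector release is available, one way to keep a distribution installable is a PEP 508 environment marker on the pin, e.g. `databricks-sql-connector>=1.0.0, <2.0.0; python_version < "3.10"`; whether the Airflow PR resolved it this way or by bumping the pin is not visible in this truncated record.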
See the License for the\\n# specific language governing permissions and limitations\\n# under the License.\\n\\\"\\\"\\\"Setup.py for the Airflow project.\\\"\\\"\\\"\\nimport glob\\nimport logging\\nimport os\\nimport subprocess\\nimport sys\\nimport unittest\\nfrom copy import deepcopy\\nfrom os.path import dirname, relpath\\nfrom textwrap import wrap\\nfrom typing import Dict, List\\n\\nfrom setuptools import Command, Distribution, find_namespace_packages, setup\\nfrom setuptools.command.develop import develop as develop_orig\\nfrom setuptools.command.install import install as install_orig\\n\\n# Setuptools patches this import to point to a vendored copy instead of the\\n# stdlib, which is deprecated in Python 3.10 and will be removed in 3.12.\\nfrom distutils import log # isort: skip\\n\\n# Controls whether providers are installed from packages or directly from sources\\n# It is turned on by default in case of development environments such as Breeze\\n# And it is particularly useful when you add a new provider and there is no\\n# PyPI version to install the provider package from\\nINSTALL_PROVIDERS_FROM_SOURCES = 'INSTALL_PROVIDERS_FROM_SOURCES'\\nPY39 = sys.version_info >= (3, 9)\\n\\nlogger = logging.getLogger(__name__)\\n\\nversion = '2.3.0.dev0'\\n\\nmy_dir = dirname(__file__)\\n\\n\\ndef airflow_test_suite() -> unittest.TestSuite:\\n \\\"\\\"\\\"Test suite for Airflow tests\\\"\\\"\\\"\\n test_loader = unittest.TestLoader()\\n test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')\\n return test_suite\\n\\n\\nclass CleanCommand(Command):\\n \\\"\\\"\\\"\\n Command to tidy up the project root.\\n Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.\\n \\\"\\\"\\\"\\n\\n description = \\\"Tidy up the project root\\\"\\n user_options: List[str] = []\\n\\n def initialize_options(self) -> None:\\n \\\"\\\"\\\"Set default values for options.\\\"\\\"\\\"\\n\\n def finalize_options(self) -> None:\\n \\\"\\\"\\\"Set final values for options.\\\"\\\"\\\"\\n\\n @staticmethod\\n def rm_all_files(files: List[str]) -> None:\\n \\\"\\\"\\\"Remove all files from the list\\\"\\\"\\\"\\n for file in files:\\n try:\\n os.remove(file)\\n except Exception as e:\\n logger.warning(\\\"Error when removing %s: %s\\\", file, e)\\n\\n def run(self) -> None:\\n \\\"\\\"\\\"Remove temporary files and directories.\\\"\\\"\\\"\\n os.chdir(my_dir)\\n self.rm_all_files(glob.glob('./build/*'))\\n self.rm_all_files(glob.glob('./**/__pycache__/*', recursive=True))\\n self.rm_all_files(glob.glob('./**/*.pyc', recursive=True))\\n self.rm_all_files(glob.glob('./dist/*'))\\n self.rm_all_files(glob.glob('./*.egg-info'))\\n self.rm_all_files(glob.glob('./docker-context-files/*.whl'))\\n self.rm_all_files(glob.glob('./docker-context-files/*.tgz'))\\n\\n\\nclass CompileAssets(Command):\\n \\\"\\\"\\\"\\n Compile and build the frontend assets using yarn and webpack.\\n Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.\\n \\\"\\\"\\\"\\n\\n description = \\\"Compile and build the frontend assets\\\"\\n user_options: List[str] = []\\n\\n def initialize_options(self) -> None:\\n \\\"\\\"\\\"Set default values for options.\\\"\\\"\\\"\\n\\n def finalize_options(self) -> None:\\n \\\"\\\"\\\"Set final values for options.\\\"\\\"\\\"\\n\\n def run(self) -> None:\\n \\\"\\\"\\\"Run a command to compile and build assets.\\\"\\\"\\\"\\n subprocess.check_call('./airflow/www/compile_assets.sh')\\n\\n\\nclass 
ListExtras(Command):\\n \\\"\\\"\\\"\\n List all available extras\\n Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.\\n \\\"\\\"\\\"\\n\\n description = \\\"List available extras\\\"\\n user_options: List[str] = []\\n\\n def initialize_options(self) -> None:\\n \\\"\\\"\\\"Set default values for options.\\\"\\\"\\\"\\n\\n def finalize_options(self) -> None:\\n \\\"\\\"\\\"Set final values for options.\\\"\\\"\\\"\\n\\n def run(self) -> None:\\n \\\"\\\"\\\"List extras.\\\"\\\"\\\"\\n print(\\\"\\\\n\\\".join(wrap(\\\", \\\".join(EXTRAS_REQUIREMENTS.keys()), 100)))\\n\\n\\ndef git_version(version_: str) -> str:\\n \\\"\\\"\\\"\\n Return a version to identify the state of the underlying git repo. The version will\\n indicate whether the head of the current git-backed working directory is tied to a\\n release tag or not : it will indicate the former with a 'release:{version}' prefix\\n and the latter with a '.dev0' suffix. Following the prefix will be a sha of the current\\n branch head. Finally, a \\\"dirty\\\" suffix is appended to indicate that uncommitted\\n changes are present.\\n\\n :param str version_: Semver version\\n :return: Found Airflow version in Git repo\\n :rtype: str\\n \\\"\\\"\\\"\\n try:\\n import git\\n\\n try:\\n repo = git.Repo(os.path.join(*[my_dir, '.git']))\\n except git.NoSuchPathError:\\n logger.warning('.git directory not found: Cannot compute the git version')\\n return ''\\n except git.InvalidGitRepositoryError:\\n logger.warning('Invalid .git directory not found: Cannot compute the git version')\\n return ''\\n except ImportError:\\n logger.warning('gitpython not found: Cannot compute the git version.')\\n return ''\\n if repo:\\n sha = repo.head.commit.hexsha\\n if repo.is_dirty():\\n return f'.dev0+{sha}.dirty'\\n # commit is clean\\n return f'.release:{version_}+{sha}'\\n return 'no_git_version'\\n\\n\\ndef write_version(filename: str = os.path.join(*[my_dir, \\\"airflow\\\", \\\"git_version\\\"])) -> None:\\n \\\"\\\"\\\"\\n Write the Semver version + git hash to file, e.g. 
\\\".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65\\\".\\n\\n :param str filename: Destination file to write\\n \\\"\\\"\\\"\\n text = f\\\"{git_version(version)}\\\"\\n with open(filename, 'w') as file:\\n file.write(text)\\n\\n\\npandas_requirement = 'pandas>=0.17.1'\\n\\n# 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py\\n# If you change this mark you should also change ./scripts/ci/check_order_setup.py\\n# Start dependencies group\\nalibaba = [\\n 'oss2>=2.14.0',\\n]\\namazon = [\\n 'boto3>=1.15.0',\\n # watchtower 3 has been released end Jan and introduced breaking change across the board that might\\n # change logging behaviour:\\n # https://github.com/kislyuk/watchtower/blob/develop/Changes.rst#changes-for-v300-2022-01-26\\n # TODO: update to watchtower >3\\n 'watchtower~=2.0.1',\\n 'jsonpath_ng>=1.5.3',\\n 'redshift_connector>=2.0.888',\\n 'sqlalchemy_redshift>=0.8.6',\\n pandas_requirement,\\n 'mypy-boto3-rds>=1.21.0',\\n 'mypy-boto3-redshift-data>=1.21.0',\\n]\\napache_beam = [\\n 'apache-beam>=2.33.0',\\n]\\narangodb = ['python-arango>=7.3.2']\\nasana = ['asana>=0.10']\\nasync_packages = [\\n 'eventlet>=0.9.7',\\n 'gevent>=0.13',\\n 'greenlet>=0.4.9',\\n]\\natlas = [\\n 'atlasclient>=0.1.2',\\n]\\nazure = [\\n 'azure-batch>=8.0.0',\\n 'azure-cosmos>=4.0.0',\\n 'azure-datalake-store>=0.0.45',\\n 'azure-identity>=1.3.1',\\n 'azure-keyvault-secrets>=4.1.0,<5.0',\\n 'azure-kusto-data>=0.0.43,<0.1',\\n # Azure integration uses old librarires and the limits below reflect that\\n # TODO: upgrade to newer versions of all the below libraries\\n 'azure-mgmt-containerinstance>=1.5.0,<2.0',\\n 'azure-mgmt-datafactory>=1.0.0,<2.0',\\n 'azure-mgmt-datalake-store>=0.5.0',\\n 'azure-mgmt-resource>=2.2.0',\\n # limited due to https://github.com/Azure/azure-sdk-for-python/pull/18801 implementation released in 12.9\\n 'azure-storage-blob>=12.7.0,<12.9.0',\\n 'azure-storage-common>=2.1.0',\\n 'azure-storage-file>=2.1.0',\\n]\\ncassandra = [\\n 'cassandra-driver>=3.13.0',\\n]\\ncelery = [\\n # The Celery is known to introduce problems when upgraded to a MAJOR version. Airflow Core\\n # Uses Celery for CeleryExecutor, and we also know that Kubernetes Python client follows SemVer\\n # (https://docs.celeryq.dev/en/stable/contributing.html?highlight=semver#versions).\\n # This is a crucial component of Airflow, so we should limit it to the next MAJOR version and only\\n # deliberately bump the version when we tested it, and we know it can be bumped.\\n # Bumping this version should also be connected with\\n # limiting minimum airflow version supported in cncf.kubernetes provider, due to the\\n # potential breaking changes in Airflow Core as well (celery is added as extra, so Airflow\\n # core is not hard-limited via install-requirements, only by extra).\\n 'celery>=5.2.3,<6',\\n 'flower>=1.0.0',\\n]\\ncgroups = [\\n # Cgroupspy 0.2.2 added Python 3.10 compatibility\\n 'cgroupspy>=0.2.2',\\n]\\ncloudant = [\\n 'cloudant>=2.0',\\n]\\ndask = [\\n # Dask support is limited, we need Dask team to upgrade support for dask if we were to continue\\n # Supporting it in the future\\n 'cloudpickle>=1.4.1',\\n 'dask>=2.9.0',\\n 'distributed>=2.11.1',\\n]\\ndatabricks = [\\n 'requests>=2.26.0, <3',\\n 'databricks-sql-connector>=1.0.0, <2.0.0',\\n]\\ndatadog = [\\n 'datadog>=0.14.0',\\n]\\ndeprecated_api = [\\n 'requests>=2.26.0',\\n]\\ndoc = [\\n 'click>=8.0',\\n 'sphinx>=4.4.0',\\n # Docutils 0.17.0 converts generated
into
and breaks our doc formatting\\n # By adding a lot of whitespace separation. This limit can be lifted when we update our doc to handle\\n #
tags for sections\\n 'docutils<0.17.0',\\n # Without this, Sphinx goes in to a _very_ large backtrack on Python 3.7,\\n # even though Sphinx 4.4.0 has this but with python_version<3.10.\\n 'importlib-metadata>=4.4; python_version < \\\"3.8\\\"',\\n 'sphinx-airflow-theme',\\n 'sphinx-argparse>=0.1.13',\\n 'sphinx-autoapi>=1.8.0',\\n 'sphinx-copybutton',\\n 'sphinx-jinja>=2.0',\\n 'sphinx-rtd-theme>=0.1.6',\\n 'sphinxcontrib-httpdomain>=1.7.0',\\n 'sphinxcontrib-redoc>=1.6.0',\\n 'sphinxcontrib-spelling>=7.3',\\n]\\ndocker = [\\n 'docker>=5.0.3',\\n]\\ndrill = ['sqlalchemy-drill>=1.1.0', 'sqlparse>=0.4.1']\\ndruid = [\\n 'pydruid>=0.4.1',\\n]\\nelasticsearch = [\\n 'elasticsearch>7',\\n 'elasticsearch-dbapi',\\n 'elasticsearch-dsl>=5.0.0',\\n]\\nexasol = ['pyexasol>=0.5.1', pandas_requirement]\\nfacebook = [\\n 'facebook-business>=6.0.2',\\n]\\nflask_appbuilder_authlib = [\\n 'authlib',\\n]\\ngithub = [\\n 'pygithub',\\n]\\ngoogle = [\\n # Google has very clear rules on what dependencies should be used. All the limits below\\n # follow strict guidelines of Google Libraries as quoted here:\\n # While this issue is open, dependents of google-api-core, google-cloud-core. and google-auth\\n # should preserve >1, <3 pins on these packages.\\n # https://github.com/googleapis/google-cloud-python/issues/10566\\n # Some of Google Packages are limited to <2.0.0 because 2.0.0 releases of the libraries\\n # Introduced breaking changes across the board. Those libraries should be upgraded soon\\n # TODO: Upgrade all Google libraries that are limited to <2.0.0\\n 'PyOpenSSL',\\n # The Google Ads 14.0.1 breaks PIP and eager upgrade as it requires\\n # google-api-core>=2.0.0 which cannot be used yet (see below comment)\\n # and https://github.com/apache/airflow/issues/18705#issuecomment-933746150\\n 'google-ads>=12.0.0,<14.0.1',\\n 'google-api-core>=2.7.0,<3.0.0',\\n 'google-api-python-client>=1.6.0,<2.0.0',\\n 'google-auth>=1.0.0',\\n 'google-auth-httplib2>=0.0.1',\\n 'google-cloud-aiplatform>=1.7.1,<2.0.0',\\n 'google-cloud-automl>=2.1.0',\\n 'google-cloud-bigquery-datatransfer>=3.0.0',\\n 'google-cloud-bigtable>=1.0.0,<2.0.0',\\n 'google-cloud-build>=3.0.0',\\n 'google-cloud-container>=0.1.1,<2.0.0',\\n 'google-cloud-datacatalog>=3.0.0',\\n 'google-cloud-dataplex>=0.1.0',\\n 'google-cloud-dataproc>=3.1.0',\\n 'google-cloud-dataproc-metastore>=1.2.0,<2.0.0',\\n 'google-cloud-dlp>=0.11.0,<2.0.0',\\n 'google-cloud-kms>=2.0.0',\\n 'google-cloud-language>=1.1.1,<2.0.0',\\n 'google-cloud-logging>=2.1.1',\\n 'google-cloud-memcache>=0.2.0',\\n 'google-cloud-monitoring>=2.0.0',\\n 'google-cloud-os-login>=2.0.0',\\n 'google-cloud-orchestration-airflow>=1.0.0,<2.0.0',\\n 'google-cloud-pubsub>=2.0.0',\\n 'google-cloud-redis>=2.0.0',\\n 'google-cloud-secret-manager>=0.2.0,<2.0.0',\\n 'google-cloud-spanner>=1.10.0,<2.0.0',\\n 'google-cloud-speech>=0.36.3,<2.0.0',\\n 'google-cloud-storage>=1.30,<2.0.0',\\n 'google-cloud-tasks>=2.0.0',\\n 'google-cloud-texttospeech>=0.4.0,<2.0.0',\\n 'google-cloud-translate>=1.5.0,<2.0.0',\\n 'google-cloud-videointelligence>=1.7.0,<2.0.0',\\n 'google-cloud-vision>=0.35.2,<2.0.0',\\n 'google-cloud-workflows>=0.1.0,<2.0.0',\\n 'grpcio-gcp>=0.2.2',\\n 'httpx',\\n 'json-merge-patch>=0.2',\\n 'looker-sdk>=22.2.0',\\n 'pandas-gbq',\\n pandas_requirement,\\n 'sqlalchemy-bigquery>=1.2.1',\\n]\\ngrpc = [\\n # Google has very clear rules on what dependencies should be used. 
All the limits below\\n # follow strict guidelines of Google Libraries as quoted here:\\n # While this issue is open, dependents of google-api-core, google-cloud-core. and google-auth\\n # should preserve >1, <3 pins on these packages.\\n # https://github.com/googleapis/google-cloud-python/issues/10566\\n 'google-auth>=1.0.0, <3.0.0',\\n 'google-auth-httplib2>=0.0.1',\\n 'grpcio>=1.15.0',\\n]\\nhashicorp = [\\n 'hvac>=0.10',\\n]\\nhdfs = [\\n 'snakebite-py3',\\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\\n]\\nhive = [\\n 'hmsclient>=0.1.0',\\n 'pyhive[hive]>=0.6.0',\\n # in case of Python 3.9 sasl library needs to be installed with version higher or equal than\\n # 0.3.1 because only that version supports Python 3.9. For other Python version pyhive[hive] pulls\\n # the sasl library anyway (and there sasl library version is not relevant)\\n 'sasl>=0.3.1; python_version>=\\\"3.9\\\"',\\n 'thrift>=0.9.2',\\n pandas_requirement,\\n]\\nhttp = [\\n # The 2.26.0 release of requests got rid of the chardet LGPL mandatory dependency, allowing us to\\n # release it as a requirement for airflow\\n 'requests>=2.26.0',\\n]\\nhttp_provider = [\\n 'apache-airflow-providers-http',\\n]\\ninfluxdb = [\\n 'influxdb-client>=1.19.0',\\n pandas_requirement,\\n]\\njdbc = [\\n 'jaydebeapi>=1.1.1',\\n]\\njenkins = [\\n 'python-jenkins>=1.0.0',\\n]\\njira = [\\n 'JIRA>1.0.7',\\n]\\nkerberos = [\\n 'pykerberos>=1.1.13',\\n 'requests_kerberos>=0.10.0',\\n 'thrift_sasl>=0.2.0',\\n]\\nkubernetes = [\\n 'cryptography>=2.0.0',\\n # The Kubernetes API is known to introduce problems when upgraded to a MAJOR version. Airflow Core\\n # Uses Kubernetes for Kubernetes executor, and we also know that Kubernetes Python client follows SemVer\\n # (https://github.com/kubernetes-client/python#compatibility). This is a crucial component of Airflow\\n # So we should limit it to the next MAJOR version and only deliberately bump the version when we\\n # tested it, and we know it can be bumped. 
Bumping this version should also be connected with\\n # limiting minimum airflow version supported in cncf.kubernetes provider, due to the\\n # potential breaking changes in Airflow Core as well (kubernetes is added as extra, so Airflow\\n # core is not hard-limited via install-requirements, only by extra).\\n 'kubernetes>=21.7.0,<24',\\n]\\nkylin = ['kylinpy>=2.6']\\nldap = [\\n 'ldap3>=2.5.1',\\n 'python-ldap',\\n]\\nleveldb = ['plyvel; platform_machine != \\\"aarch64\\\"']\\nmongo = [\\n 'dnspython>=1.13.0',\\n # pymongo 4.0.0 removes connection option `ssl_cert_reqs` which is used in providers-mongo/2.2.0\\n # TODO: Upgrade to pymongo 4.0.0+\\n 'pymongo>=3.6.0,<4.0.0',\\n]\\nmssql = [\\n 'pymssql>=2.1.5; platform_machine != \\\"aarch64\\\"',\\n]\\nmysql = [\\n 'mysql-connector-python>=8.0.11; platform_machine != \\\"aarch64\\\"',\\n 'mysqlclient>=1.3.6; platform_machine != \\\"aarch64\\\"',\\n]\\nneo4j = ['neo4j>=4.2.1']\\nodbc = [\\n 'pyodbc',\\n]\\nopsgenie = [\\n 'opsgenie-sdk>=2.1.5',\\n]\\noracle = [\\n 'cx_Oracle>=5.1.2',\\n]\\npagerduty = [\\n 'pdpyras>=4.1.2',\\n]\\npandas = [\\n pandas_requirement,\\n]\\npapermill = [\\n 'papermill[all]>=1.2.1',\\n 'scrapbook[all]',\\n]\\npassword = [\\n 'bcrypt>=2.0.0',\\n 'flask-bcrypt>=0.7.1',\\n]\\npinot = [\\n # pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it\\n # causes a problem with newer versions.\\n 'pinotdb>0.1.2',\\n]\\nplexus = [\\n 'arrow>=0.16.0',\\n]\\npostgres = [\\n 'psycopg2-binary>=2.7.4',\\n]\\npresto = [\\n # The limit to Presto 0.8 for unknown reason\\n # TODO: Remove the limit\\n 'presto-python-client>=0.7.0,<0.8',\\n pandas_requirement,\\n]\\npsrp = [\\n 'pypsrp>=0.8',\\n]\\nqubole = [\\n 'qds-sdk>=1.10.4',\\n]\\nrabbitmq = [\\n 'amqp',\\n]\\nredis = [\\n # Redis 4 introduced a number of changes that likely need testing including mixins in redis commands\\n # as well as unquoting URLS with `urllib.parse.unquote`:\\n # https://github.com/redis/redis-py/blob/master/CHANGES\\n # TODO: upgrade to support redis package >=4\\n 'redis~=3.2',\\n]\\nsalesforce = ['simple-salesforce>=1.0.0', 'tableauserverclient', pandas_requirement]\\nsamba = [\\n 'smbprotocol>=1.5.0',\\n]\\nsegment = [\\n 'analytics-python>=1.2.9',\\n]\\nsendgrid = [\\n 'sendgrid>=6.0.0',\\n]\\nsentry = [\\n 'blinker>=1.1',\\n 'sentry-sdk>=0.8.0',\\n]\\nsingularity = ['spython>=0.0.56']\\nslack = [\\n 'slack_sdk>=3.0.0',\\n]\\nsnowflake = [\\n 'snowflake-connector-python>=2.4.1',\\n 'snowflake-sqlalchemy>=1.1.0',\\n]\\nspark = [\\n 'pyspark',\\n]\\nssh = [\\n 'paramiko>=2.6.0',\\n 'pysftp>=0.2.9',\\n 'sshtunnel>=0.3.2',\\n]\\nstatsd = [\\n 'statsd>=3.3.0',\\n]\\ntableau = [\\n 'tableauserverclient',\\n]\\ntelegram = [\\n 'python-telegram-bot>=13.0',\\n]\\ntrino = [\\n 'trino>=0.301.0',\\n pandas_requirement,\\n]\\nvertica = [\\n 'vertica-python>=0.5.1',\\n]\\nvirtualenv = [\\n 'virtualenv',\\n]\\nwebhdfs = [\\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\\n]\\nwinrm = [\\n 'pywinrm>=0.4',\\n]\\nyandex = [\\n 'yandexcloud>=0.146.0',\\n]\\nzendesk = [\\n 'zenpy>=2.0.24',\\n]\\n# End dependencies group\\n\\n# Mypy 0.900 and above ships only with stubs from stdlib so if we need other stubs, we need to install them\\n# manually as `types-*`. See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports\\n# for details. 
We want to install them explicitly because we want to eventually move to\\n# mypyd which does not support installing the types dynamically with --install-types\\nmypy_dependencies = [\\n # TODO: upgrade to newer versions of MyPy continuously as they are released\\n 'mypy==0.910',\\n 'types-boto',\\n 'types-certifi',\\n 'types-croniter',\\n 'types-Deprecated',\\n 'types-docutils',\\n 'types-freezegun',\\n 'types-paramiko',\\n 'types-protobuf',\\n 'types-python-dateutil',\\n 'types-python-slugify',\\n 'types-pytz',\\n 'types-redis',\\n 'types-requests',\\n 'types-setuptools',\\n 'types-termcolor',\\n 'types-tabulate',\\n 'types-toml',\\n 'types-Markdown',\\n 'types-PyMySQL',\\n 'types-PyYAML',\\n]\\n\\n# Dependencies needed for development only\\ndevel_only = [\\n 'aws_xray_sdk',\\n 'beautifulsoup4>=4.7.1',\\n 'black',\\n 'blinker',\\n 'bowler',\\n 'click>=8.0',\\n 'coverage',\\n 'filelock',\\n 'flake8>=3.6.0',\\n 'flake8-colors',\\n 'flaky',\\n 'freezegun',\\n # Github3 version 3.1.2 requires PyJWT>=2.3.0 which clashes with Flask App Builder where PyJWT is <2.0.0\\n # Actually GitHub3.1.0 already introduced PyJWT>=2.3.0 but so far `pip` was able to resolve it without\\n # getting into a long backtracking loop and figure out that github3 3.0.0 version is the right version\\n # similarly limiting it to 3.1.2 causes pip not to enter the backtracking loop. Apparently when there\\n # are 3 versions with PyJWT>=2.3.0 (3.1.0, 3.1.1 and 3.1.2) pip enters into a backtracking loop and fails\\n # to resolve that github3 3.0.0 is the right version to use.\\n # This limitation could be removed if PyJWT limitation < 2.0.0 is dropped from FAB or when\\n # pip resolution is improved to handle the case. The issue which describes this PIP behaviour\\n # and hopefully allowing to improve it is tracked in https://github.com/pypa/pip/issues/10924\\n 'github3.py<3.1.0',\\n 'gitpython',\\n 'ipdb',\\n 'jira',\\n 'jsondiff',\\n 'mongomock',\\n 'moto>=3.1.0',\\n 'parameterized',\\n 'paramiko',\\n 'pipdeptree',\\n 'pre-commit',\\n 'pypsrp',\\n 'pygithub',\\n 'pysftp',\\n # Pytest 7 has been released in February 2022 and we should attempt to upgrade and remove the limit\\n # It contains a number of potential breaking changes but none of them looks breaking our use\\n # https://docs.pytest.org/en/latest/changelog.html#pytest-7-0-0-2022-02-03\\n # TODO: upgrade it and remove the limit\\n 'pytest~=6.0',\\n 'pytest-asyncio',\\n 'pytest-cov',\\n 'pytest-instafail',\\n # We should attempt to remove the limit when we upgrade Pytest\\n # TODO: remove the limit when we upgrade pytest\\n 'pytest-rerunfailures~=9.1',\\n 'pytest-timeouts',\\n 'pytest-xdist',\\n 'python-jose',\\n 'pywinrm',\\n 'qds-sdk>=1.9.6',\\n 'pytest-httpx',\\n 'requests_mock',\\n 'rich_click',\\n 'semver',\\n 'twine',\\n 'wheel',\\n 'yamllint',\\n]\\n\\ndevel = cgroups + devel_only + doc + kubernetes + mypy_dependencies + mysql + pandas + password\\ndevel_hadoop = devel + hdfs + hive + kerberos + presto + webhdfs\\n\\n# Dict of all providers which are part of the Apache Airflow repository together with their requirements\\nPROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {\\n 'airbyte': http_provider,\\n 'alibaba': alibaba,\\n 'amazon': amazon,\\n 'apache.beam': apache_beam,\\n 'apache.cassandra': cassandra,\\n 'apache.drill': drill,\\n 'apache.druid': druid,\\n 'apache.hdfs': hdfs,\\n 'apache.hive': hive,\\n 'apache.kylin': kylin,\\n 'apache.livy': http_provider,\\n 'apache.pig': [],\\n 'apache.pinot': pinot,\\n 'apache.spark': spark,\\n 'apache.sqoop': [],\\n 
'arangodb': arangodb,\\n 'asana': asana,\\n 'celery': celery,\\n 'cloudant': cloudant,\\n 'cncf.kubernetes': kubernetes,\\n 'databricks': databricks,\\n 'datadog': datadog,\\n 'dbt.cloud': http_provider,\\n 'dingding': [],\\n 'discord': [],\\n 'docker': docker,\\n 'elasticsearch': elasticsearch,\\n 'exasol': exasol,\\n 'facebook': facebook,\\n 'ftp': [],\\n 'github': github,\\n 'google': google,\\n 'grpc': grpc,\\n 'hashicorp': hashicorp,\\n 'http': http,\\n 'imap': [],\\n 'influxdb': influxdb,\\n 'jdbc': jdbc,\\n 'jenkins': jenkins,\\n 'jira': jira,\\n 'microsoft.azure': azure,\\n 'microsoft.mssql': mssql,\\n 'microsoft.psrp': psrp,\\n 'microsoft.winrm': winrm,\\n 'mongo': mongo,\\n 'mysql': mysql,\\n 'neo4j': neo4j,\\n 'odbc': odbc,\\n 'openfaas': [],\\n 'opsgenie': opsgenie,\\n 'oracle': oracle,\\n 'pagerduty': pagerduty,\\n 'papermill': papermill,\\n 'plexus': plexus,\\n 'postgres': postgres,\\n 'presto': presto,\\n 'qubole': qubole,\\n 'redis': redis,\\n 'salesforce': salesforce,\\n 'samba': samba,\\n 'segment': segment,\\n 'sendgrid': sendgrid,\\n 'sftp': ssh,\\n 'singularity': singularity,\\n 'slack': slack,\\n 'snowflake': snowflake,\\n 'sqlite': [],\\n 'ssh': ssh,\\n 'tableau': tableau,\\n 'telegram': telegram,\\n 'trino': trino,\\n 'vertica': vertica,\\n 'yandex': yandex,\\n 'zendesk': zendesk,\\n}\\n\\n# Those are all additional extras which do not have their own 'providers'\\n# The 'apache.atlas' and 'apache.webhdfs' are extras that provide additional libraries\\n# but they do not have separate providers (yet?), they are merely there to add extra libraries\\n# That can be used in custom python/bash operators.\\nADDITIONAL_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\\n 'apache.atlas': atlas,\\n 'apache.webhdfs': webhdfs,\\n}\\n\\n\\n# Those are extras that are extensions of the 'core' Airflow. They provide additional features\\n# To airflow core. 
They do not have separate providers because they do not have any operators/hooks etc.\\nCORE_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\\n 'async': async_packages,\\n 'celery': celery, # also has provider, but it extends the core with the CeleryExecutor\\n 'cgroups': cgroups,\\n 'cncf.kubernetes': kubernetes, # also has provider, but it extends the core with the KubernetesExecutor\\n 'dask': dask,\\n 'deprecated_api': deprecated_api,\\n 'github_enterprise': flask_appbuilder_authlib,\\n 'google_auth': flask_appbuilder_authlib,\\n 'kerberos': kerberos,\\n 'ldap': ldap,\\n 'leveldb': leveldb,\\n 'pandas': pandas,\\n 'password': password,\\n 'rabbitmq': rabbitmq,\\n 'sentry': sentry,\\n 'statsd': statsd,\\n 'virtualenv': virtualenv,\\n}\\n\\nEXTRAS_REQUIREMENTS: Dict[str, List[str]] = deepcopy(CORE_EXTRAS_REQUIREMENTS)\\n\\n\\ndef add_extras_for_all_providers() -> None:\\n \\\"\\\"\\\"\\n Adds extras for all providers.\\n By default all providers have the same extra name as provider id, for example\\n 'apache.hive' extra has 'apache.hive' provider requirement.\\n \\\"\\\"\\\"\\n for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():\\n EXTRAS_REQUIREMENTS[provider_name] = provider_requirement\\n\\n\\ndef add_additional_extras() -> None:\\n \\\"\\\"\\\"Adds extras for all additional extras.\\\"\\\"\\\"\\n for extra_name, extra_requirement in ADDITIONAL_EXTRAS_REQUIREMENTS.items():\\n EXTRAS_REQUIREMENTS[extra_name] = extra_requirement\\n\\n\\nadd_extras_for_all_providers()\\nadd_additional_extras()\\n\\n#############################################################################################################\\n# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series\\n#############################################################################################################\\n\\n# Dictionary of aliases from 1.10 - deprecated in Airflow 2.*\\nEXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {\\n 'atlas': 'apache.atlas',\\n 'aws': 'amazon',\\n 'azure': 'microsoft.azure',\\n 'cassandra': 'apache.cassandra',\\n 'crypto': '', # All crypto requirements are installation requirements of core Airflow\\n 'druid': 'apache.druid',\\n 'gcp': 'google',\\n 'gcp_api': 'google',\\n 'hdfs': 'apache.hdfs',\\n 'hive': 'apache.hive',\\n 'kubernetes': 'cncf.kubernetes',\\n 'mssql': 'microsoft.mssql',\\n 'pinot': 'apache.pinot',\\n 'qds': 'qubole',\\n 's3': 'amazon',\\n 'spark': 'apache.spark',\\n 'webhdfs': 'apache.webhdfs',\\n 'winrm': 'microsoft.winrm',\\n}\\n\\nEXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS: List[str] = [\\n \\\"crypto\\\",\\n \\\"webhdfs\\\",\\n]\\n\\n\\ndef add_extras_for_all_deprecated_aliases() -> None:\\n \\\"\\\"\\\"\\n Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same\\n as the extras they are replaced with.\\n The requirements are not copies - those are the same lists as for the new extras. 
This is intended.\\n Thanks to that if the original extras are later extended with providers, aliases are extended as well.\\n \\\"\\\"\\\"\\n for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():\\n requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []\\n if requirements is None:\\n raise Exception(f\\\"The extra {extra} is missing for deprecated alias {alias}\\\")\\n EXTRAS_REQUIREMENTS[alias] = requirements\\n\\n\\ndef add_all_deprecated_provider_packages() -> None:\\n \\\"\\\"\\\"\\n For deprecated aliases that are providers, we will swap the providers requirements to instead\\n be the provider itself.\\n\\n e.g. {\\\"kubernetes\\\": [\\\"kubernetes>=3.0.0, <12.0.0\\\", ...]} becomes\\n {\\\"kubernetes\\\": [\\\"apache-airflow-providers-cncf-kubernetes\\\"]}\\n \\\"\\\"\\\"\\n for alias, provider in EXTRAS_DEPRECATED_ALIASES.items():\\n if alias in EXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS:\\n continue\\n replace_extra_requirement_with_provider_packages(alias, [provider])\\n\\n\\nadd_extras_for_all_deprecated_aliases()\\n\\n#############################################################################################################\\n# End of deprecated section\\n#############################################################################################################\\n\\n# This is a list of all providers. It's a shortcut for anyone who would like to easily get the list of\\n# all providers. It is used by pre-commits.\\nALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())\\n\\nALL_DB_PROVIDERS = [\\n 'apache.cassandra',\\n 'apache.drill',\\n 'apache.druid',\\n 'apache.hdfs',\\n 'apache.hive',\\n 'apache.pinot',\\n 'arangodb',\\n 'cloudant',\\n 'databricks',\\n 'exasol',\\n 'influxdb',\\n 'microsoft.mssql',\\n 'mongo',\\n 'mysql',\\n 'neo4j',\\n 'postgres',\\n 'presto',\\n 'trino',\\n 'vertica',\\n]\\n\\n# Special requirements for all database-related providers. They are de-duplicated.\\nall_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})\\n\\n# Requirements for all \\\"user\\\" extras (no devel). They are de-duplicated. Note that we do not need\\n# to separately add providers requirements - they have been already added as 'providers' extras above\\n_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})\\n\\n# All user extras here\\nEXTRAS_REQUIREMENTS[\\\"all\\\"] = _all_requirements\\n\\n# All db user extras here\\nEXTRAS_REQUIREMENTS[\\\"all_dbs\\\"] = all_dbs + pandas\\n\\n# This can be simplified to devel_hadoop + _all_requirements due to inclusions\\n# but we keep it for the sake of explicitness. 
We are de-duplicating it anyway.\\ndevel_all = list(set(_all_requirements + doc + devel + devel_hadoop))\\n\\n# Those are packages excluded for \\\"all\\\" dependencies\\nPACKAGES_EXCLUDED_FOR_ALL = []\\nPACKAGES_EXCLUDED_FOR_ALL.extend(\\n [\\n 'snakebite',\\n ]\\n)\\n\\n\\ndef is_package_excluded(package: str, exclusion_list: List[str]) -> bool:\\n \\\"\\\"\\\"\\n Checks if package should be excluded.\\n\\n :param package: package name (beginning of it)\\n :param exclusion_list: list of excluded packages\\n :return: true if package should be excluded\\n \\\"\\\"\\\"\\n return any(package.startswith(excluded_package) for excluded_package in exclusion_list)\\n\\n\\ndevel_all = [\\n package\\n for package in devel_all\\n if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)\\n]\\n\\ndevel_ci = devel_all\\n\\n\\n# Those are extras that we have to add for development purposes\\n# They can be used to install a predefined set of dependencies.\\nEXTRAS_REQUIREMENTS[\\\"doc\\\"] = doc\\nEXTRAS_REQUIREMENTS[\\\"devel\\\"] = devel # devel already includes doc\\nEXTRAS_REQUIREMENTS[\\\"devel_hadoop\\\"] = devel_hadoop # devel_hadoop already includes devel\\nEXTRAS_REQUIREMENTS[\\\"devel_all\\\"] = devel_all\\nEXTRAS_REQUIREMENTS[\\\"devel_ci\\\"] = devel_ci\\n\\n\\ndef sort_extras_requirements() -> Dict[str, List[str]]:\\n \\\"\\\"\\\"\\n The dictionary order remains when keys() are retrieved.\\n Sort both: extras and list of dependencies to make it easier to analyse problems;\\n external packages will be first, then if providers are added they are added at the end of the lists.\\n \\\"\\\"\\\"\\n sorted_requirements = dict(sorted(EXTRAS_REQUIREMENTS.items()))\\n for extra_list in sorted_requirements.values():\\n extra_list.sort()\\n return sorted_requirements\\n\\n\\nEXTRAS_REQUIREMENTS = sort_extras_requirements()\\n\\n# Those providers are pre-installed always when airflow is installed.\\n# Those providers do not have a dependency on airflow 2.0 because that would lead to circular dependencies.\\n# This is not a problem for PIP but some tools (pipdeptree) show those as a warning.\\nPREINSTALLED_PROVIDERS = [\\n 'ftp',\\n 'http',\\n 'imap',\\n 'sqlite',\\n]\\n\\n\\ndef get_provider_package_from_package_id(package_id: str) -> str:\\n \\\"\\\"\\\"\\n Builds the name of provider package out of the package id provided.\\n\\n :param package_id: id of the package (like amazon or microsoft.azure)\\n :return: full name of package in PyPI\\n \\\"\\\"\\\"\\n package_suffix = package_id.replace(\\\".\\\", \\\"-\\\")\\n return f\\\"apache-airflow-providers-{package_suffix}\\\"\\n\\n\\ndef get_excluded_providers() -> List[str]:\\n \\\"\\\"\\\"Returns packages excluded for the current python version.\\\"\\\"\\\"\\n return []\\n\\n\\ndef get_all_provider_packages() -> str:\\n \\\"\\\"\\\"Returns all provider packages configured in setup.py\\\"\\\"\\\"\\n excluded_providers = get_excluded_providers()\\n return \\\" \\\".join(\\n get_provider_package_from_package_id(package)\\n for package in PROVIDERS_REQUIREMENTS\\n if package not in excluded_providers\\n )\\n\\n\\nclass AirflowDistribution(Distribution):\\n \\\"\\\"\\\"The setuptools.Distribution subclass with Airflow specific behaviour\\\"\\\"\\\"\\n\\n def __init__(self, attrs=None):\\n super().__init__(attrs)\\n self.install_requires = None\\n\\n def parse_config_files(self, *args, **kwargs) -> None:\\n \\\"\\\"\\\"\\n Ensure that when we have been asked to install providers from sources\\n that we don't *also* try to install 
those providers from PyPI.\\n Also we should make sure that in this case we copy provider.yaml files so that\\n Providers manager can find package information.\\n \\\"\\\"\\\"\\n super().parse_config_files(*args, **kwargs)\\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\\n self.install_requires = [\\n req for req in self.install_requires if not req.startswith('apache-airflow-providers-')\\n ]\\n provider_yaml_files = glob.glob(\\\"airflow/providers/**/provider.yaml\\\", recursive=True)\\n for provider_yaml_file in provider_yaml_files:\\n provider_relative_path = relpath(provider_yaml_file, os.path.join(my_dir, \\\"airflow\\\"))\\n self.package_data['airflow'].append(provider_relative_path)\\n else:\\n self.install_requires.extend(\\n [get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS]\\n )\\n\\n\\ndef replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:\\n \\\"\\\"\\\"\\n Replaces extra requirement with provider package. The intention here is that when\\n the provider is added as dependency of extra, there is no need to add the dependencies\\n separately. This is not needed and even harmful, because in case of future versions of\\n the provider, the requirements might change, so hard-coding requirements from the version\\n that was available at the release time might cause dependency conflicts in the future.\\n\\n Say for example that you have salesforce provider with those deps:\\n\\n { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }\\n\\n Initially ['salesforce'] extra has those requirements and it works like that when you install\\n it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when\\n the production installation is used, The dependencies are changed:\\n\\n { 'salesforce': ['apache-airflow-providers-salesforce'] }\\n\\n And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:\\n ['simple-salesforce>=1.0.0', 'tableauserverclient']\\n\\n So transitively 'salesforce' extra has all the requirements it needs and in case the provider\\n changes its dependencies, they will transitively change as well.\\n\\n In the constraint mechanism we save both - provider versions and it's dependencies\\n version, which means that installation using constraints is repeatable.\\n\\n For K8s and Celery which are both \\\"Core executors\\\" and \\\"Providers\\\" we have to\\n add the base dependencies to core as well, in order to mitigate problems where\\n newer version of provider will have less strict limits. This should be done for both\\n extras and their deprecated aliases. This is not a full protection however, the way\\n extras work, this will not add \\\"hard\\\" limits for Airflow and the user who does not use\\n constraints.\\n\\n :param extra: Name of the extra to add providers to\\n :param providers: list of provider ids\\n \\\"\\\"\\\"\\n if extra in ['cncf.kubernetes', 'kubernetes', 'celery']:\\n EXTRAS_REQUIREMENTS[extra].extend(\\n [get_provider_package_from_package_id(package_name) for package_name in providers]\\n )\\n else:\\n EXTRAS_REQUIREMENTS[extra] = [\\n get_provider_package_from_package_id(package_name) for package_name in providers\\n ]\\n\\n\\ndef add_provider_packages_to_extra_requirements(extra: str, providers: List[str]) -> None:\\n \\\"\\\"\\\"\\n Adds provider packages as requirements to extra. 
This is used to add provider packages as requirements\\n to the \\\"bulk\\\" kind of extras. Those bulk extras do not have the detailed 'extra' requirements as\\n initial values, so instead of replacing them (see previous function) we can extend them.\\n\\n :param extra: Name of the extra to add providers to\\n :param providers: list of provider ids\\n \\\"\\\"\\\"\\n EXTRAS_REQUIREMENTS[extra].extend(\\n [get_provider_package_from_package_id(package_name) for package_name in providers]\\n )\\n\\n\\ndef add_all_provider_packages() -> None:\\n \\\"\\\"\\\"\\n In case of regular installation (providers installed from packages), we should add extra dependencies to\\n Airflow - to get the providers automatically installed when those extras are installed.\\n\\n For providers installed from sources we skip that step. That helps to test and install airflow with\\n all packages in CI - for example when new providers are added, otherwise the installation would fail\\n as the new provider is not yet in PyPI.\\n\\n \\\"\\\"\\\"\\n for provider in ALL_PROVIDERS:\\n replace_extra_requirement_with_provider_packages(provider, [provider])\\n add_provider_packages_to_extra_requirements(\\\"all\\\", ALL_PROVIDERS)\\n add_provider_packages_to_extra_requirements(\\\"devel_ci\\\", ALL_PROVIDERS)\\n add_provider_packages_to_extra_requirements(\\\"devel_all\\\", ALL_PROVIDERS)\\n add_provider_packages_to_extra_requirements(\\\"all_dbs\\\", ALL_DB_PROVIDERS)\\n add_provider_packages_to_extra_requirements(\\n \\\"devel_hadoop\\\", [\\\"apache.hdfs\\\", \\\"apache.hive\\\", \\\"presto\\\", \\\"trino\\\"]\\n )\\n add_all_deprecated_provider_packages()\\n\\n\\nclass Develop(develop_orig):\\n \\\"\\\"\\\"Forces removal of providers in editable mode.\\\"\\\"\\\"\\n\\n def run(self) -> None: # type: ignore\\n self.announce('Installing in editable mode. Uninstalling provider packages!', level=log.INFO)\\n # We need to run \\\"python3 -m pip\\\" because it might be that older PIP binary is in the path\\n # And it results with an error when running pip directly (cannot import pip module)\\n # also PIP does not have a stable API so we have to run subprocesses ¯\\\\_(ツ)_/¯\\n try:\\n installed_packages = (\\n subprocess.check_output([\\\"python3\\\", \\\"-m\\\", \\\"pip\\\", \\\"freeze\\\"]).decode().splitlines()\\n )\\n airflow_provider_packages = [\\n package_line.split(\\\"=\\\")[0]\\n for package_line in installed_packages\\n if package_line.startswith(\\\"apache-airflow-providers\\\")\\n ]\\n self.announce(f'Uninstalling ${airflow_provider_packages}!', level=log.INFO)\\n subprocess.check_call([\\\"python3\\\", \\\"-m\\\", \\\"pip\\\", \\\"uninstall\\\", \\\"--yes\\\", *airflow_provider_packages])\\n except subprocess.CalledProcessError as e:\\n self.announce(f'Error when uninstalling airflow provider packages: {e}!', level=log.WARN)\\n super().run()\\n\\n\\nclass Install(install_orig):\\n \\\"\\\"\\\"Forces installation of providers from sources in editable mode.\\\"\\\"\\\"\\n\\n def run(self) -> None:\\n self.announce('Standard installation. Providers are installed from packages', level=log.INFO)\\n super().run()\\n\\n\\ndef do_setup() -> None:\\n \\\"\\\"\\\"\\n Perform the Airflow package setup.\\n\\n Most values come from setup.cfg, only the dynamically calculated ones are passed to setup\\n function call. 
See https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html\\n \\\"\\\"\\\"\\n setup_kwargs = {}\\n\\n def include_provider_namespace_packages_when_installing_from_sources() -> None:\\n \\\"\\\"\\\"\\n When installing providers from sources we install all namespace packages found below airflow,\\n including airflow and provider packages, otherwise defaults from setup.cfg control this.\\n The kwargs in setup() call override those that are specified in setup.cfg.\\n \\\"\\\"\\\"\\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\\n setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])\\n\\n include_provider_namespace_packages_when_installing_from_sources()\\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\\n print(\\\"Installing providers from sources. Skip adding providers as dependencies\\\")\\n else:\\n add_all_provider_packages()\\n\\n write_version()\\n setup(\\n distclass=AirflowDistribution,\\n version=version,\\n extras_require=EXTRAS_REQUIREMENTS,\\n download_url=('https://archive.apache.org/dist/airflow/' + version),\\n cmdclass={\\n 'extra_clean': CleanCommand,\\n 'compile_assets': CompileAssets,\\n 'list_extras': ListExtras,\\n 'install': Install, # type: ignore\\n 'develop': Develop,\\n },\\n test_suite='setup.airflow_test_suite',\\n **setup_kwargs, # type: ignore\\n )\\n\\n\\nif __name__ == \\\"__main__\\\":\\n do_setup() # comment\\n\",\n \"path\": \"setup.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Setup.py for the Airflow project.\"\"\"\nimport glob\nimport logging\nimport os\nimport subprocess\nimport sys\nimport unittest\nfrom copy import deepcopy\nfrom os.path import dirname, relpath\nfrom textwrap import wrap\nfrom typing import Dict, List\n\nfrom setuptools import Command, Distribution, find_namespace_packages, setup\nfrom setuptools.command.develop import develop as develop_orig\nfrom setuptools.command.install import install as install_orig\n\n# Setuptools patches this import to point to a vendored copy instead of the\n# stdlib, which is deprecated in Python 3.10 and will be removed in 3.12.\nfrom distutils import log # isort: skip\n\n# Controls whether providers are installed from packages or directly from sources\n# It is turned on by default in case of development environments such as Breeze\n# And it is particularly useful when you add a new provider and there is no\n# PyPI version to install the provider package from\nINSTALL_PROVIDERS_FROM_SOURCES = 'INSTALL_PROVIDERS_FROM_SOURCES'\nPY39 = sys.version_info >= (3, 9)\n\nlogger = logging.getLogger(__name__)\n\nversion = '2.3.0.dev0'\n\nmy_dir = dirname(__file__)\n\n\ndef airflow_test_suite() -> unittest.TestSuite:\n \"\"\"Test suite for Airflow tests\"\"\"\n test_loader = unittest.TestLoader()\n test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')\n return test_suite\n\n\nclass CleanCommand(Command):\n \"\"\"\n Command to tidy up the project root.\n Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.\n \"\"\"\n\n description = \"Tidy up the project root\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n @staticmethod\n def rm_all_files(files: List[str]) -> None:\n \"\"\"Remove all files from the list\"\"\"\n for file in files:\n try:\n os.remove(file)\n except Exception as e:\n logger.warning(\"Error when removing %s: %s\", file, e)\n\n def run(self) -> None:\n \"\"\"Remove temporary files and directories.\"\"\"\n os.chdir(my_dir)\n self.rm_all_files(glob.glob('./build/*'))\n self.rm_all_files(glob.glob('./**/__pycache__/*', recursive=True))\n self.rm_all_files(glob.glob('./**/*.pyc', recursive=True))\n self.rm_all_files(glob.glob('./dist/*'))\n self.rm_all_files(glob.glob('./*.egg-info'))\n self.rm_all_files(glob.glob('./docker-context-files/*.whl'))\n self.rm_all_files(glob.glob('./docker-context-files/*.tgz'))\n\n\nclass CompileAssets(Command):\n \"\"\"\n Compile and build the frontend assets using yarn and webpack.\n Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.\n \"\"\"\n\n description = \"Compile and build the frontend assets\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"Run a command to compile and build assets.\"\"\"\n subprocess.check_call('./airflow/www/compile_assets.sh')\n\n\nclass ListExtras(Command):\n \"\"\"\n List all available extras\n Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.\n \"\"\"\n\n description = \"List available extras\"\n user_options: List[str] = []\n\n def 
initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"List extras.\"\"\"\n print(\"\\n\".join(wrap(\", \".join(EXTRAS_REQUIREMENTS.keys()), 100)))\n\n\ndef git_version(version_: str) -> str:\n \"\"\"\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not: it will indicate the former with a 'release:{version}' prefix\n and the latter with a '.dev0' suffix. Following the prefix will be a sha of the current\n branch head. Finally, a \"dirty\" suffix is appended to indicate that uncommitted\n changes are present.\n\n :param str version_: Semver version\n :return: Found Airflow version in Git repo\n :rtype: str\n \"\"\"\n try:\n import git\n\n try:\n repo = git.Repo(os.path.join(*[my_dir, '.git']))\n except git.NoSuchPathError:\n logger.warning('.git directory not found: Cannot compute the git version')\n return ''\n except git.InvalidGitRepositoryError:\n logger.warning('Invalid .git directory found: Cannot compute the git version')\n return ''\n except ImportError:\n logger.warning('gitpython not found: Cannot compute the git version.')\n return ''\n if repo:\n sha = repo.head.commit.hexsha\n if repo.is_dirty():\n return f'.dev0+{sha}.dirty'\n # commit is clean\n return f'.release:{version_}+{sha}'\n return 'no_git_version'\n\n\ndef write_version(filename: str = os.path.join(*[my_dir, \"airflow\", \"git_version\"])) -> None:\n \"\"\"\n Write the Semver version + git hash to file, e.g. \".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65\".\n\n :param str filename: Destination file to write\n \"\"\"\n text = f\"{git_version(version)}\"\n with open(filename, 'w') as file:\n file.write(text)\n\n\npandas_requirement = 'pandas>=0.17.1'\n\n# 'Start dependencies group' and 'End dependencies group' are marks for ./scripts/ci/check_order_setup.py\n# If you change these marks you should also change ./scripts/ci/check_order_setup.py\n# Start dependencies group\nalibaba = [\n 'oss2>=2.14.0',\n]\namazon = [\n 'boto3>=1.15.0',\n # watchtower 3 has been released at the end of January and introduced breaking changes across the board that might\n # change logging behaviour:\n # https://github.com/kislyuk/watchtower/blob/develop/Changes.rst#changes-for-v300-2022-01-26\n # TODO: update to watchtower >3\n 'watchtower~=2.0.1',\n 'jsonpath_ng>=1.5.3',\n 'redshift_connector>=2.0.888',\n 'sqlalchemy_redshift>=0.8.6',\n pandas_requirement,\n 'mypy-boto3-rds>=1.21.0',\n 'mypy-boto3-redshift-data>=1.21.0',\n]\napache_beam = [\n 'apache-beam>=2.33.0',\n]\narangodb = ['python-arango>=7.3.2']\nasana = ['asana>=0.10']\nasync_packages = [\n 'eventlet>=0.9.7',\n 'gevent>=0.13',\n 'greenlet>=0.4.9',\n]\natlas = [\n 'atlasclient>=0.1.2',\n]\nazure = [\n 'azure-batch>=8.0.0',\n 'azure-cosmos>=4.0.0',\n 'azure-datalake-store>=0.0.45',\n 'azure-identity>=1.3.1',\n 'azure-keyvault-secrets>=4.1.0,<5.0',\n 'azure-kusto-data>=0.0.43,<0.1',\n # Azure integration uses old libraries and the limits below reflect that\n # TODO: upgrade to newer versions of all the below libraries\n 'azure-mgmt-containerinstance>=1.5.0,<2.0',\n 'azure-mgmt-datafactory>=1.0.0,<2.0',\n 'azure-mgmt-datalake-store>=0.5.0',\n 'azure-mgmt-resource>=2.2.0',\n # limited due to https://github.com/Azure/azure-sdk-for-python/pull/18801 implementation released in 12.9\n 
'azure-storage-blob>=12.7.0,<12.9.0',\n 'azure-storage-common>=2.1.0',\n 'azure-storage-file>=2.1.0',\n]\ncassandra = [\n 'cassandra-driver>=3.13.0',\n]\ncelery = [\n # Celery is known to introduce problems when upgraded to a MAJOR version. Airflow Core\n # uses Celery for CeleryExecutor, and we also know that Celery follows SemVer\n # (https://docs.celeryq.dev/en/stable/contributing.html?highlight=semver#versions).\n # This is a crucial component of Airflow, so we should limit it to the next MAJOR version and only\n # deliberately bump the version when we tested it, and we know it can be bumped.\n # Bumping this version should also be connected with\n # limiting the minimum airflow version supported in the celery provider, due to the\n # potential breaking changes in Airflow Core as well (celery is added as extra, so Airflow\n # core is not hard-limited via install-requirements, only by extra).\n 'celery>=5.2.3,<6',\n 'flower>=1.0.0',\n]\ncgroups = [\n # Cgroupspy 0.2.2 added Python 3.10 compatibility\n 'cgroupspy>=0.2.2',\n]\ncloudant = [\n 'cloudant>=2.0',\n]\ndask = [\n # Dask support is limited, we need the Dask team to upgrade support for dask if we were to continue\n # supporting it in the future\n 'cloudpickle>=1.4.1',\n 'dask>=2.9.0',\n 'distributed>=2.11.1',\n]\ndatabricks = [\n 'requests>=2.26.0, <3',\n 'databricks-sql-connector>=1.0.2, <2.0.0',\n]\ndatadog = [\n 'datadog>=0.14.0',\n]\ndeprecated_api = [\n 'requests>=2.26.0',\n]\ndoc = [\n 'click>=8.0',\n 'sphinx>=4.4.0',\n # Docutils 0.17.0 converts generated
<div class=\"section\"> into <section> and breaks our doc formatting\n # By adding a lot of whitespace separation. This limit can be lifted when we update our doc to handle\n # <section>
tags for sections\n 'docutils<0.17.0',\n # Without this, Sphinx goes in to a _very_ large backtrack on Python 3.7,\n # even though Sphinx 4.4.0 has this but with python_version<3.10.\n 'importlib-metadata>=4.4; python_version < \"3.8\"',\n 'sphinx-airflow-theme',\n 'sphinx-argparse>=0.1.13',\n 'sphinx-autoapi>=1.8.0',\n 'sphinx-copybutton',\n 'sphinx-jinja>=2.0',\n 'sphinx-rtd-theme>=0.1.6',\n 'sphinxcontrib-httpdomain>=1.7.0',\n 'sphinxcontrib-redoc>=1.6.0',\n 'sphinxcontrib-spelling>=7.3',\n]\ndocker = [\n 'docker>=5.0.3',\n]\ndrill = ['sqlalchemy-drill>=1.1.0', 'sqlparse>=0.4.1']\ndruid = [\n 'pydruid>=0.4.1',\n]\nelasticsearch = [\n 'elasticsearch>7',\n 'elasticsearch-dbapi',\n 'elasticsearch-dsl>=5.0.0',\n]\nexasol = ['pyexasol>=0.5.1', pandas_requirement]\nfacebook = [\n 'facebook-business>=6.0.2',\n]\nflask_appbuilder_authlib = [\n 'authlib',\n]\ngithub = [\n 'pygithub',\n]\ngoogle = [\n # Google has very clear rules on what dependencies should be used. All the limits below\n # follow strict guidelines of Google Libraries as quoted here:\n # While this issue is open, dependents of google-api-core, google-cloud-core. and google-auth\n # should preserve >1, <3 pins on these packages.\n # https://github.com/googleapis/google-cloud-python/issues/10566\n # Some of Google Packages are limited to <2.0.0 because 2.0.0 releases of the libraries\n # Introduced breaking changes across the board. Those libraries should be upgraded soon\n # TODO: Upgrade all Google libraries that are limited to <2.0.0\n 'PyOpenSSL',\n # The Google Ads 14.0.1 breaks PIP and eager upgrade as it requires\n # google-api-core>=2.0.0 which cannot be used yet (see below comment)\n # and https://github.com/apache/airflow/issues/18705#issuecomment-933746150\n 'google-ads>=12.0.0,<14.0.1',\n 'google-api-core>=2.7.0,<3.0.0',\n 'google-api-python-client>=1.6.0,<2.0.0',\n 'google-auth>=1.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'google-cloud-aiplatform>=1.7.1,<2.0.0',\n 'google-cloud-automl>=2.1.0',\n 'google-cloud-bigquery-datatransfer>=3.0.0',\n 'google-cloud-bigtable>=1.0.0,<2.0.0',\n 'google-cloud-build>=3.0.0',\n 'google-cloud-container>=0.1.1,<2.0.0',\n 'google-cloud-datacatalog>=3.0.0',\n 'google-cloud-dataplex>=0.1.0',\n 'google-cloud-dataproc>=3.1.0',\n 'google-cloud-dataproc-metastore>=1.2.0,<2.0.0',\n 'google-cloud-dlp>=0.11.0,<2.0.0',\n 'google-cloud-kms>=2.0.0',\n 'google-cloud-language>=1.1.1,<2.0.0',\n 'google-cloud-logging>=2.1.1',\n 'google-cloud-memcache>=0.2.0',\n 'google-cloud-monitoring>=2.0.0',\n 'google-cloud-os-login>=2.0.0',\n 'google-cloud-orchestration-airflow>=1.0.0,<2.0.0',\n 'google-cloud-pubsub>=2.0.0',\n 'google-cloud-redis>=2.0.0',\n 'google-cloud-secret-manager>=0.2.0,<2.0.0',\n 'google-cloud-spanner>=1.10.0,<2.0.0',\n 'google-cloud-speech>=0.36.3,<2.0.0',\n 'google-cloud-storage>=1.30,<2.0.0',\n 'google-cloud-tasks>=2.0.0',\n 'google-cloud-texttospeech>=0.4.0,<2.0.0',\n 'google-cloud-translate>=1.5.0,<2.0.0',\n 'google-cloud-videointelligence>=1.7.0,<2.0.0',\n 'google-cloud-vision>=0.35.2,<2.0.0',\n 'google-cloud-workflows>=0.1.0,<2.0.0',\n 'grpcio-gcp>=0.2.2',\n 'httpx',\n 'json-merge-patch>=0.2',\n 'looker-sdk>=22.2.0',\n 'pandas-gbq',\n pandas_requirement,\n 'sqlalchemy-bigquery>=1.2.1',\n]\ngrpc = [\n # Google has very clear rules on what dependencies should be used. All the limits below\n # follow strict guidelines of Google Libraries as quoted here:\n # While this issue is open, dependents of google-api-core, google-cloud-core. 
and google-auth\n # should preserve >1, <3 pins on these packages.\n # https://github.com/googleapis/google-cloud-python/issues/10566\n 'google-auth>=1.0.0, <3.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'grpcio>=1.15.0',\n]\nhashicorp = [\n 'hvac>=0.10',\n]\nhdfs = [\n 'snakebite-py3',\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nhive = [\n 'hmsclient>=0.1.0',\n 'pyhive[hive]>=0.6.0',\n # in case of Python 3.9 sasl library needs to be installed with version higher or equal than\n # 0.3.1 because only that version supports Python 3.9. For other Python version pyhive[hive] pulls\n # the sasl library anyway (and there sasl library version is not relevant)\n 'sasl>=0.3.1; python_version>=\"3.9\"',\n 'thrift>=0.9.2',\n pandas_requirement,\n]\nhttp = [\n # The 2.26.0 release of requests got rid of the chardet LGPL mandatory dependency, allowing us to\n # release it as a requirement for airflow\n 'requests>=2.26.0',\n]\nhttp_provider = [\n 'apache-airflow-providers-http',\n]\ninfluxdb = [\n 'influxdb-client>=1.19.0',\n pandas_requirement,\n]\njdbc = [\n 'jaydebeapi>=1.1.1',\n]\njenkins = [\n 'python-jenkins>=1.0.0',\n]\njira = [\n 'JIRA>1.0.7',\n]\nkerberos = [\n 'pykerberos>=1.1.13',\n 'requests_kerberos>=0.10.0',\n 'thrift_sasl>=0.2.0',\n]\nkubernetes = [\n 'cryptography>=2.0.0',\n # The Kubernetes API is known to introduce problems when upgraded to a MAJOR version. Airflow Core\n # Uses Kubernetes for Kubernetes executor, and we also know that Kubernetes Python client follows SemVer\n # (https://github.com/kubernetes-client/python#compatibility). This is a crucial component of Airflow\n # So we should limit it to the next MAJOR version and only deliberately bump the version when we\n # tested it, and we know it can be bumped. Bumping this version should also be connected with\n # limiting minimum airflow version supported in cncf.kubernetes provider, due to the\n # potential breaking changes in Airflow Core as well (kubernetes is added as extra, so Airflow\n # core is not hard-limited via install-requirements, only by extra).\n 'kubernetes>=21.7.0,<24',\n]\nkylin = ['kylinpy>=2.6']\nldap = [\n 'ldap3>=2.5.1',\n 'python-ldap',\n]\nleveldb = ['plyvel; platform_machine != \"aarch64\"']\nmongo = [\n 'dnspython>=1.13.0',\n # pymongo 4.0.0 removes connection option `ssl_cert_reqs` which is used in providers-mongo/2.2.0\n # TODO: Upgrade to pymongo 4.0.0+\n 'pymongo>=3.6.0,<4.0.0',\n]\nmssql = [\n 'pymssql>=2.1.5; platform_machine != \"aarch64\"',\n]\nmysql = [\n 'mysql-connector-python>=8.0.11; platform_machine != \"aarch64\"',\n 'mysqlclient>=1.3.6; platform_machine != \"aarch64\"',\n]\nneo4j = ['neo4j>=4.2.1']\nodbc = [\n 'pyodbc',\n]\nopsgenie = [\n 'opsgenie-sdk>=2.1.5',\n]\noracle = [\n 'cx_Oracle>=5.1.2',\n]\npagerduty = [\n 'pdpyras>=4.1.2',\n]\npandas = [\n pandas_requirement,\n]\npapermill = [\n 'papermill[all]>=1.2.1',\n 'scrapbook[all]',\n]\npassword = [\n 'bcrypt>=2.0.0',\n 'flask-bcrypt>=0.7.1',\n]\npinot = [\n # pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it\n # causes a problem with newer versions.\n 'pinotdb>0.1.2',\n]\nplexus = [\n 'arrow>=0.16.0',\n]\npostgres = [\n 'psycopg2-binary>=2.7.4',\n]\npresto = [\n # The limit to Presto 0.8 for unknown reason\n # TODO: Remove the limit\n 'presto-python-client>=0.7.0,<0.8',\n pandas_requirement,\n]\npsrp = [\n 'pypsrp>=0.8',\n]\nqubole = [\n 'qds-sdk>=1.10.4',\n]\nrabbitmq = [\n 'amqp',\n]\nredis = [\n # Redis 4 introduced a number of changes that likely need testing including mixins in redis 
commands\n # as well as unquoting URLS with `urllib.parse.unquote`:\n # https://github.com/redis/redis-py/blob/master/CHANGES\n # TODO: upgrade to support redis package >=4\n 'redis~=3.2',\n]\nsalesforce = ['simple-salesforce>=1.0.0', 'tableauserverclient', pandas_requirement]\nsamba = [\n 'smbprotocol>=1.5.0',\n]\nsegment = [\n 'analytics-python>=1.2.9',\n]\nsendgrid = [\n 'sendgrid>=6.0.0',\n]\nsentry = [\n 'blinker>=1.1',\n 'sentry-sdk>=0.8.0',\n]\nsingularity = ['spython>=0.0.56']\nslack = [\n 'slack_sdk>=3.0.0',\n]\nsnowflake = [\n 'snowflake-connector-python>=2.4.1',\n 'snowflake-sqlalchemy>=1.1.0',\n]\nspark = [\n 'pyspark',\n]\nssh = [\n 'paramiko>=2.6.0',\n 'pysftp>=0.2.9',\n 'sshtunnel>=0.3.2',\n]\nstatsd = [\n 'statsd>=3.3.0',\n]\ntableau = [\n 'tableauserverclient',\n]\ntelegram = [\n 'python-telegram-bot>=13.0',\n]\ntrino = [\n 'trino>=0.301.0',\n pandas_requirement,\n]\nvertica = [\n 'vertica-python>=0.5.1',\n]\nvirtualenv = [\n 'virtualenv',\n]\nwebhdfs = [\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nwinrm = [\n 'pywinrm>=0.4',\n]\nyandex = [\n 'yandexcloud>=0.146.0',\n]\nzendesk = [\n 'zenpy>=2.0.24',\n]\n# End dependencies group\n\n# Mypy 0.900 and above ships only with stubs from stdlib so if we need other stubs, we need to install them\n# manually as `types-*`. See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports\n# for details. We want to install them explicitly because we want to eventually move to\n# mypyd which does not support installing the types dynamically with --install-types\nmypy_dependencies = [\n # TODO: upgrade to newer versions of MyPy continuously as they are released\n 'mypy==0.910',\n 'types-boto',\n 'types-certifi',\n 'types-croniter',\n 'types-Deprecated',\n 'types-docutils',\n 'types-freezegun',\n 'types-paramiko',\n 'types-protobuf',\n 'types-python-dateutil',\n 'types-python-slugify',\n 'types-pytz',\n 'types-redis',\n 'types-requests',\n 'types-setuptools',\n 'types-termcolor',\n 'types-tabulate',\n 'types-toml',\n 'types-Markdown',\n 'types-PyMySQL',\n 'types-PyYAML',\n]\n\n# Dependencies needed for development only\ndevel_only = [\n 'aws_xray_sdk',\n 'beautifulsoup4>=4.7.1',\n 'black',\n 'blinker',\n 'bowler',\n 'click>=8.0',\n 'coverage',\n 'filelock',\n 'flake8>=3.6.0',\n 'flake8-colors',\n 'flaky',\n 'freezegun',\n # Github3 version 3.1.2 requires PyJWT>=2.3.0 which clashes with Flask App Builder where PyJWT is <2.0.0\n # Actually GitHub3.1.0 already introduced PyJWT>=2.3.0 but so far `pip` was able to resolve it without\n # getting into a long backtracking loop and figure out that github3 3.0.0 version is the right version\n # similarly limiting it to 3.1.2 causes pip not to enter the backtracking loop. Apparently when there\n # are 3 versions with PyJWT>=2.3.0 (3.1.0, 3.1.1 and 3.1.2) pip enters into a backtracking loop and fails\n # to resolve that github3 3.0.0 is the right version to use.\n # This limitation could be removed if PyJWT limitation < 2.0.0 is dropped from FAB or when\n # pip resolution is improved to handle the case. 
The issue which describes this PIP behaviour\n # and hopefully allowing to improve it is tracked in https://github.com/pypa/pip/issues/10924\n 'github3.py<3.1.0',\n 'gitpython',\n 'ipdb',\n 'jira',\n 'jsondiff',\n 'mongomock',\n 'moto>=3.1.0',\n 'parameterized',\n 'paramiko',\n 'pipdeptree',\n 'pre-commit',\n 'pypsrp',\n 'pygithub',\n 'pysftp',\n # Pytest 7 has been released in February 2022 and we should attempt to upgrade and remove the limit\n # It contains a number of potential breaking changes but none of them looks breaking our use\n # https://docs.pytest.org/en/latest/changelog.html#pytest-7-0-0-2022-02-03\n # TODO: upgrade it and remove the limit\n 'pytest~=6.0',\n 'pytest-asyncio',\n 'pytest-cov',\n 'pytest-instafail',\n # We should attempt to remove the limit when we upgrade Pytest\n # TODO: remove the limit when we upgrade pytest\n 'pytest-rerunfailures~=9.1',\n 'pytest-timeouts',\n 'pytest-xdist',\n 'python-jose',\n 'pywinrm',\n 'qds-sdk>=1.9.6',\n 'pytest-httpx',\n 'requests_mock',\n 'rich_click',\n 'semver',\n 'twine',\n 'wheel',\n 'yamllint',\n]\n\ndevel = cgroups + devel_only + doc + kubernetes + mypy_dependencies + mysql + pandas + password\ndevel_hadoop = devel + hdfs + hive + kerberos + presto + webhdfs\n\n# Dict of all providers which are part of the Apache Airflow repository together with their requirements\nPROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {\n 'airbyte': http_provider,\n 'alibaba': alibaba,\n 'amazon': amazon,\n 'apache.beam': apache_beam,\n 'apache.cassandra': cassandra,\n 'apache.drill': drill,\n 'apache.druid': druid,\n 'apache.hdfs': hdfs,\n 'apache.hive': hive,\n 'apache.kylin': kylin,\n 'apache.livy': http_provider,\n 'apache.pig': [],\n 'apache.pinot': pinot,\n 'apache.spark': spark,\n 'apache.sqoop': [],\n 'arangodb': arangodb,\n 'asana': asana,\n 'celery': celery,\n 'cloudant': cloudant,\n 'cncf.kubernetes': kubernetes,\n 'databricks': databricks,\n 'datadog': datadog,\n 'dbt.cloud': http_provider,\n 'dingding': [],\n 'discord': [],\n 'docker': docker,\n 'elasticsearch': elasticsearch,\n 'exasol': exasol,\n 'facebook': facebook,\n 'ftp': [],\n 'github': github,\n 'google': google,\n 'grpc': grpc,\n 'hashicorp': hashicorp,\n 'http': http,\n 'imap': [],\n 'influxdb': influxdb,\n 'jdbc': jdbc,\n 'jenkins': jenkins,\n 'jira': jira,\n 'microsoft.azure': azure,\n 'microsoft.mssql': mssql,\n 'microsoft.psrp': psrp,\n 'microsoft.winrm': winrm,\n 'mongo': mongo,\n 'mysql': mysql,\n 'neo4j': neo4j,\n 'odbc': odbc,\n 'openfaas': [],\n 'opsgenie': opsgenie,\n 'oracle': oracle,\n 'pagerduty': pagerduty,\n 'papermill': papermill,\n 'plexus': plexus,\n 'postgres': postgres,\n 'presto': presto,\n 'qubole': qubole,\n 'redis': redis,\n 'salesforce': salesforce,\n 'samba': samba,\n 'segment': segment,\n 'sendgrid': sendgrid,\n 'sftp': ssh,\n 'singularity': singularity,\n 'slack': slack,\n 'snowflake': snowflake,\n 'sqlite': [],\n 'ssh': ssh,\n 'tableau': tableau,\n 'telegram': telegram,\n 'trino': trino,\n 'vertica': vertica,\n 'yandex': yandex,\n 'zendesk': zendesk,\n}\n\n# Those are all additional extras which do not have their own 'providers'\n# The 'apache.atlas' and 'apache.webhdfs' are extras that provide additional libraries\n# but they do not have separate providers (yet?), they are merely there to add extra libraries\n# That can be used in custom python/bash operators.\nADDITIONAL_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'apache.atlas': atlas,\n 'apache.webhdfs': webhdfs,\n}\n\n\n# Those are extras that are extensions of the 'core' Airflow. 
They provide additional features\n# To airflow core. They do not have separate providers because they do not have any operators/hooks etc.\nCORE_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'async': async_packages,\n 'celery': celery, # also has provider, but it extends the core with the CeleryExecutor\n 'cgroups': cgroups,\n 'cncf.kubernetes': kubernetes, # also has provider, but it extends the core with the KubernetesExecutor\n 'dask': dask,\n 'deprecated_api': deprecated_api,\n 'github_enterprise': flask_appbuilder_authlib,\n 'google_auth': flask_appbuilder_authlib,\n 'kerberos': kerberos,\n 'ldap': ldap,\n 'leveldb': leveldb,\n 'pandas': pandas,\n 'password': password,\n 'rabbitmq': rabbitmq,\n 'sentry': sentry,\n 'statsd': statsd,\n 'virtualenv': virtualenv,\n}\n\nEXTRAS_REQUIREMENTS: Dict[str, List[str]] = deepcopy(CORE_EXTRAS_REQUIREMENTS)\n\n\ndef add_extras_for_all_providers() -> None:\n \"\"\"\n Adds extras for all providers.\n By default all providers have the same extra name as provider id, for example\n 'apache.hive' extra has 'apache.hive' provider requirement.\n \"\"\"\n for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[provider_name] = provider_requirement\n\n\ndef add_additional_extras() -> None:\n \"\"\"Adds extras for all additional extras.\"\"\"\n for extra_name, extra_requirement in ADDITIONAL_EXTRAS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[extra_name] = extra_requirement\n\n\nadd_extras_for_all_providers()\nadd_additional_extras()\n\n#############################################################################################################\n# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series\n#############################################################################################################\n\n# Dictionary of aliases from 1.10 - deprecated in Airflow 2.*\nEXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {\n 'atlas': 'apache.atlas',\n 'aws': 'amazon',\n 'azure': 'microsoft.azure',\n 'cassandra': 'apache.cassandra',\n 'crypto': '', # All crypto requirements are installation requirements of core Airflow\n 'druid': 'apache.druid',\n 'gcp': 'google',\n 'gcp_api': 'google',\n 'hdfs': 'apache.hdfs',\n 'hive': 'apache.hive',\n 'kubernetes': 'cncf.kubernetes',\n 'mssql': 'microsoft.mssql',\n 'pinot': 'apache.pinot',\n 'qds': 'qubole',\n 's3': 'amazon',\n 'spark': 'apache.spark',\n 'webhdfs': 'apache.webhdfs',\n 'winrm': 'microsoft.winrm',\n}\n\nEXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS: List[str] = [\n \"crypto\",\n \"webhdfs\",\n]\n\n\ndef add_extras_for_all_deprecated_aliases() -> None:\n \"\"\"\n Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same\n as the extras they are replaced with.\n The requirements are not copies - those are the same lists as for the new extras. This is intended.\n Thanks to that if the original extras are later extended with providers, aliases are extended as well.\n \"\"\"\n for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():\n requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []\n if requirements is None:\n raise Exception(f\"The extra {extra} is missing for deprecated alias {alias}\")\n EXTRAS_REQUIREMENTS[alias] = requirements\n\n\ndef add_all_deprecated_provider_packages() -> None:\n \"\"\"\n For deprecated aliases that are providers, we will swap the providers requirements to instead\n be the provider itself.\n\n e.g. 
{\"kubernetes\": [\"kubernetes>=3.0.0, <12.0.0\", ...]} becomes\n {\"kubernetes\": [\"apache-airflow-providers-cncf-kubernetes\"]}\n \"\"\"\n for alias, provider in EXTRAS_DEPRECATED_ALIASES.items():\n if alias in EXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS:\n continue\n replace_extra_requirement_with_provider_packages(alias, [provider])\n\n\nadd_extras_for_all_deprecated_aliases()\n\n#############################################################################################################\n# End of deprecated section\n#############################################################################################################\n\n# This is a list of all providers. It's a shortcut for anyone who would like to easily get the list of\n# all providers. It is used by pre-commits.\nALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())\n\nALL_DB_PROVIDERS = [\n 'apache.cassandra',\n 'apache.drill',\n 'apache.druid',\n 'apache.hdfs',\n 'apache.hive',\n 'apache.pinot',\n 'arangodb',\n 'cloudant',\n 'databricks',\n 'exasol',\n 'influxdb',\n 'microsoft.mssql',\n 'mongo',\n 'mysql',\n 'neo4j',\n 'postgres',\n 'presto',\n 'trino',\n 'vertica',\n]\n\n# Special requirements for all database-related providers. They are de-duplicated.\nall_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})\n\n# Requirements for all \"user\" extras (no devel). They are de-duplicated. Note that we do not need\n# to separately add providers requirements - they have been already added as 'providers' extras above\n_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})\n\n# All user extras here\nEXTRAS_REQUIREMENTS[\"all\"] = _all_requirements\n\n# All db user extras here\nEXTRAS_REQUIREMENTS[\"all_dbs\"] = all_dbs + pandas\n\n# This can be simplified to devel_hadoop + _all_requirements due to inclusions\n# but we keep it for the sake of explicitness. 
We are de-duplicating it anyway.\ndevel_all = list(set(_all_requirements + doc + devel + devel_hadoop))\n\n# Those are packages excluded for \"all\" dependencies\nPACKAGES_EXCLUDED_FOR_ALL = []\nPACKAGES_EXCLUDED_FOR_ALL.extend(\n [\n 'snakebite',\n ]\n)\n\n\ndef is_package_excluded(package: str, exclusion_list: List[str]) -> bool:\n \"\"\"\n Checks if package should be excluded.\n\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n \"\"\"\n return any(package.startswith(excluded_package) for excluded_package in exclusion_list)\n\n\ndevel_all = [\n package\n for package in devel_all\n if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)\n]\n\ndevel_ci = devel_all\n\n\n# Those are extras that we have to add for development purposes\n# They can be used to install a predefined set of dependencies.\nEXTRAS_REQUIREMENTS[\"doc\"] = doc\nEXTRAS_REQUIREMENTS[\"devel\"] = devel # devel already includes doc\nEXTRAS_REQUIREMENTS[\"devel_hadoop\"] = devel_hadoop # devel_hadoop already includes devel\nEXTRAS_REQUIREMENTS[\"devel_all\"] = devel_all\nEXTRAS_REQUIREMENTS[\"devel_ci\"] = devel_ci\n\n\ndef sort_extras_requirements() -> Dict[str, List[str]]:\n \"\"\"\n The dictionary order remains when keys() are retrieved.\n Sort both: extras and list of dependencies to make it easier to analyse problems;\n external packages will be first, then if providers are added they are added at the end of the lists.\n \"\"\"\n sorted_requirements = dict(sorted(EXTRAS_REQUIREMENTS.items()))\n for extra_list in sorted_requirements.values():\n extra_list.sort()\n return sorted_requirements\n\n\nEXTRAS_REQUIREMENTS = sort_extras_requirements()\n\n# Those providers are pre-installed always when airflow is installed.\n# Those providers do not have a dependency on airflow 2.0 because that would lead to circular dependencies.\n# This is not a problem for PIP but some tools (pipdeptree) show those as a warning.\nPREINSTALLED_PROVIDERS = [\n 'ftp',\n 'http',\n 'imap',\n 'sqlite',\n]\n\n\ndef get_provider_package_from_package_id(package_id: str) -> str:\n \"\"\"\n Builds the name of provider package out of the package id provided.\n\n :param package_id: id of the package (like amazon or microsoft.azure)\n :return: full name of package in PyPI\n \"\"\"\n package_suffix = package_id.replace(\".\", \"-\")\n return f\"apache-airflow-providers-{package_suffix}\"\n\n\ndef get_excluded_providers() -> List[str]:\n \"\"\"Returns packages excluded for the current python version.\"\"\"\n return []\n\n\ndef get_all_provider_packages() -> str:\n \"\"\"Returns all provider packages configured in setup.py\"\"\"\n excluded_providers = get_excluded_providers()\n return \" \".join(\n get_provider_package_from_package_id(package)\n for package in PROVIDERS_REQUIREMENTS\n if package not in excluded_providers\n )\n\n\nclass AirflowDistribution(Distribution):\n \"\"\"The setuptools.Distribution subclass with Airflow specific behaviour\"\"\"\n\n def __init__(self, attrs=None):\n super().__init__(attrs)\n self.install_requires = None\n\n def parse_config_files(self, *args, **kwargs) -> None:\n \"\"\"\n Ensure that when we have been asked to install providers from sources\n that we don't *also* try to install those providers from PyPI.\n Also we should make sure that in this case we copy provider.yaml files so that\n Providers manager can find package information.\n \"\"\"\n super().parse_config_files(*args, **kwargs)\n if 
os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n self.install_requires = [\n req for req in self.install_requires if not req.startswith('apache-airflow-providers-')\n ]\n provider_yaml_files = glob.glob(\"airflow/providers/**/provider.yaml\", recursive=True)\n for provider_yaml_file in provider_yaml_files:\n provider_relative_path = relpath(provider_yaml_file, os.path.join(my_dir, \"airflow\"))\n self.package_data['airflow'].append(provider_relative_path)\n else:\n self.install_requires.extend(\n [get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS]\n )\n\n\ndef replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:\n \"\"\"\n Replaces extra requirement with provider package. The intention here is that when\n the provider is added as dependency of extra, there is no need to add the dependencies\n separately. This is not needed and even harmful, because in case of future versions of\n the provider, the requirements might change, so hard-coding requirements from the version\n that was available at the release time might cause dependency conflicts in the future.\n\n Say for example that you have salesforce provider with those deps:\n\n { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }\n\n Initially ['salesforce'] extra has those requirements and it works like that when you install\n it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when\n the production installation is used, The dependencies are changed:\n\n { 'salesforce': ['apache-airflow-providers-salesforce'] }\n\n And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:\n ['simple-salesforce>=1.0.0', 'tableauserverclient']\n\n So transitively 'salesforce' extra has all the requirements it needs and in case the provider\n changes its dependencies, they will transitively change as well.\n\n In the constraint mechanism we save both - provider versions and it's dependencies\n version, which means that installation using constraints is repeatable.\n\n For K8s and Celery which are both \"Core executors\" and \"Providers\" we have to\n add the base dependencies to core as well, in order to mitigate problems where\n newer version of provider will have less strict limits. This should be done for both\n extras and their deprecated aliases. This is not a full protection however, the way\n extras work, this will not add \"hard\" limits for Airflow and the user who does not use\n constraints.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n if extra in ['cncf.kubernetes', 'kubernetes', 'celery']:\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n else:\n EXTRAS_REQUIREMENTS[extra] = [\n get_provider_package_from_package_id(package_name) for package_name in providers\n ]\n\n\ndef add_provider_packages_to_extra_requirements(extra: str, providers: List[str]) -> None:\n \"\"\"\n Adds provider packages as requirements to extra. This is used to add provider packages as requirements\n to the \"bulk\" kind of extras. 
Those bulk extras do not have the detailed 'extra' requirements as\n initial values, so instead of replacing them (see the previous function) we can extend them.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n\n\ndef add_all_provider_packages() -> None:\n \"\"\"\n In case of regular installation (providers installed from packages), we should add extra dependencies to\n Airflow - to get the providers automatically installed when those extras are installed.\n\n For providers installed from sources we skip that step. That helps to test and install airflow with\n all packages in CI - for example when new providers are added; otherwise the installation would fail\n as the new provider is not yet in PyPI.\n\n \"\"\"\n for provider in ALL_PROVIDERS:\n replace_extra_requirement_with_provider_packages(provider, [provider])\n add_provider_packages_to_extra_requirements(\"all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_ci\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"all_dbs\", ALL_DB_PROVIDERS)\n add_provider_packages_to_extra_requirements(\n \"devel_hadoop\", [\"apache.hdfs\", \"apache.hive\", \"presto\", \"trino\"]\n )\n add_all_deprecated_provider_packages()\n\n\nclass Develop(develop_orig):\n \"\"\"Forces removal of providers in editable mode.\"\"\"\n\n def run(self) -> None: # type: ignore\n self.announce('Installing in editable mode. Uninstalling provider packages!', level=log.INFO)\n # We need to run \"python3 -m pip\" because an older PIP binary might be in the path,\n # and running pip directly then results in an error (cannot import the pip module).\n # Also, PIP does not have a stable API, so we have to run subprocesses ¯\\_(ツ)_/¯\n try:\n installed_packages = (\n subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"]).decode().splitlines()\n )\n airflow_provider_packages = [\n package_line.split(\"=\")[0]\n for package_line in installed_packages\n if package_line.startswith(\"apache-airflow-providers\")\n ]\n self.announce(f'Uninstalling {airflow_provider_packages}!', level=log.INFO)\n subprocess.check_call([\"python3\", \"-m\", \"pip\", \"uninstall\", \"--yes\", *airflow_provider_packages])\n except subprocess.CalledProcessError as e:\n self.announce(f'Error when uninstalling airflow provider packages: {e}!', level=log.WARN)\n super().run()\n\n\nclass Install(install_orig):\n \"\"\"Standard installation: providers are installed from packages.\"\"\"\n\n def run(self) -> None:\n self.announce('Standard installation. Providers are installed from packages', level=log.INFO)\n super().run()\n\n\ndef do_setup() -> None:\n \"\"\"\n Perform the Airflow package setup.\n\n Most values come from setup.cfg; only the dynamically calculated ones are passed to the setup()\n function call. 
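For example, the 'packages' argument below is only computed dynamically when providers are installed from sources.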
See https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html\n \"\"\"\n setup_kwargs = {}\n\n def include_provider_namespace_packages_when_installing_from_sources() -> None:\n \"\"\"\n When installing providers from sources we install all namespace packages found below airflow,\n including airflow and provider packages, otherwise defaults from setup.cfg control this.\n The kwargs in setup() call override those that are specified in setup.cfg.\n \"\"\"\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])\n\n include_provider_namespace_packages_when_installing_from_sources()\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n print(\"Installing providers from sources. Skip adding providers as dependencies\")\n else:\n add_all_provider_packages()\n\n write_version()\n setup(\n distclass=AirflowDistribution,\n version=version,\n extras_require=EXTRAS_REQUIREMENTS,\n download_url=('https://archive.apache.org/dist/airflow/' + version),\n cmdclass={\n 'extra_clean': CleanCommand,\n 'compile_assets': CompileAssets,\n 'list_extras': ListExtras,\n 'install': Install, # type: ignore\n 'develop': Develop,\n },\n test_suite='setup.airflow_test_suite',\n **setup_kwargs, # type: ignore\n )\n\n\nif __name__ == \"__main__\":\n do_setup() # comment\n","path":"setup.py"}]},"pr_diff":{"kind":"string","value":"diff --git a/airflow/providers/databricks/provider.yaml b/airflow/providers/databricks/provider.yaml\nindex 3aba329e4d89d..4afbc0c5357db 100644\n--- a/airflow/providers/databricks/provider.yaml\n+++ b/airflow/providers/databricks/provider.yaml\n@@ -37,9 +37,6 @@ versions:\n additional-dependencies:\n - apache-airflow>=2.1.0\n \n-excluded-python-versions:\n- - \"3.10\"\n-\n integrations:\n - integration-name: Databricks\n external-doc-url: https://databricks.com/\ndiff --git a/docs/apache-airflow-providers-databricks/index.rst b/docs/apache-airflow-providers-databricks/index.rst\nindex 56836078c2516..51c8381f0c20c 100644\n--- a/docs/apache-airflow-providers-databricks/index.rst\n+++ b/docs/apache-airflow-providers-databricks/index.rst\n@@ -80,7 +80,7 @@ PIP requirements\n PIP package Version required\n ============================ ===================\n ``apache-airflow`` ``>=2.1.0``\n-``databricks-sql-connector`` ``>=1.0.0, <2.0.0``\n+``databricks-sql-connector`` ``>=1.0.2, <2.0.0``\n ``requests`` ``>=2.26.0, <3``\n ============================ ===================\n \ndiff --git a/setup.py b/setup.py\nindex e18cef450fd88..950285353f0ce 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -264,7 +264,7 @@ def write_version(filename: str = os.path.join(*[my_dir, \"airflow\", \"git_version\n ]\n databricks = [\n 'requests>=2.26.0, <3',\n- 'databricks-sql-connector>=1.0.0, <2.0.0',\n+ 'databricks-sql-connector>=1.0.2, <2.0.0',\n ]\n datadog = [\n 'datadog>=0.14.0',\ndiff --git a/tests/providers/databricks/operators/test_databricks_sql.py b/tests/providers/databricks/operators/test_databricks_sql.py\nindex 08e24d72d8012..6b9fb43701ae3 100644\n--- a/tests/providers/databricks/operators/test_databricks_sql.py\n+++ b/tests/providers/databricks/operators/test_databricks_sql.py\n@@ -24,7 +24,7 @@\n import pytest\n from databricks.sql.types import Row\n \n-from airflow import PY310, AirflowException\n+from airflow import AirflowException\n from airflow.providers.databricks.operators.databricks_sql import (\n DatabricksCopyIntoOperator,\n 
DatabricksSqlOperator,\n@@ -83,12 +83,6 @@ def test_exec_write_file(self, db_mock_class):\n db_mock.run.assert_called_once_with(sql, parameters=None)\n \n \n-@pytest.mark.skipif(\n- PY310,\n- reason=\"Databricks SQL tests not run on Python 3.10 because there is direct Iterable import from\"\n- \" collections in the databricks SQL library, where it should be imported from collections.abc.\"\n- \" This could be removed when https://github.com/apache/airflow/issues/22220 is solved\",\n-)\n class TestDatabricksSqlCopyIntoOperator(unittest.TestCase):\n def test_copy_with_files(self):\n op = DatabricksCopyIntoOperator(\n"}}},{"rowIdx":414,"cells":{"in_source_id":{"kind":"string","value":"zestedesavoir__zds-site-2605"},"issue":{"kind":"string","value":"[Bêta v1.8] Les flux rss ne fonctionnent plus => erreur 500\nEn direct de la beta, il est impossible d'avoir les flux rss en cliquant sur les liens correspondant. On se prend une erreur 500.\n\nC'est donc une regression, puisque ça marche en prod.\n\n"},"before_files":{"kind":"list like","value":[{"content":"# coding: utf-8\n\nimport os\n\nfrom django.utils.translation import gettext_lazy as _\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n# INTERNAL_IPS = ('127.0.0.1',) # debug toolbar\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'base.db'),\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'Europe/Paris'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'fr-fr'\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = False\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = False\n\nLANGUAGES = (\n ('fr', _('Français')),\n ('en', _('Anglais')),\n)\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(BASE_DIR, 'dist'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nFIXTURE_DIRS = (os.path.join(BASE_DIR, 'fixtures'),)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'n!01nl+318#x75_%le8#s0=-*ysw&y49uc#t=*wvi(9hnyii0z'\n\nFILE_UPLOAD_HANDLERS = (\n \"django.core.files.uploadhandler.MemoryFileUploadHandler\",\n \"django.core.files.uploadhandler.TemporaryFileUploadHandler\",\n)\n\nMIDDLEWARE_CLASSES = (\n # CorsMiddleware needs to be before CommonMiddleware.\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'zds.utils.ThreadLocals',\n 'zds.middlewares.SetLastVisitMiddleware.SetLastVisitMiddleware',\n 'zds.middlewares.profile.ProfileMiddleware',\n)\n\nROOT_URLCONF = 'zds.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'zds.wsgi.application'\n\nTEMPLATE_DIRS = [\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(BASE_DIR, 'templates')\n]\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n # Default context processors\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'social.apps.django_app.context_processors.backends',\n 'social.apps.django_app.context_processors.login_redirect',\n # ZDS context processors\n 'zds.utils.context_processor.app_settings',\n 'zds.utils.context_processor.git_version',\n)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sitemaps',\n 'django.contrib.humanize',\n\n 'easy_thumbnails',\n 
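# easy_thumbnails generates the thumbnail sizes declared in THUMBNAIL_ALIASES below; its optimize extension post-processes the resulting images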
'easy_thumbnails.optimize',\n 'crispy_forms',\n 'haystack',\n 'munin',\n 'social.apps.django_app.default',\n 'rest_framework',\n 'rest_framework_swagger',\n 'corsheaders',\n 'oauth2_provider',\n\n # Apps DB tables are created in THIS order by default\n # --> Order is CRITICAL to properly handle foreign keys\n 'zds.utils',\n 'zds.pages',\n 'zds.gallery',\n 'zds.mp',\n 'zds.article',\n 'zds.forum',\n 'zds.tutorial',\n 'zds.member',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n)\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (60, 60), 'crop': True},\n 'avatar_mini': {'size': (24, 24), 'crop': True},\n 'tutorial_illu': {'size': (60, 60), 'crop': True},\n 'article_illu': {'size': (60, 60), 'crop': True},\n 'help_illu': {'size': (48, 48), 'crop': True},\n 'help_mini_illu': {'size': (26, 26), 'crop': True},\n 'gallery': {'size': (120, 120), 'crop': True},\n 'content': {'size': (960, 960), 'crop': False},\n },\n}\n\nREST_FRAMEWORK = {\n # If the pagination isn't specify in the API, its configuration is\n # specified here.\n 'PAGINATE_BY': 10, # Default to 10\n 'PAGINATE_BY_PARAM': 'page_size', # Allow client to override, using `?page_size=xxx`.\n 'MAX_PAGINATE_BY': 100, # Maximum limit allowed when using `?page_size=xxx`.\n # Active OAuth2 authentication.\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'oauth2_provider.ext.rest_framework.OAuth2Authentication',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n #'rest_framework.parsers.XMLParser',\n 'rest_framework_xml.parsers.XMLParser',\n 'rest_framework.parsers.FormParser',\n 'rest_framework.parsers.MultiPartParser',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n #'rest_framework.renderers.XMLRenderer',\n 'rest_framework_xml.renderers.XMLRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_THROTTLE_CLASSES': (\n 'rest_framework.throttling.AnonRateThrottle',\n 'rest_framework.throttling.UserRateThrottle'\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'anon': '60/hour',\n 'user': '2000/hour'\n }\n}\n\nREST_FRAMEWORK_EXTENSIONS = {\n # If the cache isn't specify in the API, the time of the cache\n # is specified here in seconds.\n 'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 15\n}\n\nSWAGGER_SETTINGS = {\n 'enabled_methods': [\n 'get',\n 'post',\n 'put',\n 'delete'\n ]\n}\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nCORS_ALLOW_METHODS = (\n 'GET',\n 'POST',\n 'PUT',\n 'DELETE',\n)\n\nCORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken',\n 'x-data-format'\n)\n\nCORS_EXPOSE_HEADERS = (\n 'etag',\n 'link'\n)\n\nif (DEBUG):\n INSTALLED_APPS += (\n 'debug_toolbar',\n )\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11211',\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nLOGIN_URL = '/membres/connexion'\n\nABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda u: '/membres/voir/{0}/'.format(u.username.encode('utf-8'))\n}\n\n\n# Django fileserve settings (set to True for local dev version only)\nSERVE = False\n\nPANDOC_LOC = ''\nPANDOC_PDF_PARAM = \"--latex-engine=xelatex --template=../../assets/tex/template.tex -s -S -N --toc -V documentclass=scrbook -V lang=francais -V mainfont=Merriweather -V monofont=\\\"Andale Mono\\\" -V fontsize=12pt -V geometry:margin=1in \"\n# LOG PATH FOR PANDOC LOGGING\nPANDOC_LOG = './pandoc.log'\nPANDOC_LOG_STATE = False\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n 'URL': 'http://127.0.0.1:8983/solr'\n # ...or for multicore...\n # 'URL': 'http://127.0.0.1:8983/solr/mysite',\n },\n}\n\nGEOIP_PATH = os.path.join(BASE_DIR, 'geodata')\n\n# Fake mails (in console)\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nfrom django.contrib.messages import constants as message_constants\nMESSAGE_TAGS = {\n message_constants.DEBUG: 'debug',\n message_constants.INFO: 'info',\n message_constants.SUCCESS: 'success',\n message_constants.WARNING: 'warning',\n message_constants.ERROR: 'alert',\n}\n\nSDZ_TUTO_DIR = ''\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'conf/locale/'),\n)\n\nZDS_APP = {\n 'site': {\n 'name': u\"ZesteDeSavoir\",\n 'litteral_name': u\"Zeste de Savoir\",\n 'slogan': u\"Zeste de Savoir, la connaissance pour tous et sans pépins\",\n 'abbr': u\"zds\",\n 'url': u\"http://127.0.0.1:8000\",\n 'dns': u\"zestedesavoir.com\",\n 'email_contact': u\"communication@zestedesavoir.com\",\n 'email_noreply': u\"noreply@zestedesavoir.com\",\n 'repository': u\"https://github.com/zestedesavoir/zds-site\",\n 'bugtracker': u\"https://github.com/zestedesavoir/zds-site/issues\",\n 'forum_feedback_users': u\"/forums/communaute/bug-suggestions/\",\n 'short_description': u\"\",\n 'long_description': u\"Zeste de Savoir est un site de partage de connaissances \"\n u\"sur lequel vous trouverez des tutoriels de tous niveaux, \"\n u\"des articles et des forums d'entraide animés par et pour \"\n u\"la communauté.\",\n 'association': {\n 'name': u\"Zeste de Savoir\",\n 'fee': u\"30 €\",\n 'email': u\"association@zestedesavoir.com\",\n 'email_ca': u\"ca-zeste-de-savoir@googlegroups.com\"\n },\n 'licenses': {\n 'logo': {\n 'code': u\"CC-BY\",\n 'title': u\"Creative Commons License\",\n 'description': u\"Licence Creative Commons Attribution - Pas d’Utilisation Commerciale - \"\n u\"Partage dans les Mêmes Conditions 4.0 International.\",\n 'url_image': 
u\"http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png\",\n 'url_license': u\"http://creativecommons.org/licenses/by-nc-sa/4.0/\",\n 'author': u\"MaxRoyo\"\n },\n 'cookies': {\n 'code': u\"CC-BY\",\n 'title': u\"Licence Creative Commons\",\n 'description': u\"licence Creative Commons Attribution 4.0 International\",\n 'url_image': u\"http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png\",\n 'url_license': u\"http://creativecommons.org/licenses/by-nc-sa/4.0/\"\n },\n 'source': {\n 'code': u\"GPL v3\",\n 'url_license': u\"http://www.gnu.org/licenses/gpl-3.0.html\",\n 'provider_name': u\"Progdupeupl\",\n 'provider_url': u\"http://pdp.microjoe.org\",\n },\n 'licence_info_title': u'http://zestedesavoir.com/tutoriels/281/le-droit-dauteur-creative-commons-et-les-lic'\n u'ences-sur-zeste-de-savoir/',\n 'licence_info_link': u'Le droit d\\'auteur, Creative Commons et les licences sur Zeste de Savoir'\n },\n 'hosting': {\n 'name': u\"OVH\",\n 'address': u\"2 rue Kellermann - 59100 Roubaix - France\"\n },\n 'social': {\n 'facebook': u'https://www.facebook.com/ZesteDeSavoir',\n 'twitter': u'https://twitter.com/ZesteDeSavoir',\n 'googleplus': u'https://plus.google.com/u/0/107033688356682807298'\n },\n 'cnil': u\"1771020\",\n },\n 'member': {\n 'bot_account': u\"admin\",\n 'anonymous_account': u\"anonymous\",\n 'external_account': u\"external\",\n 'bot_group': u'bot',\n 'members_per_page': 100,\n },\n 'gallery': {\n 'image_max_size': 1024 * 1024,\n },\n 'article': {\n 'home_number': 5,\n 'repo_path': os.path.join(BASE_DIR, 'articles-data')\n },\n 'tutorial': {\n 'repo_path': os.path.join(BASE_DIR, 'tutoriels-private'),\n 'repo_public_path': os.path.join(BASE_DIR, 'tutoriels-public'),\n 'default_license_pk': 7,\n 'home_number': 5,\n 'helps_per_page': 20\n },\n 'forum': {\n 'posts_per_page': 21,\n 'topics_per_page': 21,\n 'spam_limit_seconds': 60 * 15,\n 'spam_limit_participant': 2,\n 'followed_topics_per_page': 21,\n 'beta_forum_id': 1,\n 'max_post_length': 1000000,\n 'top_tag_max': 5,\n 'home_number': 5,\n },\n 'paginator': {\n 'folding_limit': 4\n }\n}\n\nLOGIN_REDIRECT_URL = \"/\"\n\nAUTHENTICATION_BACKENDS = ('social.backends.facebook.FacebookOAuth2',\n 'social.backends.google.GoogleOAuth2',\n 'django.contrib.auth.backends.ModelBackend')\nSOCIAL_AUTH_GOOGLE_OAUTH2_USE_DEPRECATED_API = True\n\nSOCIAL_AUTH_PIPELINE = (\n 'social.pipeline.social_auth.social_details',\n 'social.pipeline.social_auth.social_uid',\n 'social.pipeline.social_auth.auth_allowed',\n 'social.pipeline.social_auth.social_user',\n 'social.pipeline.user.get_username',\n 'social.pipeline.user.create_user',\n 'zds.member.models.save_profile',\n 'social.pipeline.social_auth.associate_user',\n 'social.pipeline.social_auth.load_extra_data',\n 'social.pipeline.user.user_details'\n)\n\n# redefine for real key and secret code\nSOCIAL_AUTH_FACEBOOK_KEY = \"\"\nSOCIAL_AUTH_FACEBOOK_SECRET = \"\"\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = \"696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com\"\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = \"mApWNh3stCsYHwsGuWdbZWP8\"\n\n# To remove a useless warning in Django 1.7.\n# See http://daniel.hepper.net/blog/2014/04/fixing-1_6-w001-when-upgrading-from-django-1-5-to-1-7/\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n# Load the production settings, overwrite the existing ones if needed\ntry:\n from settings_prod import *\nexcept ImportError:\n pass\n\n","path":"zds/settings.py"}],"string":"[\n {\n \"content\": \"# coding: utf-8\\n\\nimport os\\n\\nfrom django.utils.translation import 
5,\\n },\\n 'paginator': {\\n 'folding_limit': 4\\n }\\n}\\n\\nLOGIN_REDIRECT_URL = \\\"/\\\"\\n\\nAUTHENTICATION_BACKENDS = ('social.backends.facebook.FacebookOAuth2',\\n 'social.backends.google.GoogleOAuth2',\\n 'django.contrib.auth.backends.ModelBackend')\\nSOCIAL_AUTH_GOOGLE_OAUTH2_USE_DEPRECATED_API = True\\n\\nSOCIAL_AUTH_PIPELINE = (\\n 'social.pipeline.social_auth.social_details',\\n 'social.pipeline.social_auth.social_uid',\\n 'social.pipeline.social_auth.auth_allowed',\\n 'social.pipeline.social_auth.social_user',\\n 'social.pipeline.user.get_username',\\n 'social.pipeline.user.create_user',\\n 'zds.member.models.save_profile',\\n 'social.pipeline.social_auth.associate_user',\\n 'social.pipeline.social_auth.load_extra_data',\\n 'social.pipeline.user.user_details'\\n)\\n\\n# redefine for real key and secret code\\nSOCIAL_AUTH_FACEBOOK_KEY = \\\"\\\"\\nSOCIAL_AUTH_FACEBOOK_SECRET = \\\"\\\"\\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = \\\"696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com\\\"\\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = \\\"mApWNh3stCsYHwsGuWdbZWP8\\\"\\n\\n# To remove a useless warning in Django 1.7.\\n# See http://daniel.hepper.net/blog/2014/04/fixing-1_6-w001-when-upgrading-from-django-1-5-to-1-7/\\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\\n\\n# Load the production settings, overwrite the existing ones if needed\\ntry:\\n from settings_prod import *\\nexcept ImportError:\\n pass\\n\\n\",\n \"path\": \"zds/settings.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# coding: utf-8\n\nimport os\n\nfrom django.utils.translation import gettext_lazy as _\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n# INTERNAL_IPS = ('127.0.0.1',) # debug toolbar\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'base.db'),\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'Europe/Paris'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'fr-fr'\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = False\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = False\n\nLANGUAGES = (\n ('fr', _('Français')),\n ('en', _('Anglais')),\n)\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(BASE_DIR, 'dist'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nFIXTURE_DIRS = (os.path.join(BASE_DIR, 'fixtures'))\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'n!01nl+318#x75_%le8#s0=-*ysw&amp;y49uc#t=*wvi(9hnyii0z'\n\nFILE_UPLOAD_HANDLERS = (\n \"django.core.files.uploadhandler.MemoryFileUploadHandler\",\n \"django.core.files.uploadhandler.TemporaryFileUploadHandler\",\n)\n\nMIDDLEWARE_CLASSES = (\n # CorsMiddleware needs to be before CommonMiddleware.\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'zds.utils.ThreadLocals',\n 'zds.middlewares.SetLastVisitMiddleware.SetLastVisitMiddleware',\n 'zds.middlewares.profile.ProfileMiddleware',\n)\n\nROOT_URLCONF = 'zds.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'zds.wsgi.application'\n\nTEMPLATE_DIRS = [\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(BASE_DIR, 'templates')\n]\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n # Default context processors\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'social.apps.django_app.context_processors.backends',\n 'social.apps.django_app.context_processors.login_redirect',\n # ZDS context processors\n 'zds.utils.context_processor.app_settings',\n 'zds.utils.context_processor.git_version',\n)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sitemaps',\n 'django.contrib.humanize',\n\n 'easy_thumbnails',\n 
'easy_thumbnails.optimize',\n 'crispy_forms',\n 'haystack',\n 'munin',\n 'social.apps.django_app.default',\n 'rest_framework',\n 'rest_framework_swagger',\n 'corsheaders',\n 'oauth2_provider',\n\n # Apps DB tables are created in THIS order by default\n # --> Order is CRITICAL to properly handle foreign keys\n 'zds.utils',\n 'zds.pages',\n 'zds.gallery',\n 'zds.mp',\n 'zds.article',\n 'zds.forum',\n 'zds.tutorial',\n 'zds.member',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n)\n\nSITE_ID = 1\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (60, 60), 'crop': True},\n 'avatar_mini': {'size': (24, 24), 'crop': True},\n 'tutorial_illu': {'size': (60, 60), 'crop': True},\n 'article_illu': {'size': (60, 60), 'crop': True},\n 'help_illu': {'size': (48, 48), 'crop': True},\n 'help_mini_illu': {'size': (26, 26), 'crop': True},\n 'gallery': {'size': (120, 120), 'crop': True},\n 'content': {'size': (960, 960), 'crop': False},\n },\n}\n\nREST_FRAMEWORK = {\n # If the pagination isn't specify in the API, its configuration is\n # specified here.\n 'PAGINATE_BY': 10, # Default to 10\n 'PAGINATE_BY_PARAM': 'page_size', # Allow client to override, using `?page_size=xxx`.\n 'MAX_PAGINATE_BY': 100, # Maximum limit allowed when using `?page_size=xxx`.\n # Active OAuth2 authentication.\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'oauth2_provider.ext.rest_framework.OAuth2Authentication',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n #'rest_framework.parsers.XMLParser',\n 'rest_framework_xml.parsers.XMLParser',\n 'rest_framework.parsers.FormParser',\n 'rest_framework.parsers.MultiPartParser',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n #'rest_framework.renderers.XMLRenderer',\n 'rest_framework_xml.renderers.XMLRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_THROTTLE_CLASSES': (\n 'rest_framework.throttling.AnonRateThrottle',\n 'rest_framework.throttling.UserRateThrottle'\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'anon': '60/hour',\n 'user': '2000/hour'\n }\n}\n\nREST_FRAMEWORK_EXTENSIONS = {\n # If the cache isn't specify in the API, the time of the cache\n # is specified here in seconds.\n 'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 15\n}\n\nSWAGGER_SETTINGS = {\n 'enabled_methods': [\n 'get',\n 'post',\n 'put',\n 'delete'\n ]\n}\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nCORS_ALLOW_METHODS = (\n 'GET',\n 'POST',\n 'PUT',\n 'DELETE',\n)\n\nCORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken',\n 'x-data-format'\n)\n\nCORS_EXPOSE_HEADERS = (\n 'etag',\n 'link'\n)\n\nif (DEBUG):\n INSTALLED_APPS += (\n 'debug_toolbar',\n )\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11211',\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nLOGIN_URL = '/membres/connexion'\n\nABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda u: '/membres/voir/{0}/'.format(u.username.encode('utf-8'))\n}\n\n\n# Django fileserve settings (set to True for local dev version only)\nSERVE = False\n\nPANDOC_LOC = ''\nPANDOC_PDF_PARAM = \"--latex-engine=xelatex --template=../../assets/tex/template.tex -s -S -N --toc -V documentclass=scrbook -V lang=francais -V mainfont=Merriweather -V monofont=\\\"Andale Mono\\\" -V fontsize=12pt -V geometry:margin=1in \"\n# LOG PATH FOR PANDOC LOGGING\nPANDOC_LOG = './pandoc.log'\nPANDOC_LOG_STATE = False\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n 'URL': 'http://127.0.0.1:8983/solr'\n # ...or for multicore...\n # 'URL': 'http://127.0.0.1:8983/solr/mysite',\n },\n}\n\nGEOIP_PATH = os.path.join(BASE_DIR, 'geodata')\n\n# Fake mails (in console)\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nfrom django.contrib.messages import constants as message_constants\nMESSAGE_TAGS = {\n message_constants.DEBUG: 'debug',\n message_constants.INFO: 'info',\n message_constants.SUCCESS: 'success',\n message_constants.WARNING: 'warning',\n message_constants.ERROR: 'alert',\n}\n\nSDZ_TUTO_DIR = ''\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'conf/locale/'),\n)\n\nZDS_APP = {\n 'site': {\n 'name': u\"ZesteDeSavoir\",\n 'litteral_name': u\"Zeste de Savoir\",\n 'slogan': u\"Zeste de Savoir, la connaissance pour tous et sans pépins\",\n 'abbr': u\"zds\",\n 'url': u\"http://127.0.0.1:8000\",\n 'dns': u\"zestedesavoir.com\",\n 'email_contact': u\"communication@zestedesavoir.com\",\n 'email_noreply': u\"noreply@zestedesavoir.com\",\n 'repository': u\"https://github.com/zestedesavoir/zds-site\",\n 'bugtracker': u\"https://github.com/zestedesavoir/zds-site/issues\",\n 'forum_feedback_users': u\"/forums/communaute/bug-suggestions/\",\n 'short_description': u\"\",\n 'long_description': u\"Zeste de Savoir est un site de partage de connaissances \"\n u\"sur lequel vous trouverez des tutoriels de tous niveaux, \"\n u\"des articles et des forums d'entraide animés par et pour \"\n u\"la communauté.\",\n 'association': {\n 'name': u\"Zeste de Savoir\",\n 'fee': u\"30 €\",\n 'email': u\"association@zestedesavoir.com\",\n 'email_ca': u\"ca-zeste-de-savoir@googlegroups.com\"\n },\n 'licenses': {\n 'logo': {\n 'code': u\"CC-BY\",\n 'title': u\"Creative Commons License\",\n 'description': u\"Licence Creative Commons Attribution - Pas d’Utilisation Commerciale - \"\n u\"Partage dans les Mêmes Conditions 4.0 International.\",\n 'url_image': 
u\"http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png\",\n 'url_license': u\"http://creativecommons.org/licenses/by-nc-sa/4.0/\",\n 'author': u\"MaxRoyo\"\n },\n 'cookies': {\n 'code': u\"CC-BY\",\n 'title': u\"Licence Creative Commons\",\n 'description': u\"licence Creative Commons Attribution 4.0 International\",\n 'url_image': u\"http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png\",\n 'url_license': u\"http://creativecommons.org/licenses/by-nc-sa/4.0/\"\n },\n 'source': {\n 'code': u\"GPL v3\",\n 'url_license': u\"http://www.gnu.org/licenses/gpl-3.0.html\",\n 'provider_name': u\"Progdupeupl\",\n 'provider_url': u\"http://pdp.microjoe.org\",\n },\n 'licence_info_title': u'http://zestedesavoir.com/tutoriels/281/le-droit-dauteur-creative-commons-et-les-lic'\n u'ences-sur-zeste-de-savoir/',\n 'licence_info_link': u'Le droit d\\'auteur, Creative Commons et les licences sur Zeste de Savoir'\n },\n 'hosting': {\n 'name': u\"OVH\",\n 'address': u\"2 rue Kellermann - 59100 Roubaix - France\"\n },\n 'social': {\n 'facebook': u'https://www.facebook.com/ZesteDeSavoir',\n 'twitter': u'https://twitter.com/ZesteDeSavoir',\n 'googleplus': u'https://plus.google.com/u/0/107033688356682807298'\n },\n 'cnil': u\"1771020\",\n },\n 'member': {\n 'bot_account': u\"admin\",\n 'anonymous_account': u\"anonymous\",\n 'external_account': u\"external\",\n 'bot_group': u'bot',\n 'members_per_page': 100,\n },\n 'gallery': {\n 'image_max_size': 1024 * 1024,\n },\n 'article': {\n 'home_number': 5,\n 'repo_path': os.path.join(BASE_DIR, 'articles-data')\n },\n 'tutorial': {\n 'repo_path': os.path.join(BASE_DIR, 'tutoriels-private'),\n 'repo_public_path': os.path.join(BASE_DIR, 'tutoriels-public'),\n 'default_license_pk': 7,\n 'home_number': 5,\n 'helps_per_page': 20\n },\n 'forum': {\n 'posts_per_page': 21,\n 'topics_per_page': 21,\n 'spam_limit_seconds': 60 * 15,\n 'spam_limit_participant': 2,\n 'followed_topics_per_page': 21,\n 'beta_forum_id': 1,\n 'max_post_length': 1000000,\n 'top_tag_max': 5,\n 'home_number': 5,\n },\n 'paginator': {\n 'folding_limit': 4\n }\n}\n\nLOGIN_REDIRECT_URL = \"/\"\n\nAUTHENTICATION_BACKENDS = ('social.backends.facebook.FacebookOAuth2',\n 'social.backends.google.GoogleOAuth2',\n 'django.contrib.auth.backends.ModelBackend')\nSOCIAL_AUTH_GOOGLE_OAUTH2_USE_DEPRECATED_API = True\n\nSOCIAL_AUTH_PIPELINE = (\n 'social.pipeline.social_auth.social_details',\n 'social.pipeline.social_auth.social_uid',\n 'social.pipeline.social_auth.auth_allowed',\n 'social.pipeline.social_auth.social_user',\n 'social.pipeline.user.get_username',\n 'social.pipeline.user.create_user',\n 'zds.member.models.save_profile',\n 'social.pipeline.social_auth.associate_user',\n 'social.pipeline.social_auth.load_extra_data',\n 'social.pipeline.user.user_details'\n)\n\n# redefine for real key and secret code\nSOCIAL_AUTH_FACEBOOK_KEY = \"\"\nSOCIAL_AUTH_FACEBOOK_SECRET = \"\"\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = \"696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com\"\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = \"mApWNh3stCsYHwsGuWdbZWP8\"\n\n# To remove a useless warning in Django 1.7.\n# See http://daniel.hepper.net/blog/2014/04/fixing-1_6-w001-when-upgrading-from-django-1-5-to-1-7/\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n# Load the production settings, overwrite the existing ones if needed\ntry:\n from settings_prod import *\nexcept ImportError:\n pass\n\n","path":"zds/settings.py"}],"string":"[\n {\n \"content\": \"# coding: utf-8\\n\\nimport os\\n\\nfrom django.utils.translation import 
5,\\n },\\n 'paginator': {\\n 'folding_limit': 4\\n }\\n}\\n\\nLOGIN_REDIRECT_URL = \\\"/\\\"\\n\\nAUTHENTICATION_BACKENDS = ('social.backends.facebook.FacebookOAuth2',\\n 'social.backends.google.GoogleOAuth2',\\n 'django.contrib.auth.backends.ModelBackend')\\nSOCIAL_AUTH_GOOGLE_OAUTH2_USE_DEPRECATED_API = True\\n\\nSOCIAL_AUTH_PIPELINE = (\\n 'social.pipeline.social_auth.social_details',\\n 'social.pipeline.social_auth.social_uid',\\n 'social.pipeline.social_auth.auth_allowed',\\n 'social.pipeline.social_auth.social_user',\\n 'social.pipeline.user.get_username',\\n 'social.pipeline.user.create_user',\\n 'zds.member.models.save_profile',\\n 'social.pipeline.social_auth.associate_user',\\n 'social.pipeline.social_auth.load_extra_data',\\n 'social.pipeline.user.user_details'\\n)\\n\\n# redefine for real key and secret code\\nSOCIAL_AUTH_FACEBOOK_KEY = \\\"\\\"\\nSOCIAL_AUTH_FACEBOOK_SECRET = \\\"\\\"\\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = \\\"696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com\\\"\\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = \\\"mApWNh3stCsYHwsGuWdbZWP8\\\"\\n\\n# To remove a useless warning in Django 1.7.\\n# See http://daniel.hepper.net/blog/2014/04/fixing-1_6-w001-when-upgrading-from-django-1-5-to-1-7/\\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\\n\\n# Load the production settings, overwrite the existing ones if needed\\ntry:\\n from settings_prod import *\\nexcept ImportError:\\n pass\\n\\n\",\n \"path\": \"zds/settings.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/zds/settings.py b/zds/settings.py\nindex 3d94cba83f..d0d2f33de1 100644\n--- a/zds/settings.py\n+++ b/zds/settings.py\n@@ -176,6 +176,8 @@\n # 'django.contrib.admindocs',\n )\n \n+SITE_ID = 1\n+\n THUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (60, 60), 'crop': True},\n"}}},{"rowIdx":415,"cells":{"in_source_id":{"kind":"string","value":"Gallopsled__pwntools-1811"},"issue":{"kind":"string","value":"List comprehension in __all__ prevents Pylance from working\nThanks for contributing to Pwntools! 
Ideas from the community help make Pwntools an amazing tool for everybody.\r\n\r\nIf you've got an idea for a new feature, please provide information about:\r\n\r\n* What the feature does\r\nAccording to https://github.com/microsoft/pylance-release/issues/289, the list comprehension in `__all__` in https://github.com/Gallopsled/pwntools/blob/4e6ccb0da17fb91e43a4f9e95edf4fd83806ba23/pwn/toplevel.py#L85 prevents [Pylance](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance) from working (when using `from pwn import *` instead of manually importing all modules).\r\nhttps://github.com/compas-dev/compas/issues/621 may be a solution instead of listing all attributes manually to fix that.\r\n* Why the feature should exist\r\nTo make Pylance happy :smile: \r\n* What tests should be included\r\nTest in VS Code to ensure it works.\r\n\r\nIf you think you can write the feature yourself, please submit a Pull Request and we can review your changes!\r\n\n"},"before_files":{"kind":"list like","value":[{"content":"# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport requests\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nerror = log.error\nwarning = log.warning\nwarn = 
before_files (pwn/toplevel.py):

# Get all the modules from pwnlib
import collections
import logging
import math
import operator
import os
import platform
import re
import requests
import socks
import signal
import string
import struct
import subprocess
import sys
import tempfile
import threading
import time

from pprint import pprint

import pwnlib
from pwnlib import *
from pwnlib.asm import *
from pwnlib.context import Thread
from pwnlib.context import context, LocalContext
from pwnlib.dynelf import DynELF
from pwnlib.encoders import *
from pwnlib.elf.corefile import Core, Corefile, Coredump
from pwnlib.elf.elf import ELF, load
from pwnlib.encoders import *
from pwnlib.exception import PwnlibException
from pwnlib.gdb import attach, debug_assembly, debug_shellcode
from pwnlib.filepointer import *
from pwnlib.flag import *
from pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split
from pwnlib.log import getLogger
from pwnlib.memleak import MemLeak, RelativeMemLeak
from pwnlib.regsort import *
from pwnlib.replacements import *
from pwnlib.rop import ROP
from pwnlib.rop.call import AppendedArgument
from pwnlib.rop.srop import SigreturnFrame
from pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload
from pwnlib.runner import *
from pwnlib.term.readline import str_input
from pwnlib.timeout import Timeout
from pwnlib.tubes.listen import listen
from pwnlib.tubes.process import process, PTY, PIPE, STDOUT
from pwnlib.tubes.remote import remote, tcp, udp, connect
from pwnlib.tubes.serialtube import serialtube
from pwnlib.tubes.server import server
from pwnlib.tubes.ssh import ssh
from pwnlib.tubes.tube import tube
from pwnlib.ui import *
from pwnlib.util import crc
from pwnlib.util import iters
from pwnlib.util import net
from pwnlib.util import proc
from pwnlib.util import safeeval
from pwnlib.util.crc import BitPolynom
from pwnlib.util.cyclic import *
from pwnlib.util.fiddling import *
from pwnlib.util.getdents import *
from pwnlib.util.hashes import *
from pwnlib.util.lists import *
from pwnlib.util.misc import *
from pwnlib.util.packing import *
from pwnlib.util.proc import pidof
from pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with
from pwnlib.util.splash import *
from pwnlib.util.web import *

# Promote these modules, so that "from pwn import *" will let you access them

from six.moves import cPickle as pickle, cStringIO as StringIO
from six import BytesIO

error = log.error
warning = log.warning
warn = log.warning
info = log.info
debug = log.debug
success = log.success

__all__ = [x for x in tuple(globals()) if x != '__name__']
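Since the row stores the complete file both before and after the change (the after_files listing follows below), the pr_diff field later in the row is recoverable from the pair. A minimal sketch with Python's standard difflib, assuming the two listings are saved as before.py and after.py (hypothetical file names):

```python
import difflib

# Recompute a unified diff from the stored before/after file contents.
with open('before.py') as f, open('after.py') as g:
    before_lines, after_lines = f.readlines(), g.readlines()

diff = difflib.unified_diff(before_lines, after_lines,
                            fromfile='a/pwn/toplevel.py',
                            tofile='b/pwn/toplevel.py')
print(''.join(diff))
```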
after_files (pwn/toplevel.py):

# Get all the modules from pwnlib
import collections
import logging
import math
import operator
import os
import platform
import re
import requests
import socks
import signal
import string
import struct
import subprocess
import sys
import tempfile
import threading
import time

from pprint import pprint

import pwnlib
from pwnlib import *
from pwnlib.asm import *
from pwnlib.context import Thread
from pwnlib.context import context, LocalContext
from pwnlib.dynelf import DynELF
from pwnlib.encoders import *
from pwnlib.elf.corefile import Core, Corefile, Coredump
from pwnlib.elf.elf import ELF, load
from pwnlib.encoders import *
from pwnlib.exception import PwnlibException
from pwnlib.gdb import attach, debug_assembly, debug_shellcode
from pwnlib.filepointer import *
from pwnlib.flag import *
from pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split
from pwnlib.log import getLogger
from pwnlib.memleak import MemLeak, RelativeMemLeak
from pwnlib.regsort import *
from pwnlib.replacements import *
from pwnlib.rop import ROP
from pwnlib.rop.call import AppendedArgument
from pwnlib.rop.srop import SigreturnFrame
from pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload
from pwnlib.runner import *
from pwnlib.term.readline import str_input
from pwnlib.timeout import Timeout
from pwnlib.tubes.listen import listen
from pwnlib.tubes.process import process, PTY, PIPE, STDOUT
from pwnlib.tubes.remote import remote, tcp, udp, connect
from pwnlib.tubes.serialtube import serialtube
from pwnlib.tubes.server import server
from pwnlib.tubes.ssh import ssh
from pwnlib.tubes.tube import tube
from pwnlib.ui import *
from pwnlib.util import crc
from pwnlib.util import iters
from pwnlib.util import net
from pwnlib.util import proc
from pwnlib.util import safeeval
from pwnlib.util.crc import BitPolynom
from pwnlib.util.cyclic import *
from pwnlib.util.fiddling import *
from pwnlib.util.getdents import *
from pwnlib.util.hashes import *
from pwnlib.util.lists import *
from pwnlib.util.misc import *
from pwnlib.util.packing import *
from pwnlib.util.proc import pidof
from pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with
from pwnlib.util.splash import *
from pwnlib.util.web import *

# Promote these modules, so that "from pwn import *" will let you access them

from six.moves import cPickle as pickle, cStringIO as StringIO
from six import BytesIO

error = log.error
warning = log.warning
warn = log.warning
info = log.info
debug = log.debug
success = log.success

# Equivalence with the default behavior of "from import *"
# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]
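Note that the commented-out replacement is not byte-for-byte equivalent to the removed comprehension: the old `__all__` also exported names beginning with an underscore. A tiny check of the difference (toy names, not the real pwn globals):

```python
# Toy globals() stand-in; the real module has far more names.
names = {'remote': 1, 'process': 2, '_internal': 3, '__name__': 'pwn'}

old_all = [x for x in names if x != '__name__']        # removed code
default = [x for x in names if not x.startswith('_')]  # implicit rule
print(sorted(old_all))   # ['_internal', 'process', 'remote']
print(sorted(default))   # ['process', 'remote']
```

Dropping `__all__` therefore slightly changes what `from pwn import *` exports (underscore-prefixed names are no longer leaked), which is presumably why the PR keeps the equivalent rule around as a comment.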
pr_diff:

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ba2970645..4929c0906 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -68,6 +68,7 @@ The table below shows which release corresponds to each branch, and what date th
 - [#1758][1758] Remove eval from cli
 - [#1780][1780] Re-add Python2 to the official Dockerfile
 - [#1941][1941] Disable all Android tests, `pwnlib.adb` is no longer supported in CI
+- [#1811][1811] Remove unnecessary `pwn.toplevel.__all__`
 
 [1261]: https://github.com/Gallopsled/pwntools/pull/1261
 [1695]: https://github.com/Gallopsled/pwntools/pull/1695
@@ -78,7 +79,7 @@ The table below shows which release corresponds to each branch, and what date th
 [1758]: https://github.com/Gallopsled/pwntools/pull/1758
 [1780]: https://github.com/Gallopsled/pwntools/pull/1780
 [1941]: https://github.com/Gallopsled/pwntools/pull/1941
-[1786]: https://github.com/Gallopsled/pwntools/pull/1786
+[1811]: https://github.com/Gallopsled/pwntools/pull/1811
 
 ## 4.4.0 (`beta`)
 
diff --git a/pwn/toplevel.py b/pwn/toplevel.py
index 0cc3f7509..5af57bf55 100644
--- a/pwn/toplevel.py
+++ b/pwn/toplevel.py
@@ -83,4 +83,5 @@
 debug = log.debug
 success = log.success
 
-__all__ = [x for x in tuple(globals()) if x != '__name__']
+# Equivalence with the default behavior of "from import *"
+# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]

in_source_id: searx__searx-1477

issue:

New release?

Hi,

I'm the Debian maintainer of searx and we are still shipping version 0.14.0 of searx because there has not been a more recent release since February.

Unfortunately, we see a number of services that do not work anymore with 0.14.0 but which are fixed in git. Would it be possible to make a release soon?

The last commit to the repository was back in August. Is the project still alive? Debian plans to release its next stable version soon and we should not include searx if it's not maintained anymore.

Thanks!
cheers, josch
before_files (searx/version.py):

# -*- coding: utf-8 -*-
'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2013- by Adam Tauber, 
'''

# version of searx
VERSION_MAJOR = 0
VERSION_MINOR = 14
VERSION_BUILD = 0

VERSION_STRING = "{0}.{1}.{2}".format(VERSION_MAJOR,
                                      VERSION_MINOR,
                                      VERSION_BUILD)

after_files (searx/version.py):

# -*- coding: utf-8 -*-
'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2013- by Adam Tauber, 
'''

# version of searx
VERSION_MAJOR = 0
VERSION_MINOR = 15
VERSION_BUILD = 0

VERSION_STRING = "{0}.{1}.{2}".format(VERSION_MAJOR,
                                      VERSION_MINOR,
                                      VERSION_BUILD)
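The whole code change is a minor-version bump from 0.14.0 to 0.15.0. For downstream packagers like the issue author, a bump like this is what version comparisons key on; a small sketch of recovering and comparing the numeric triples (variable names are illustrative, not searx code):

```python
# Compare a packaged version against upstream, e.g. for an
# "is the Debian package older than the new release?" check.
packaged, upstream = "0.14.0", "0.15.0"
as_tuple = lambda v: tuple(int(x) for x in v.split("."))
print(as_tuple(packaged) < as_tuple(upstream))  # True -> update needed
```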
pr_diff:

diff --git a/AUTHORS.rst b/AUTHORS.rst
index 346f324d5d..674bfd758a 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -83,3 +83,18 @@ generally made searx better:
 - Joseph Nuthalapati @josephkiranbabu
 - @maiki
 - Richard Didier @zeph33
+- Michael Vieria @Themimitoof
+- Richard Nespithal @rndevfx
+- Stanislas @angristan
+- @rinpatch
+- g. s. @usernameisntallowed
+- Léo Bourrel @bourrel
+- @cy8aer
+- @Popolon
+- Alice Ferrazzi @aliceinwire
+- @LiquidLemon
+- @dadosch
+- @Venca24
+- @ZEROF
+- Ivan Skytte Jørgensen @isj-privacore
+- @miicha
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 55bd33ee85..70e9e415cc 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,3 +1,41 @@
+0.15.0 2019.01.06
+=================
+
+- New engines
+
+  - Acgsou (files, images, videos, music)
+  - Duden.de (general)
+  - Seznam (general)
+  - Mojeek (general)
+- New languages
+
+  - Catalan
+  - Welsh
+  - Basque
+  - Persian (Iran)
+  - Galician
+  - Dutch (Belgium)
+  - Telugu
+  - Vietnamese
+- New random answerers
+
+  - sha256
+  - uuidv4
+- New DOI resolsvers
+
+  - sci-hub.tw
+- Fix Vim mode on Firefox
+- Fix custom select in Oscar theme
+- Engine fixes (duckduckgo, google news, currency convert, gigablast, google scholar, wikidata image, etymonline, google videos, startpage, bing image)
+- Minor simple theme fixes
+
+- New Youtube icon in Oscar theme
+- Get DOI rewriters from settings.yml
+- Hide page buttons when infinite scrolling is enabled
+- Update user agent versions
+- Make Oscar style configurable
+- Make suspend times of errored engines configurable
+
 0.14.0 2018.02.19
 =================
 
diff --git a/searx/settings.yml b/searx/settings.yml
index 35172bd6a6..00b001b6ce 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -742,21 +742,27 @@ locales:
   en : English
   ar : العَرَبِيَّة (Arabic)
   bg : Български (Bulgarian)
+  ca : Català (Catalan)
   cs : Čeština (Czech)
+  cy : Cymraeg (Welsh)
   da : Dansk (Danish)
   de : Deutsch (German)
   el_GR : Ελληνικά (Greek_Greece)
   eo : Esperanto (Esperanto)
   es : Español (Spanish)
+  eu : Euskara (Basque)
+  fa_IR : (fārsī) فارسى (Persian)
   fi : Suomi (Finnish)
   fil : Wikang Filipino (Filipino)
   fr : Français (French)
+  gl : Galego (Galician)
   he : עברית (Hebrew)
   hr : Hrvatski (Croatian)
   hu : Magyar (Hungarian)
   it : Italiano (Italian)
   ja : 日本語 (Japanese)
   nl : Nederlands (Dutch)
+  nl_BE : Vlaams (Dutch_Belgium)
   pl : Polski (Polish)
   pt : Português (Portuguese)
   pt_BR : Português (Portuguese_Brazil)
@@ -766,8 +772,10 @@ locales:
   sl : Slovenski (Slovene)
   sr : српски (Serbian)
   sv : Svenska (Swedish)
+  te : తెలుగు (telugu)
   tr : Türkçe (Turkish)
   uk : українська мова (Ukrainian)
+  vi : tiếng việt (㗂越)
   zh : 中文 (Chinese)
   zh_TW : 國語 (Taiwanese Mandarin)
 
diff --git a/searx/translations/ar/LC_MESSAGES/messages.mo b/searx/translations/ar/LC_MESSAGES/messages.mo
index b3579a1a71..052e5b5225 100644
Binary files a/searx/translations/ar/LC_MESSAGES/messages.mo and 
b/searx/translations/ar/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/ar/LC_MESSAGES/messages.po b/searx/translations/ar/LC_MESSAGES/messages.po\nindex 4d7e55fe4d..645ca0ed6a 100644\n--- a/searx/translations/ar/LC_MESSAGES/messages.po\n+++ b/searx/translations/ar/LC_MESSAGES/messages.po\n@@ -3,14 +3,15 @@\n # This file is distributed under the same license as the PROJECT project.\n # \n # Translators:\n-# ButterflyOfFire , 2017-2018\n+# ButterflyOfFire ButterflyOfFire, 2018\n+# ButterflyOfFire, 2017-2018\n msgid \"\"\n msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2018-01-23 17:54+0000\\n\"\n-\"Last-Translator: ButterflyOfFire \\n\"\n+\"PO-Revision-Date: 2018-09-08 08:23+0000\\n\"\n+\"Last-Translator: ButterflyOfFire ButterflyOfFire\\n\"\n \"Language-Team: Arabic (http://www.transifex.com/asciimoo/searx/language/ar/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -29,7 +30,7 @@ msgstr \"\"\n \n #: searx/search.py:151\n msgid \"unexpected crash\"\n-msgstr \"\"\n+msgstr \"خلل غير مُتوقّع\"\n \n #: searx/webapp.py:136\n msgid \"files\"\n@@ -73,7 +74,7 @@ msgstr \"علوم\"\n \n #: searx/webapp.py:399 searx/webapp.py:658\n msgid \"Invalid settings, please edit your preferences\"\n-msgstr \"\"\n+msgstr \"إنّ الإعدادات خاطئة، يرجى تعديل خياراتك\"\n \n #: searx/webapp.py:415\n msgid \"Invalid settings\"\n@@ -97,7 +98,7 @@ msgstr \"مولّد قيمة عشوائية\"\n \n #: searx/answerers/random/answerer.py:54\n msgid \"Generate different random values\"\n-msgstr \"\"\n+msgstr \"توليد قِيم عشوائية مختلفة\"\n \n #: searx/answerers/statistics/answerer.py:53\n msgid \"Statistics functions\"\n@@ -288,7 +289,7 @@ msgstr \"اللغة الإفتراضية\"\n #: searx/templates/pix-art/preferences.html:20\n #: searx/templates/simple/preferences.html:120\n msgid \"Interface language\"\n-msgstr \"لغة العرض\"\n+msgstr \"لغة الواجهة\"\n \n #: searx/templates/courgette/preferences.html:34\n #: searx/templates/legacy/preferences.html:35\n@@ -371,7 +372,7 @@ msgstr \"اللون\"\n \n #: searx/templates/courgette/preferences.html:86\n msgid \"Blue (default)\"\n-msgstr \"أزرق )إفتراضي(\"\n+msgstr \"أزرق (إفتراضي)\"\n \n #: searx/templates/courgette/preferences.html:87\n msgid \"Violet\"\n@@ -581,13 +582,13 @@ msgstr \"عرض نتائج البحث في ألسنة جديدة\"\n #: searx/templates/oscar/preferences.html:117\n #: searx/templates/simple/preferences.html:145\n msgid \"On\"\n-msgstr \"\"\n+msgstr \"يشتغل\"\n \n #: searx/templates/legacy/preferences.html:88\n #: searx/templates/oscar/preferences.html:118\n #: searx/templates/simple/preferences.html:146\n msgid \"Off\"\n-msgstr \"\"\n+msgstr \"مُعطَّل\"\n \n #: searx/templates/legacy/result_templates/code.html:3\n #: searx/templates/legacy/result_templates/default.html:3\n@@ -626,7 +627,7 @@ msgstr \"محرك بحث يحمي الخصوصية و قابل للتهكير\"\n #: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n #: searx/templates/simple/macros.html:43\n msgid \"proxied\"\n-msgstr \"يمر عبر البروكسي\"\n+msgstr \"النفاذ عبر البروكسي\"\n \n #: searx/templates/oscar/macros.html:92\n msgid \"supported\"\n@@ -661,7 +662,7 @@ msgstr \"المجيبون\"\n #: searx/templates/oscar/preferences.html:17\n #: searx/templates/oscar/preferences.html:272\n msgid \"Cookies\"\n-msgstr \"الكوكيز\"\n+msgstr \"كعكات الكوكيز\"\n \n #: searx/templates/oscar/preferences.html:42\n #: searx/templates/simple/preferences.html:48\n@@ -676,12 +677,12 @@ msgstr \"يقوم 
بتغيير لغة واجهة البحث\"\n #: searx/templates/oscar/preferences.html:58\n #: searx/templates/simple/preferences.html:60\n msgid \"Find stuff as you type\"\n-msgstr \"\"\n+msgstr \"العثور على نتائج أثناء الكتابة\"\n \n #: searx/templates/oscar/preferences.html:69\n #: searx/templates/simple/preferences.html:173\n msgid \"Proxying image results through searx\"\n-msgstr \"\"\n+msgstr \"تمرير نتائج البحث عن الصور عبر بروكسي Searx\"\n \n #: searx/templates/oscar/preferences.html:78\n msgid \"\"\n@@ -865,7 +866,7 @@ msgstr \"معلومة !\"\n \n #: searx/templates/oscar/messages/no_cookies.html:4\n msgid \"currently, there are no cookies defined.\"\n-msgstr \"\"\n+msgstr \"حاليا لم تقم بتحديد أي مِن كعكات الكوكيز.\"\n \n #: searx/templates/oscar/messages/no_data_available.html:4\n msgid \"There is currently no data available. \"\ndiff --git a/searx/translations/bg/LC_MESSAGES/messages.mo b/searx/translations/bg/LC_MESSAGES/messages.mo\nindex 63b303a423..f80e5afcc9 100644\nBinary files a/searx/translations/bg/LC_MESSAGES/messages.mo and b/searx/translations/bg/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/ca/LC_MESSAGES/messages.mo b/searx/translations/ca/LC_MESSAGES/messages.mo\nnew file mode 100644\nindex 0000000000..2ec3e05030\nBinary files /dev/null and b/searx/translations/ca/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/ca/LC_MESSAGES/messages.po b/searx/translations/ca/LC_MESSAGES/messages.po\nnew file mode 100644\nindex 0000000000..460091cd7b\n--- /dev/null\n+++ b/searx/translations/ca/LC_MESSAGES/messages.po\n@@ -0,0 +1,1014 @@\n+# Translations template for PROJECT.\n+# Copyright (C) 2017 ORGANIZATION\n+# This file is distributed under the same license as the PROJECT project.\n+# \n+# Translators:\n+# Calbasi , 2018\n+# jmontane, 2018\n+msgid \"\"\n+msgstr \"\"\n+\"Project-Id-Version: searx\\n\"\n+\"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n+\"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n+\"PO-Revision-Date: 2018-12-15 15:10+0000\\n\"\n+\"Last-Translator: jmontane\\n\"\n+\"Language-Team: Catalan (http://www.transifex.com/asciimoo/searx/language/ca/)\\n\"\n+\"MIME-Version: 1.0\\n\"\n+\"Content-Type: text/plain; charset=UTF-8\\n\"\n+\"Content-Transfer-Encoding: 8bit\\n\"\n+\"Generated-By: Babel 2.3.4\\n\"\n+\"Language: ca\\n\"\n+\"Plural-Forms: nplurals=2; plural=(n != 1);\\n\"\n+\n+#: searx/search.py:137 searx/search.py:182\n+msgid \"timeout\"\n+msgstr \"s'ha esgotat el temps d'espera\"\n+\n+#: searx/search.py:144\n+msgid \"request exception\"\n+msgstr \"Excepció en la petició\"\n+\n+#: searx/search.py:151\n+msgid \"unexpected crash\"\n+msgstr \"Fallada no esperada\"\n+\n+#: searx/webapp.py:136\n+msgid \"files\"\n+msgstr \"fitxers\"\n+\n+#: searx/webapp.py:137\n+msgid \"general\"\n+msgstr \"general\"\n+\n+#: searx/webapp.py:138\n+msgid \"music\"\n+msgstr \"música\"\n+\n+#: searx/webapp.py:139\n+msgid \"social media\"\n+msgstr \"xarxes socials\"\n+\n+#: searx/webapp.py:140\n+msgid \"images\"\n+msgstr \"imatges\"\n+\n+#: searx/webapp.py:141\n+msgid \"videos\"\n+msgstr \"vídeos\"\n+\n+#: searx/webapp.py:142\n+msgid \"it\"\n+msgstr \"informàtica\"\n+\n+#: searx/webapp.py:143\n+msgid \"news\"\n+msgstr \"notícies\"\n+\n+#: searx/webapp.py:144\n+msgid \"map\"\n+msgstr \"mapa\"\n+\n+#: searx/webapp.py:145\n+msgid \"science\"\n+msgstr \"ciència\"\n+\n+#: searx/webapp.py:399 searx/webapp.py:658\n+msgid \"Invalid settings, please edit your preferences\"\n+msgstr \"La configuració no és vàlida, editeu-la\"\n+\n+#: searx/webapp.py:415\n+msgid \"Invalid 
settings\"\n+msgstr \"La configuració no és vàlida\"\n+\n+#: searx/webapp.py:449 searx/webapp.py:493\n+msgid \"search error\"\n+msgstr \"error en la cerca\"\n+\n+#: searx/webapp.py:530\n+msgid \"{minutes} minute(s) ago\"\n+msgstr \"fa {minutes} minuts\"\n+\n+#: searx/webapp.py:532\n+msgid \"{hours} hour(s), {minutes} minute(s) ago\"\n+msgstr \"fa {hours} hores i {minutes} minuts\"\n+\n+#: searx/answerers/random/answerer.py:53\n+msgid \"Random value generator\"\n+msgstr \"Generador de valor aleatori\"\n+\n+#: searx/answerers/random/answerer.py:54\n+msgid \"Generate different random values\"\n+msgstr \"Genera valors aleatoris diferents\"\n+\n+#: searx/answerers/statistics/answerer.py:53\n+msgid \"Statistics functions\"\n+msgstr \"Funcions estadístiques\"\n+\n+#: searx/answerers/statistics/answerer.py:54\n+msgid \"Compute {functions} of the arguments\"\n+msgstr \"Calcula {functions} dels arguments\"\n+\n+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n+msgid \"Engine time (sec)\"\n+msgstr \"Temps del motor (segons)\"\n+\n+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n+msgid \"Page loads (sec)\"\n+msgstr \"Càrrega de la pàgina (segons)\"\n+\n+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n+#: searx/templates/oscar/results.html:95\n+#: searx/templates/simple/results.html:20\n+msgid \"Number of results\"\n+msgstr \"Nombre de resultats\"\n+\n+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213\n+msgid \"Scores\"\n+msgstr \"Valoració\"\n+\n+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217\n+msgid \"Scores per result\"\n+msgstr \"Valoració segons el resultat\"\n+\n+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n+msgid \"Errors\"\n+msgstr \"Errors\"\n+\n+#: searx/engines/pdbe.py:87\n+msgid \"{title}&nbsp;(OBSOLETE)\"\n+msgstr \"{title}&nbsp;(OBSOLET)\"\n+\n+#: searx/engines/pdbe.py:91\n+msgid \"This entry has been superseded by\"\n+msgstr \"Aquesta entrada ha estat substituïda per\"\n+\n+#: searx/engines/pubmed.py:74\n+msgid \"No abstract is available for this publication.\"\n+msgstr \"No hi ha resum disponible per a aquesta publicació.\"\n+\n+#: searx/plugins/https_rewrite.py:32\n+msgid \"Rewrite HTTP links to HTTPS if possible\"\n+msgstr \"Reescriu els enllaços HTTP cap a HTTPS si és possible\"\n+\n+#: searx/plugins/infinite_scroll.py:3\n+msgid \"Infinite scroll\"\n+msgstr \"Desplaçament infinit\"\n+\n+#: searx/plugins/infinite_scroll.py:4\n+msgid \"Automatically load next page when scrolling to bottom of current page\"\n+msgstr \"Carrega automàticament la pàgina següent en desplaçar-se al final de la pàgina actual\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:9\n+msgid \"Open Access DOI rewrite\"\n+msgstr \"Reescriu l'Open Access DOI\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:10\n+msgid \"\"\n+\"Avoid paywalls by redirecting to open-access versions of publications when \"\n+\"available\"\n+msgstr \"Evita els llocs de pagaments redirigint a versions d'accés obert de les publicacions si és possible\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:18\n+#: searx/templates/oscar/preferences.html:114\n+#: searx/templates/simple/preferences.html:149\n+msgid \"Open result links on new browser tabs\"\n+msgstr \"Obre els enllaços de resultats en una pestanya nova\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:19\n+msgid \"\"\n+\"Results are opened in the same window by default. 
This plugin overwrites the\"\n+\" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n+msgstr \"De forma predeterminada, els resultats s'obren en la mateixa finestra. Aquest connector canvia el comportament predeterminat i obre els enllaços en una finestra o pestanya nova. (Cal JavaScript)\"\n+\n+#: searx/plugins/search_on_category_select.py:18\n+msgid \"Search on category select\"\n+msgstr \"Cerca en la selecció de categories\"\n+\n+#: searx/plugins/search_on_category_select.py:19\n+msgid \"\"\n+\"Perform search immediately if a category selected. Disable to select \"\n+\"multiple categories. (JavaScript required)\"\n+msgstr \"Executa la cerca immediatament si hi ha seleccionada una categoria. Desactiveu-ho per a seleccionar més d'una categoria. (Cal JavaScript)\"\n+\n+#: searx/plugins/self_info.py:20\n+msgid \"\"\n+\"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n+\"contains \\\"user agent\\\".\"\n+msgstr \"Mostra la vostra IP si la consulta és «ip» i el valor «user agent» del navegador si la consulta conté «user agent».\"\n+\n+#: searx/plugins/tracker_url_remover.py:26\n+msgid \"Tracker URL remover\"\n+msgstr \"Suprimeix l'URL de rastreig\"\n+\n+#: searx/plugins/tracker_url_remover.py:27\n+msgid \"Remove trackers arguments from the returned URL\"\n+msgstr \"Suprimeix els arguments de rastreig de les URL retornades\"\n+\n+#: searx/plugins/vim_hotkeys.py:3\n+msgid \"Vim-like hotkeys\"\n+msgstr \"Dreceres de teclat del Vim\"\n+\n+#: searx/plugins/vim_hotkeys.py:4\n+msgid \"\"\n+\"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n+\"\\\"h\\\" key on main or result page to get help.\"\n+msgstr \"Navegació pels resultats de la cerca amb les dreceres a l'estil Vim (cal JavaScript). 
Pitgeu la tecla «h» en la pàgina principal o de resultats per a obtenir ajuda.\"\n+\n+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n+#: searx/templates/simple/404.html:4\n+msgid \"Page not found\"\n+msgstr \"No s'ha trobat la pàgina\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+#, python-format\n+msgid \"Go to %(search_page)s.\"\n+msgstr \"Vés a %(search_page)s.\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+msgid \"search page\"\n+msgstr \"pàgina de cerca\"\n+\n+#: searx/templates/courgette/index.html:9\n+#: searx/templates/courgette/index.html:13\n+#: searx/templates/courgette/results.html:5\n+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12\n+#: searx/templates/oscar/navbar.html:7\n+#: searx/templates/oscar/preferences.html:3\n+#: searx/templates/pix-art/index.html:8\n+msgid \"preferences\"\n+msgstr \"preferències\"\n+\n+#: searx/templates/courgette/index.html:11\n+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2\n+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7\n+msgid \"about\"\n+msgstr \"quant a\"\n+\n+#: searx/templates/courgette/preferences.html:5\n+#: searx/templates/legacy/preferences.html:5\n+#: searx/templates/oscar/preferences.html:8\n+#: searx/templates/pix-art/preferences.html:5\n+#: searx/templates/simple/preferences.html:26\n+msgid \"Preferences\"\n+msgstr \"Preferències\"\n+\n+#: searx/templates/courgette/preferences.html:9\n+#: searx/templates/legacy/preferences.html:9\n+#: searx/templates/oscar/preferences.html:33\n+#: searx/templates/oscar/preferences.html:35\n+#: searx/templates/simple/preferences.html:34\n+msgid \"Default categories\"\n+msgstr \"Categories predeterminades\"\n+\n+#: searx/templates/courgette/preferences.html:13\n+#: searx/templates/legacy/preferences.html:14\n+#: searx/templates/oscar/preferences.html:41\n+#: searx/templates/pix-art/preferences.html:9\n+#: searx/templates/simple/preferences.html:39\n+#: searx/templates/simple/preferences.html:163\n+msgid \"Search language\"\n+msgstr \"Llengua de cerca\"\n+\n+#: searx/templates/courgette/preferences.html:16\n+#: searx/templates/legacy/preferences.html:17\n+#: searx/templates/oscar/languages.html:6\n+#: searx/templates/pix-art/preferences.html:12\n+#: searx/templates/simple/languages.html:2\n+#: searx/templates/simple/preferences.html:42\n+msgid \"Default language\"\n+msgstr \"Llengua predeterminada\"\n+\n+#: searx/templates/courgette/preferences.html:24\n+#: searx/templates/legacy/preferences.html:25\n+#: searx/templates/oscar/preferences.html:47\n+#: searx/templates/pix-art/preferences.html:20\n+#: searx/templates/simple/preferences.html:120\n+msgid \"Interface language\"\n+msgstr \"Llengua de la interfície\"\n+\n+#: searx/templates/courgette/preferences.html:34\n+#: searx/templates/legacy/preferences.html:35\n+#: searx/templates/oscar/preferences.html:57\n+#: searx/templates/simple/preferences.html:51\n+msgid \"Autocomplete\"\n+msgstr \"Compleció automàtica\"\n+\n+#: searx/templates/courgette/preferences.html:45\n+#: searx/templates/legacy/preferences.html:46\n+#: searx/templates/oscar/preferences.html:68\n+#: 
searx/templates/simple/preferences.html:166\n+msgid \"Image proxy\"\n+msgstr \"Servidor intermediari d'imatges\"\n+\n+#: searx/templates/courgette/preferences.html:48\n+#: searx/templates/legacy/preferences.html:49\n+#: searx/templates/oscar/preferences.html:72\n+#: searx/templates/simple/preferences.html:169\n+msgid \"Enabled\"\n+msgstr \"Activat\"\n+\n+#: searx/templates/courgette/preferences.html:49\n+#: searx/templates/legacy/preferences.html:50\n+#: searx/templates/oscar/preferences.html:73\n+#: searx/templates/simple/preferences.html:170\n+msgid \"Disabled\"\n+msgstr \"Desactivat\"\n+\n+#: searx/templates/courgette/preferences.html:54\n+#: searx/templates/legacy/preferences.html:55\n+#: searx/templates/oscar/preferences.html:77\n+#: searx/templates/pix-art/preferences.html:30\n+#: searx/templates/simple/preferences.html:156\n+msgid \"Method\"\n+msgstr \"Mètode\"\n+\n+#: searx/templates/courgette/preferences.html:63\n+#: searx/templates/legacy/preferences.html:64\n+#: searx/templates/oscar/preferences.html:86\n+#: searx/templates/oscar/preferences.html:165\n+#: searx/templates/oscar/preferences.html:173\n+#: searx/templates/simple/preferences.html:63\n+#: searx/templates/simple/preferences.html:90\n+msgid \"SafeSearch\"\n+msgstr \"Cerca segura\"\n+\n+#: searx/templates/courgette/preferences.html:66\n+#: searx/templates/legacy/preferences.html:67\n+#: searx/templates/oscar/preferences.html:90\n+#: searx/templates/simple/preferences.html:66\n+msgid \"Strict\"\n+msgstr \"Estricta\"\n+\n+#: searx/templates/courgette/preferences.html:67\n+#: searx/templates/legacy/preferences.html:68\n+#: searx/templates/oscar/preferences.html:91\n+#: searx/templates/simple/preferences.html:67\n+msgid \"Moderate\"\n+msgstr \"Moderada\"\n+\n+#: searx/templates/courgette/preferences.html:68\n+#: searx/templates/legacy/preferences.html:69\n+#: searx/templates/oscar/preferences.html:92\n+#: searx/templates/simple/preferences.html:68\n+msgid \"None\"\n+msgstr \"Cap\"\n+\n+#: searx/templates/courgette/preferences.html:73\n+#: searx/templates/legacy/preferences.html:74\n+#: searx/templates/oscar/preferences.html:96\n+#: searx/templates/pix-art/preferences.html:39\n+#: searx/templates/simple/preferences.html:131\n+msgid \"Themes\"\n+msgstr \"Temes\"\n+\n+#: searx/templates/courgette/preferences.html:83\n+msgid \"Color\"\n+msgstr \"Color\"\n+\n+#: searx/templates/courgette/preferences.html:86\n+msgid \"Blue (default)\"\n+msgstr \"Blau (predeterminat)\"\n+\n+#: searx/templates/courgette/preferences.html:87\n+msgid \"Violet\"\n+msgstr \"Violat\"\n+\n+#: searx/templates/courgette/preferences.html:88\n+msgid \"Green\"\n+msgstr \"Verd\"\n+\n+#: searx/templates/courgette/preferences.html:89\n+msgid \"Cyan\"\n+msgstr \"Cian\"\n+\n+#: searx/templates/courgette/preferences.html:90\n+msgid \"Orange\"\n+msgstr \"Taronja\"\n+\n+#: searx/templates/courgette/preferences.html:91\n+msgid \"Red\"\n+msgstr \"Vermell\"\n+\n+#: searx/templates/courgette/preferences.html:96\n+#: searx/templates/legacy/preferences.html:93\n+#: searx/templates/pix-art/preferences.html:49\n+#: searx/templates/simple/preferences.html:77\n+msgid \"Currently used search engines\"\n+msgstr \"Motors de cerca usats actualment\"\n+\n+#: searx/templates/courgette/preferences.html:100\n+#: searx/templates/legacy/preferences.html:97\n+#: searx/templates/oscar/preferences.html:162\n+#: searx/templates/oscar/preferences.html:176\n+#: searx/templates/pix-art/preferences.html:53\n+#: searx/templates/simple/preferences.html:87\n+msgid \"Engine name\"\n+msgstr \"Nom del 
motor\"\n+\n+#: searx/templates/courgette/preferences.html:101\n+#: searx/templates/legacy/preferences.html:98\n+msgid \"Category\"\n+msgstr \"Categoria\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:113\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:110\n+#: searx/templates/oscar/preferences.html:161\n+#: searx/templates/oscar/preferences.html:177\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:64\n+#: searx/templates/simple/preferences.html:86\n+msgid \"Allow\"\n+msgstr \"Permet\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:114\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:111\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:65\n+msgid \"Block\"\n+msgstr \"Bloca\"\n+\n+#: searx/templates/courgette/preferences.html:122\n+#: searx/templates/legacy/preferences.html:119\n+#: searx/templates/oscar/preferences.html:297\n+#: searx/templates/pix-art/preferences.html:73\n+#: searx/templates/simple/preferences.html:180\n+msgid \"\"\n+\"These settings are stored in your cookies, this allows us not to store this \"\n+\"data about you.\"\n+msgstr \"Aquesta configuració es desa en les galetes. Això ens permet no emmagatzemar les vostres dades.\"\n+\n+#: searx/templates/courgette/preferences.html:124\n+#: searx/templates/legacy/preferences.html:121\n+#: searx/templates/oscar/preferences.html:299\n+#: searx/templates/pix-art/preferences.html:75\n+#: searx/templates/simple/preferences.html:182\n+msgid \"\"\n+\"These cookies serve your sole convenience, we don't use these cookies to \"\n+\"track you.\"\n+msgstr \"Aquestes galetes només són per a la vostra conveniència. 
No les usem per a rastrejar-vos.\"\n+\n+#: searx/templates/courgette/preferences.html:127\n+#: searx/templates/legacy/preferences.html:124\n+#: searx/templates/oscar/preferences.html:305\n+#: searx/templates/pix-art/preferences.html:78\n+#: searx/templates/simple/preferences.html:185\n+msgid \"save\"\n+msgstr \"desa\"\n+\n+#: searx/templates/courgette/preferences.html:128\n+#: searx/templates/legacy/preferences.html:125\n+#: searx/templates/oscar/preferences.html:307\n+#: searx/templates/simple/preferences.html:186\n+msgid \"Reset defaults\"\n+msgstr \"Restaura els valors predeterminats\"\n+\n+#: searx/templates/courgette/preferences.html:129\n+#: searx/templates/legacy/preferences.html:126\n+#: searx/templates/oscar/preferences.html:306\n+#: searx/templates/pix-art/preferences.html:79\n+#: searx/templates/simple/preferences.html:187\n+msgid \"back\"\n+msgstr \"enrere\"\n+\n+#: searx/templates/courgette/results.html:12\n+#: searx/templates/legacy/results.html:13\n+#: searx/templates/oscar/results.html:136\n+#: searx/templates/simple/results.html:58\n+msgid \"Search URL\"\n+msgstr \"URL de cerca\"\n+\n+#: searx/templates/courgette/results.html:16\n+#: searx/templates/legacy/results.html:17\n+#: searx/templates/oscar/results.html:141\n+#: searx/templates/simple/results.html:62\n+msgid \"Download results\"\n+msgstr \"Baixa els resultats\"\n+\n+#: searx/templates/courgette/results.html:34\n+#: searx/templates/legacy/results.html:35\n+#: searx/templates/simple/results.html:10\n+msgid \"Answers\"\n+msgstr \"Respostes\"\n+\n+#: searx/templates/courgette/results.html:42\n+#: searx/templates/legacy/results.html:43\n+#: searx/templates/oscar/results.html:116\n+#: searx/templates/simple/results.html:42\n+msgid \"Suggestions\"\n+msgstr \"Suggeriments\"\n+\n+#: searx/templates/courgette/results.html:70\n+#: searx/templates/legacy/results.html:81\n+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78\n+#: searx/templates/simple/results.html:130\n+msgid \"previous page\"\n+msgstr \"pàgina anterior\"\n+\n+#: searx/templates/courgette/results.html:81\n+#: searx/templates/legacy/results.html:92\n+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84\n+#: searx/templates/simple/results.html:145\n+msgid \"next page\"\n+msgstr \"pàgina següent\"\n+\n+#: searx/templates/courgette/search.html:3\n+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6\n+#: searx/templates/oscar/search_full.html:9\n+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4\n+msgid \"Search for...\"\n+msgstr \"Cerca...\"\n+\n+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4\n+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4\n+#: searx/templates/simple/stats.html:7\n+msgid \"Engine stats\"\n+msgstr \"Estadístiques del motor\"\n+\n+#: searx/templates/courgette/result_templates/images.html:4\n+#: searx/templates/legacy/result_templates/images.html:4\n+#: searx/templates/pix-art/result_templates/images.html:4\n+msgid \"original context\"\n+msgstr \"context original\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Seeder\"\n+msgstr \"Font\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: 
searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Leecher\"\n+msgstr \"Descarregador\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:9\n+#: searx/templates/legacy/result_templates/torrent.html:9\n+#: searx/templates/oscar/macros.html:23\n+#: searx/templates/simple/result_templates/torrent.html:6\n+msgid \"magnet link\"\n+msgstr \"enllaç magnet\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:10\n+#: searx/templates/legacy/result_templates/torrent.html:10\n+#: searx/templates/oscar/macros.html:24\n+#: searx/templates/simple/result_templates/torrent.html:7\n+msgid \"torrent file\"\n+msgstr \"fitxer torrent\"\n+\n+#: searx/templates/legacy/categories.html:8\n+#: searx/templates/simple/categories.html:6\n+msgid \"Click on the magnifier to perform search\"\n+msgstr \"Feu clic en la lupa per a executar la cerca\"\n+\n+#: searx/templates/legacy/preferences.html:84\n+#: searx/templates/oscar/preferences.html:113\n+#: searx/templates/simple/preferences.html:142\n+msgid \"Results on new tabs\"\n+msgstr \"Resultats en pestanyes noves\"\n+\n+#: searx/templates/legacy/preferences.html:87\n+#: searx/templates/oscar/preferences.html:117\n+#: searx/templates/simple/preferences.html:145\n+msgid \"On\"\n+msgstr \"Activat\"\n+\n+#: searx/templates/legacy/preferences.html:88\n+#: searx/templates/oscar/preferences.html:118\n+#: searx/templates/simple/preferences.html:146\n+msgid \"Off\"\n+msgstr \"Desactivat\"\n+\n+#: searx/templates/legacy/result_templates/code.html:3\n+#: searx/templates/legacy/result_templates/default.html:3\n+#: searx/templates/legacy/result_templates/map.html:9\n+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48\n+#: searx/templates/simple/macros.html:43\n+msgid \"cached\"\n+msgstr \"en memòria cau\"\n+\n+#: searx/templates/oscar/advanced.html:4\n+msgid \"Advanced settings\"\n+msgstr \"Configuració avançada\"\n+\n+#: searx/templates/oscar/base.html:62\n+#: searx/templates/oscar/messages/first_time.html:4\n+#: searx/templates/oscar/messages/save_settings_successfull.html:5\n+#: searx/templates/oscar/messages/unknow_error.html:5\n+msgid \"Close\"\n+msgstr \"Tanca\"\n+\n+#: searx/templates/oscar/base.html:64\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+#: searx/templates/simple/results.html:25\n+msgid \"Error!\"\n+msgstr \"Error!\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"Powered by\"\n+msgstr \"Funciona amb\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"a privacy-respecting, hackable metasearch engine\"\n+msgstr \"un meta motor de cerca personalitzable i respectuós amb la privadesa\"\n+\n+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n+#: searx/templates/simple/macros.html:43\n+msgid \"proxied\"\n+msgstr \"en servidor intermediari\"\n+\n+#: searx/templates/oscar/macros.html:92\n+msgid \"supported\"\n+msgstr \"suportat\"\n+\n+#: searx/templates/oscar/macros.html:96\n+msgid \"not supported\"\n+msgstr \"no suportat\"\n+\n+#: searx/templates/oscar/preferences.html:13\n+#: searx/templates/oscar/preferences.html:22\n+#: searx/templates/simple/preferences.html:32\n+msgid \"General\"\n+msgstr \"General\"\n+\n+#: searx/templates/oscar/preferences.html:14\n+#: searx/templates/oscar/preferences.html:146\n+#: searx/templates/simple/preferences.html:76\n+msgid \"Engines\"\n+msgstr 
\"Motorrs\"\n+\n+#: searx/templates/oscar/preferences.html:15\n+#: searx/templates/oscar/preferences.html:219\n+msgid \"Plugins\"\n+msgstr \"Connectat\"\n+\n+#: searx/templates/oscar/preferences.html:16\n+#: searx/templates/oscar/preferences.html:245\n+msgid \"Answerers\"\n+msgstr \"Resposter\"\n+\n+#: searx/templates/oscar/preferences.html:17\n+#: searx/templates/oscar/preferences.html:272\n+msgid \"Cookies\"\n+msgstr \"Galetes\"\n+\n+#: searx/templates/oscar/preferences.html:42\n+#: searx/templates/simple/preferences.html:48\n+msgid \"What language do you prefer for search?\"\n+msgstr \"En quina llengua preferiu cercar?\"\n+\n+#: searx/templates/oscar/preferences.html:48\n+#: searx/templates/simple/preferences.html:128\n+msgid \"Change the language of the layout\"\n+msgstr \"Canvia la llengua de la disposició\"\n+\n+#: searx/templates/oscar/preferences.html:58\n+#: searx/templates/simple/preferences.html:60\n+msgid \"Find stuff as you type\"\n+msgstr \"Troba coses tal com escriu\"\n+\n+#: searx/templates/oscar/preferences.html:69\n+#: searx/templates/simple/preferences.html:173\n+msgid \"Proxying image results through searx\"\n+msgstr \"Envia els resultats d'imatges via el servidor intermediari del searx\"\n+\n+#: searx/templates/oscar/preferences.html:78\n+msgid \"\"\n+\"Change how forms are submited, learn more about request methods\"\n+msgstr \"Canvia com es trameten els formularis, més informació sobre els mètodes de petició\"\n+\n+#: searx/templates/oscar/preferences.html:87\n+#: searx/templates/simple/preferences.html:71\n+msgid \"Filter content\"\n+msgstr \"Filtra el contingut\"\n+\n+#: searx/templates/oscar/preferences.html:97\n+#: searx/templates/simple/preferences.html:139\n+msgid \"Change searx layout\"\n+msgstr \"Canvia la disposició del searx\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Choose style for this theme\"\n+msgstr \"Trieu un estil per a aquest tema\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Style\"\n+msgstr \"Estil\"\n+\n+#: searx/templates/oscar/preferences.html:122\n+msgid \"Open Access DOI resolver\"\n+msgstr \"Solucionador de l'Open Access DOI\"\n+\n+#: searx/templates/oscar/preferences.html:123\n+msgid \"\"\n+\"Redirect to open-access versions of publications when available (plugin \"\n+\"required)\"\n+msgstr \"Redirigeix cap a versions d'accés obert de les publicacions si són disponibles (cal un connector)\"\n+\n+#: searx/templates/oscar/preferences.html:163\n+#: searx/templates/oscar/preferences.html:175\n+#: searx/templates/simple/preferences.html:88\n+msgid \"Shortcut\"\n+msgstr \"Drecera\"\n+\n+#: searx/templates/oscar/preferences.html:164\n+#: searx/templates/oscar/preferences.html:174\n+msgid \"Selected language\"\n+msgstr \"Llengua seleccionada\"\n+\n+#: searx/templates/oscar/preferences.html:166\n+#: searx/templates/oscar/preferences.html:172\n+#: searx/templates/simple/preferences.html:91\n+msgid \"Time range\"\n+msgstr \"Interval de temps\"\n+\n+#: searx/templates/oscar/preferences.html:167\n+#: searx/templates/oscar/preferences.html:171\n+#: searx/templates/simple/preferences.html:92\n+msgid \"Avg. 
time\"\n+msgstr \"Temps amitjanat\"\n+\n+#: searx/templates/oscar/preferences.html:168\n+#: searx/templates/oscar/preferences.html:170\n+#: searx/templates/simple/preferences.html:93\n+msgid \"Max time\"\n+msgstr \"Temps màxim\"\n+\n+#: searx/templates/oscar/preferences.html:248\n+msgid \"This is the list of searx's instant answering modules.\"\n+msgstr \"Aquest és el llistat dels mòduls de resposta ràpida del searx.\"\n+\n+#: searx/templates/oscar/preferences.html:252\n+msgid \"Name\"\n+msgstr \"Nom\"\n+\n+#: searx/templates/oscar/preferences.html:253\n+msgid \"Keywords\"\n+msgstr \"Paraules clau\"\n+\n+#: searx/templates/oscar/preferences.html:254\n+msgid \"Description\"\n+msgstr \"Descripció\"\n+\n+#: searx/templates/oscar/preferences.html:255\n+msgid \"Examples\"\n+msgstr \"Exemples\"\n+\n+#: searx/templates/oscar/preferences.html:275\n+msgid \"\"\n+\"This is the list of cookies and their values searx is storing on your \"\n+\"computer.\"\n+msgstr \"Aquest és el llistat de les galetes, i els seu valor, que el searx té desats en el vostre equip.\"\n+\n+#: searx/templates/oscar/preferences.html:276\n+msgid \"With that list, you can assess searx transparency.\"\n+msgstr \"Amb aquest llistat, podeu avaluar la transparència del searx.\"\n+\n+#: searx/templates/oscar/preferences.html:281\n+msgid \"Cookie name\"\n+msgstr \"Nom de la galeta\"\n+\n+#: searx/templates/oscar/preferences.html:282\n+msgid \"Value\"\n+msgstr \"Valor\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"Search URL of the currently saved preferences\"\n+msgstr \"URL de cerca de les preferències desades actualment\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"\"\n+\"Note: specifying custom settings in the search URL can reduce privacy by \"\n+\"leaking data to the clicked result sites.\"\n+msgstr \"Nota: si indiqueu configuracions personalitzades en la URL de cerca podeu reduir la privadesa, amb filtració de dades, en fer clic als llocs dels resultats.\"\n+\n+#: searx/templates/oscar/results.html:17\n+msgid \"Search results\"\n+msgstr \"Resultats de la cerca\"\n+\n+#: searx/templates/oscar/results.html:21\n+#: searx/templates/simple/results.html:84\n+msgid \"Try searching for:\"\n+msgstr \"Proveu a cercar:\"\n+\n+#: searx/templates/oscar/results.html:100\n+#: searx/templates/simple/results.html:25\n+msgid \"Engines cannot retrieve results\"\n+msgstr \"Els motors no poden obtenir cap resultat\"\n+\n+#: searx/templates/oscar/results.html:131\n+msgid \"Links\"\n+msgstr \"Enllaços\"\n+\n+#: searx/templates/oscar/search.html:8\n+#: searx/templates/oscar/search_full.html:11\n+#: searx/templates/simple/search.html:5\n+msgid \"Start search\"\n+msgstr \"Comença la cerca\"\n+\n+#: searx/templates/oscar/stats.html:2\n+msgid \"stats\"\n+msgstr \"estadístiques\"\n+\n+#: searx/templates/oscar/time-range.html:3\n+#: searx/templates/simple/time-range.html:3\n+msgid \"Anytime\"\n+msgstr \"En qualsevol moment\"\n+\n+#: searx/templates/oscar/time-range.html:6\n+#: searx/templates/simple/time-range.html:6\n+msgid \"Last day\"\n+msgstr \"Les darreres 24 hores\"\n+\n+#: searx/templates/oscar/time-range.html:9\n+#: searx/templates/simple/time-range.html:9\n+msgid \"Last week\"\n+msgstr \"La setmana passada\"\n+\n+#: searx/templates/oscar/time-range.html:12\n+#: searx/templates/simple/time-range.html:12\n+msgid \"Last month\"\n+msgstr \"El darrer mes\"\n+\n+#: searx/templates/oscar/time-range.html:15\n+#: searx/templates/simple/time-range.html:15\n+msgid \"Last year\"\n+msgstr \"El darrer any\"\n+\n+#: 
searx/templates/oscar/messages/first_time.html:6\n+#: searx/templates/oscar/messages/no_data_available.html:3\n+msgid \"Heads up!\"\n+msgstr \"Atenció!\"\n+\n+#: searx/templates/oscar/messages/first_time.html:7\n+msgid \"It look like you are using searx first time.\"\n+msgstr \"Sembla que esteu usant searx per primer cop.\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:3\n+msgid \"Information!\"\n+msgstr \"Informació!\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:4\n+msgid \"currently, there are no cookies defined.\"\n+msgstr \"actualment, no hi ha definida cap galeta.\"\n+\n+#: searx/templates/oscar/messages/no_data_available.html:4\n+msgid \"There is currently no data available. \"\n+msgstr \"Actualment, no hi ha dades disponibles.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+msgid \"Engines cannot retrieve results.\"\n+msgstr \"Els motors no poden obtenir cap resultat\"\n+\n+#: searx/templates/oscar/messages/no_results.html:10\n+#: searx/templates/simple/messages/no_results.html:10\n+msgid \"Please, try again later or find another searx instance.\"\n+msgstr \"Torneu-ho a intentar més tard o useu una altra instància del searx.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:14\n+#: searx/templates/simple/messages/no_results.html:14\n+msgid \"Sorry!\"\n+msgstr \"Disculpeu!\"\n+\n+#: searx/templates/oscar/messages/no_results.html:15\n+#: searx/templates/simple/messages/no_results.html:15\n+msgid \"\"\n+\"we didn't find any results. Please use another query or search in more \"\n+\"categories.\"\n+msgstr \"no hem trobat cap resultat. Feu una consulta diferent o cerqueu en més categories.\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:7\n+msgid \"Well done!\"\n+msgstr \"Ben fet!\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:8\n+msgid \"Settings saved successfully.\"\n+msgstr \"La configuració s'ha desat correctament.\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:7\n+msgid \"Oh snap!\"\n+msgstr \"Cagundena!\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:8\n+msgid \"Something went wrong.\"\n+msgstr \"Alguna cosa ha anat malament.\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"show media\"\n+msgstr \"mostra el contingut multimèdia\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"hide media\"\n+msgstr \"amaga el contingut multimèdia\"\n+\n+#: searx/templates/oscar/result_templates/images.html:30\n+msgid \"Get image\"\n+msgstr \"Obtén la imatge\"\n+\n+#: searx/templates/oscar/result_templates/images.html:33\n+msgid \"View source\"\n+msgstr \"Mostra el codi font\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"show map\"\n+msgstr \"mostra el mapa\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"hide map\"\n+msgstr \"amaga el mapa\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"show details\"\n+msgstr \"mostra els detalls\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"hide details\"\n+msgstr \"amaga els detalls\"\n+\n+#: 
searx/templates/oscar/result_templates/torrent.html:7\n+#: searx/templates/simple/result_templates/torrent.html:11\n+msgid \"Filesize\"\n+msgstr \"Mida del fitxer\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:9\n+#: searx/templates/simple/result_templates/torrent.html:12\n+msgid \"Bytes\"\n+msgstr \"Bytes\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:10\n+#: searx/templates/simple/result_templates/torrent.html:13\n+msgid \"kiB\"\n+msgstr \"kiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:11\n+#: searx/templates/simple/result_templates/torrent.html:14\n+msgid \"MiB\"\n+msgstr \"MiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:12\n+#: searx/templates/simple/result_templates/torrent.html:15\n+msgid \"GiB\"\n+msgstr \"GiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:13\n+#: searx/templates/simple/result_templates/torrent.html:16\n+msgid \"TiB\"\n+msgstr \"TiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:15\n+#: searx/templates/simple/result_templates/torrent.html:20\n+msgid \"Number of Files\"\n+msgstr \"Nombre de fiters\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"show video\"\n+msgstr \"mostra el vídeo\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"hide video\"\n+msgstr \"amaga el vídeo\"\n+\n+#: searx/templates/pix-art/results.html:28\n+msgid \"Load more...\"\n+msgstr \"Carrega'n més...\"\n+\n+#: searx/templates/simple/base.html:31\n+msgid \"No item found\"\n+msgstr \"No s'ha trobat cap element\"\n+\n+#: searx/templates/simple/preferences.html:89\n+msgid \"Supports selected language\"\n+msgstr \"Suporta la llengua seleccionada\"\n+\n+#: searx/templates/simple/preferences.html:118\n+msgid \"User interface\"\n+msgstr \"Interfície d'usuari\"\n+\n+#: searx/templates/simple/preferences.html:154\n+msgid \"Privacy\"\n+msgstr \"Privadesa\"\ndiff --git a/searx/translations/cs/LC_MESSAGES/messages.mo b/searx/translations/cs/LC_MESSAGES/messages.mo\nindex 3e638afa32..eded7150ad 100644\nBinary files a/searx/translations/cs/LC_MESSAGES/messages.mo and b/searx/translations/cs/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/cs/LC_MESSAGES/messages.po b/searx/translations/cs/LC_MESSAGES/messages.po\nindex ef96271fc0..8d3ce3c3e9 100644\n--- a/searx/translations/cs/LC_MESSAGES/messages.po\n+++ b/searx/translations/cs/LC_MESSAGES/messages.po\n@@ -4,24 +4,25 @@\n # \n # Translators:\n # Clon , 2017\n+# Václav Zouzalík , 2018\n msgid \"\"\n msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2017-11-01 20:31+0000\\n\"\n-\"Last-Translator: Adam Tauber \\n\"\n+\"PO-Revision-Date: 2018-12-18 12:03+0000\\n\"\n+\"Last-Translator: Václav Zouzalík \\n\"\n \"Language-Team: Czech (http://www.transifex.com/asciimoo/searx/language/cs/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n \"Content-Transfer-Encoding: 8bit\\n\"\n \"Generated-By: Babel 2.3.4\\n\"\n \"Language: cs\\n\"\n-\"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\\n\"\n+\"Plural-Forms: nplurals=4; plural=(n == 1 && n % 1 == 0) ? 0 : (n >= 2 && n <= 4 && n % 1 == 0) ? 1: (n % 1 != 0 ) ? 
2 : 3;\\n\"\n \n #: searx/search.py:137 searx/search.py:182\n msgid \"timeout\"\n-msgstr \"\"\n+msgstr \"timeout\"\n \n #: searx/search.py:144\n msgid \"request exception\"\n@@ -77,7 +78,7 @@ msgstr \"Neplatné nastavení, upravte svoje předvolby\"\n \n #: searx/webapp.py:415\n msgid \"Invalid settings\"\n-msgstr \"\"\n+msgstr \"Neplatné nastavení\"\n \n #: searx/webapp.py:449 searx/webapp.py:493\n msgid \"search error\"\n@@ -131,7 +132,7 @@ msgstr \"\"\n \n #: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n msgid \"Errors\"\n-msgstr \"Chyb\"\n+msgstr \"Chyby\"\n \n #: searx/engines/pdbe.py:87\n msgid \"{title}&nbsp;(OBSOLETE)\"\n@@ -187,13 +188,13 @@ msgstr \"Hledat ve vybrané kategorii\"\n msgid \"\"\n \"Perform search immediately if a category selected. Disable to select \"\n \"multiple categories. (JavaScript required)\"\n-msgstr \"\"\n+msgstr \"Vyhledávejte okamžitě, pokud je vybrána kategorie. Vypněte, pokud potřebujete zvolit vícero kategorií. (vyžaduje JavaScript)\"\n \n #: searx/plugins/self_info.py:20\n msgid \"\"\n \"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n \"contains \\\"user agent\\\".\"\n-msgstr \"\"\n+msgstr \"Ukáže vaši IP adresu, pokud dotaz je \\\"ip\\\", a váš user agent, pokud dotatz obsahuje \\\"user agent\\\".\"\n \n #: searx/plugins/tracker_url_remover.py:26\n msgid \"Tracker URL remover\"\n@@ -375,7 +376,7 @@ msgstr \"Modrá (základní)\"\n \n #: searx/templates/courgette/preferences.html:87\n msgid \"Violet\"\n-msgstr \"\"\n+msgstr \"Fialová\"\n \n #: searx/templates/courgette/preferences.html:88\n msgid \"Green\"\n@@ -383,7 +384,7 @@ msgstr \"Zelená\"\n \n #: searx/templates/courgette/preferences.html:89\n msgid \"Cyan\"\n-msgstr \"\"\n+msgstr \"Modrozelená\"\n \n #: searx/templates/courgette/preferences.html:90\n msgid \"Orange\"\n@@ -543,14 +544,14 @@ msgstr \"původní kontext\"\n #: searx/templates/oscar/result_templates/torrent.html:6\n #: searx/templates/simple/result_templates/torrent.html:9\n msgid \"Seeder\"\n-msgstr \"\"\n+msgstr \"Seeder\"\n \n #: searx/templates/courgette/result_templates/torrent.html:7\n #: searx/templates/legacy/result_templates/torrent.html:11\n #: searx/templates/oscar/result_templates/torrent.html:6\n #: searx/templates/simple/result_templates/torrent.html:9\n msgid \"Leecher\"\n-msgstr \"\"\n+msgstr \"Leecher\"\n \n #: searx/templates/courgette/result_templates/torrent.html:9\n #: searx/templates/legacy/result_templates/torrent.html:9\n@@ -676,7 +677,7 @@ msgstr \"Změnít jazyk prostředí\"\n #: searx/templates/oscar/preferences.html:58\n #: searx/templates/simple/preferences.html:60\n msgid \"Find stuff as you type\"\n-msgstr \"\"\n+msgstr \"Vyhledávat během psaní\"\n \n #: searx/templates/oscar/preferences.html:69\n #: searx/templates/simple/preferences.html:173\n@@ -729,7 +730,7 @@ msgstr \"Zkratka\"\n #: searx/templates/oscar/preferences.html:164\n #: searx/templates/oscar/preferences.html:174\n msgid \"Selected language\"\n-msgstr \"\"\n+msgstr \"Zvolený jazyk\"\n \n #: searx/templates/oscar/preferences.html:166\n #: searx/templates/oscar/preferences.html:172\n@@ -853,7 +854,7 @@ msgstr \"Rok\"\n #: searx/templates/oscar/messages/first_time.html:6\n #: searx/templates/oscar/messages/no_data_available.html:3\n msgid \"Heads up!\"\n-msgstr \"Hlavy vzhůru!\"\n+msgstr \"Hlavu vzhůru!\"\n \n #: searx/templates/oscar/messages/first_time.html:7\n msgid \"It look like you are using searx first time.\"\n@@ -879,7 +880,7 @@ msgstr \"\"\n #: 
searx/templates/oscar/messages/no_results.html:10\n #: searx/templates/simple/messages/no_results.html:10\n msgid \"Please, try again later or find another searx instance.\"\n-msgstr \"\"\n+msgstr \"Zkuste prosím později, nebo na jiné instanci searxu.\"\n \n #: searx/templates/oscar/messages/no_results.html:14\n #: searx/templates/simple/messages/no_results.html:14\n@@ -891,7 +892,7 @@ msgstr \"Pardón!\"\n msgid \"\"\n \"we didn't find any results. Please use another query or search in more \"\n \"categories.\"\n-msgstr \"nenašly jsme žádné výsledky. Prosím použíjte jiný dotaz nebo hledejte ve více kategoriích.\"\n+msgstr \"Nenašli jsme žádné výsledky. Použijte prosím jiný dotaz nebo hledejte ve více kategoriích.\"\n \n #: searx/templates/oscar/messages/save_settings_successfull.html:7\n msgid \"Well done!\"\n@@ -1010,4 +1011,4 @@ msgstr \"\"\n \n #: searx/templates/simple/preferences.html:154\n msgid \"Privacy\"\n-msgstr \"\"\n+msgstr \"Soukromí\"\ndiff --git a/searx/translations/cy/LC_MESSAGES/messages.mo b/searx/translations/cy/LC_MESSAGES/messages.mo\nnew file mode 100644\nindex 0000000000..066d268e03\nBinary files /dev/null and b/searx/translations/cy/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/cy/LC_MESSAGES/messages.po b/searx/translations/cy/LC_MESSAGES/messages.po\nnew file mode 100644\nindex 0000000000..3344c6d1d5\n--- /dev/null\n+++ b/searx/translations/cy/LC_MESSAGES/messages.po\n@@ -0,0 +1,1013 @@\n+# Translations template for PROJECT.\n+# Copyright (C) 2017 ORGANIZATION\n+# This file is distributed under the same license as the PROJECT project.\n+# \n+# Translators:\n+# Aled Powell , 2019\n+msgid \"\"\n+msgstr \"\"\n+\"Project-Id-Version: searx\\n\"\n+\"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n+\"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n+\"PO-Revision-Date: 2019-01-05 13:50+0000\\n\"\n+\"Last-Translator: Aled Powell \\n\"\n+\"Language-Team: Welsh (http://www.transifex.com/asciimoo/searx/language/cy/)\\n\"\n+\"MIME-Version: 1.0\\n\"\n+\"Content-Type: text/plain; charset=UTF-8\\n\"\n+\"Content-Transfer-Encoding: 8bit\\n\"\n+\"Generated-By: Babel 2.3.4\\n\"\n+\"Language: cy\\n\"\n+\"Plural-Forms: nplurals=4; plural=(n==1) ? 0 : (n==2) ? 1 : (n != 8 && n != 11) ? 2 : 3;\\n\"\n+\n+#: searx/search.py:137 searx/search.py:182\n+msgid \"timeout\"\n+msgstr \"\"\n+\n+#: searx/search.py:144\n+msgid \"request exception\"\n+msgstr \"\"\n+\n+#: searx/search.py:151\n+msgid \"unexpected crash\"\n+msgstr \"\"\n+\n+#: searx/webapp.py:136\n+msgid \"files\"\n+msgstr \"ffeiliau\"\n+\n+#: searx/webapp.py:137\n+msgid \"general\"\n+msgstr \"cyffredinol\"\n+\n+#: searx/webapp.py:138\n+msgid \"music\"\n+msgstr \"cerddoriaeth\"\n+\n+#: searx/webapp.py:139\n+msgid \"social media\"\n+msgstr \"cyfryngau cymdeithasol\"\n+\n+#: searx/webapp.py:140\n+msgid \"images\"\n+msgstr \"delweddau\"\n+\n+#: searx/webapp.py:141\n+msgid \"videos\"\n+msgstr \"fideos\"\n+\n+#: searx/webapp.py:142\n+msgid \"it\"\n+msgstr \"Technoleg\"\n+\n+#: searx/webapp.py:143\n+msgid \"news\"\n+msgstr \"newyddion\"\n+\n+#: searx/webapp.py:144\n+msgid \"map\"\n+msgstr \"map\"\n+\n+#: searx/webapp.py:145\n+msgid \"science\"\n+msgstr \"gwyddoniaeth\"\n+\n+#: searx/webapp.py:399 searx/webapp.py:658\n+msgid \"Invalid settings, please edit your preferences\"\n+msgstr \"Gosodiadau annilys. 
Addasa dy ddewisiadau.\"\n+\n+#: searx/webapp.py:415\n+msgid \"Invalid settings\"\n+msgstr \"Gosodiadau annilys\"\n+\n+#: searx/webapp.py:449 searx/webapp.py:493\n+msgid \"search error\"\n+msgstr \"gwall chwilio\"\n+\n+#: searx/webapp.py:530\n+msgid \"{minutes} minute(s) ago\"\n+msgstr \"{minutes} munud yn ôl\"\n+\n+#: searx/webapp.py:532\n+msgid \"{hours} hour(s), {minutes} minute(s) ago\"\n+msgstr \"{hours} awr, {minutes} munud yn ôl\"\n+\n+#: searx/answerers/random/answerer.py:53\n+msgid \"Random value generator\"\n+msgstr \"\"\n+\n+#: searx/answerers/random/answerer.py:54\n+msgid \"Generate different random values\"\n+msgstr \"\"\n+\n+#: searx/answerers/statistics/answerer.py:53\n+msgid \"Statistics functions\"\n+msgstr \"\"\n+\n+#: searx/answerers/statistics/answerer.py:54\n+msgid \"Compute {functions} of the arguments\"\n+msgstr \"\"\n+\n+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n+msgid \"Engine time (sec)\"\n+msgstr \"\"\n+\n+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n+msgid \"Page loads (sec)\"\n+msgstr \"\"\n+\n+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n+#: searx/templates/oscar/results.html:95\n+#: searx/templates/simple/results.html:20\n+msgid \"Number of results\"\n+msgstr \"Nifer o ganlyniadau\"\n+\n+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213\n+msgid \"Scores\"\n+msgstr \"Sgoriau\"\n+\n+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217\n+msgid \"Scores per result\"\n+msgstr \"\"\n+\n+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n+msgid \"Errors\"\n+msgstr \"Gwallau\"\n+\n+#: searx/engines/pdbe.py:87\n+msgid \"{title}&nbsp;(OBSOLETE)\"\n+msgstr \"\"\n+\n+#: searx/engines/pdbe.py:91\n+msgid \"This entry has been superseded by\"\n+msgstr \"\"\n+\n+#: searx/engines/pubmed.py:74\n+msgid \"No abstract is available for this publication.\"\n+msgstr \"\"\n+\n+#: searx/plugins/https_rewrite.py:32\n+msgid \"Rewrite HTTP links to HTTPS if possible\"\n+msgstr \"\"\n+\n+#: searx/plugins/infinite_scroll.py:3\n+msgid \"Infinite scroll\"\n+msgstr \"\"\n+\n+#: searx/plugins/infinite_scroll.py:4\n+msgid \"Automatically load next page when scrolling to bottom of current page\"\n+msgstr \"\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:9\n+msgid \"Open Access DOI rewrite\"\n+msgstr \"\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:10\n+msgid \"\"\n+\"Avoid paywalls by redirecting to open-access versions of publications when \"\n+\"available\"\n+msgstr \"\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:18\n+#: searx/templates/oscar/preferences.html:114\n+#: searx/templates/simple/preferences.html:149\n+msgid \"Open result links on new browser tabs\"\n+msgstr \"Agor dolenni canlyniadau mewn tabiau newydd yn y porwr\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:19\n+msgid \"\"\n+\"Results are opened in the same window by default. This plugin overwrites the\"\n+\" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n+msgstr \"Mae canlyniadau fel arfer yn cael eu hagor yn yr un ffenestr. Mae'r ategolyn hwn yn newid hyn fel bod dolenni yn cael eu hagor mewn tabiau/ffenestri newydd. (Angen JavaScript)\"\n+\n+#: searx/plugins/search_on_category_select.py:18\n+msgid \"Search on category select\"\n+msgstr \"\"\n+\n+#: searx/plugins/search_on_category_select.py:19\n+msgid \"\"\n+\"Perform search immediately if a category selected. Disable to select \"\n+\"multiple categories. 
(JavaScript required)\"\n+msgstr \"\"\n+\n+#: searx/plugins/self_info.py:20\n+msgid \"\"\n+\"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n+\"contains \\\"user agent\\\".\"\n+msgstr \"\"\n+\n+#: searx/plugins/tracker_url_remover.py:26\n+msgid \"Tracker URL remover\"\n+msgstr \"\"\n+\n+#: searx/plugins/tracker_url_remover.py:27\n+msgid \"Remove trackers arguments from the returned URL\"\n+msgstr \"\"\n+\n+#: searx/plugins/vim_hotkeys.py:3\n+msgid \"Vim-like hotkeys\"\n+msgstr \"\"\n+\n+#: searx/plugins/vim_hotkeys.py:4\n+msgid \"\"\n+\"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n+\"\\\"h\\\" key on main or result page to get help.\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n+#: searx/templates/simple/404.html:4\n+msgid \"Page not found\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+#, python-format\n+msgid \"Go to %(search_page)s.\"\n+msgstr \"Mynd i %(search_page)s.\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+msgid \"search page\"\n+msgstr \"tudalen chwilio\"\n+\n+#: searx/templates/courgette/index.html:9\n+#: searx/templates/courgette/index.html:13\n+#: searx/templates/courgette/results.html:5\n+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12\n+#: searx/templates/oscar/navbar.html:7\n+#: searx/templates/oscar/preferences.html:3\n+#: searx/templates/pix-art/index.html:8\n+msgid \"preferences\"\n+msgstr \"dewisiadau\"\n+\n+#: searx/templates/courgette/index.html:11\n+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2\n+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7\n+msgid \"about\"\n+msgstr \"ynghylch\"\n+\n+#: searx/templates/courgette/preferences.html:5\n+#: searx/templates/legacy/preferences.html:5\n+#: searx/templates/oscar/preferences.html:8\n+#: searx/templates/pix-art/preferences.html:5\n+#: searx/templates/simple/preferences.html:26\n+msgid \"Preferences\"\n+msgstr \"Dewisiadau\"\n+\n+#: searx/templates/courgette/preferences.html:9\n+#: searx/templates/legacy/preferences.html:9\n+#: searx/templates/oscar/preferences.html:33\n+#: searx/templates/oscar/preferences.html:35\n+#: searx/templates/simple/preferences.html:34\n+msgid \"Default categories\"\n+msgstr \"Categorïau arferol\"\n+\n+#: searx/templates/courgette/preferences.html:13\n+#: searx/templates/legacy/preferences.html:14\n+#: searx/templates/oscar/preferences.html:41\n+#: searx/templates/pix-art/preferences.html:9\n+#: searx/templates/simple/preferences.html:39\n+#: searx/templates/simple/preferences.html:163\n+msgid \"Search language\"\n+msgstr \"Iaith chwilio\"\n+\n+#: searx/templates/courgette/preferences.html:16\n+#: searx/templates/legacy/preferences.html:17\n+#: searx/templates/oscar/languages.html:6\n+#: searx/templates/pix-art/preferences.html:12\n+#: searx/templates/simple/languages.html:2\n+#: searx/templates/simple/preferences.html:42\n+msgid \"Default language\"\n+msgstr \"Iaith arferol\"\n+\n+#: searx/templates/courgette/preferences.html:24\n+#: searx/templates/legacy/preferences.html:25\n+#: 
searx/templates/oscar/preferences.html:47\n+#: searx/templates/pix-art/preferences.html:20\n+#: searx/templates/simple/preferences.html:120\n+msgid \"Interface language\"\n+msgstr \"Iaith y rhyngwyneb\"\n+\n+#: searx/templates/courgette/preferences.html:34\n+#: searx/templates/legacy/preferences.html:35\n+#: searx/templates/oscar/preferences.html:57\n+#: searx/templates/simple/preferences.html:51\n+msgid \"Autocomplete\"\n+msgstr \"Awto-gwblhau\"\n+\n+#: searx/templates/courgette/preferences.html:45\n+#: searx/templates/legacy/preferences.html:46\n+#: searx/templates/oscar/preferences.html:68\n+#: searx/templates/simple/preferences.html:166\n+msgid \"Image proxy\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:48\n+#: searx/templates/legacy/preferences.html:49\n+#: searx/templates/oscar/preferences.html:72\n+#: searx/templates/simple/preferences.html:169\n+msgid \"Enabled\"\n+msgstr \"Galluogwyd\"\n+\n+#: searx/templates/courgette/preferences.html:49\n+#: searx/templates/legacy/preferences.html:50\n+#: searx/templates/oscar/preferences.html:73\n+#: searx/templates/simple/preferences.html:170\n+msgid \"Disabled\"\n+msgstr \"Analluogwyd\"\n+\n+#: searx/templates/courgette/preferences.html:54\n+#: searx/templates/legacy/preferences.html:55\n+#: searx/templates/oscar/preferences.html:77\n+#: searx/templates/pix-art/preferences.html:30\n+#: searx/templates/simple/preferences.html:156\n+msgid \"Method\"\n+msgstr \"Dull\"\n+\n+#: searx/templates/courgette/preferences.html:63\n+#: searx/templates/legacy/preferences.html:64\n+#: searx/templates/oscar/preferences.html:86\n+#: searx/templates/oscar/preferences.html:165\n+#: searx/templates/oscar/preferences.html:173\n+#: searx/templates/simple/preferences.html:63\n+#: searx/templates/simple/preferences.html:90\n+msgid \"SafeSearch\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:66\n+#: searx/templates/legacy/preferences.html:67\n+#: searx/templates/oscar/preferences.html:90\n+#: searx/templates/simple/preferences.html:66\n+msgid \"Strict\"\n+msgstr \"Caeth\"\n+\n+#: searx/templates/courgette/preferences.html:67\n+#: searx/templates/legacy/preferences.html:68\n+#: searx/templates/oscar/preferences.html:91\n+#: searx/templates/simple/preferences.html:67\n+msgid \"Moderate\"\n+msgstr \"Cymhedrol\"\n+\n+#: searx/templates/courgette/preferences.html:68\n+#: searx/templates/legacy/preferences.html:69\n+#: searx/templates/oscar/preferences.html:92\n+#: searx/templates/simple/preferences.html:68\n+msgid \"None\"\n+msgstr \"Dim\"\n+\n+#: searx/templates/courgette/preferences.html:73\n+#: searx/templates/legacy/preferences.html:74\n+#: searx/templates/oscar/preferences.html:96\n+#: searx/templates/pix-art/preferences.html:39\n+#: searx/templates/simple/preferences.html:131\n+msgid \"Themes\"\n+msgstr \"Themâu\"\n+\n+#: searx/templates/courgette/preferences.html:83\n+msgid \"Color\"\n+msgstr \"Lliw\"\n+\n+#: searx/templates/courgette/preferences.html:86\n+msgid \"Blue (default)\"\n+msgstr \"Glas (arferol)\"\n+\n+#: searx/templates/courgette/preferences.html:87\n+msgid \"Violet\"\n+msgstr \"Fioled\"\n+\n+#: searx/templates/courgette/preferences.html:88\n+msgid \"Green\"\n+msgstr \"Gwyrdd\"\n+\n+#: searx/templates/courgette/preferences.html:89\n+msgid \"Cyan\"\n+msgstr \"Gwyrddlas\"\n+\n+#: searx/templates/courgette/preferences.html:90\n+msgid \"Orange\"\n+msgstr \"Oren\"\n+\n+#: searx/templates/courgette/preferences.html:91\n+msgid \"Red\"\n+msgstr \"Coch\"\n+\n+#: searx/templates/courgette/preferences.html:96\n+#: 
searx/templates/legacy/preferences.html:93\n+#: searx/templates/pix-art/preferences.html:49\n+#: searx/templates/simple/preferences.html:77\n+msgid \"Currently used search engines\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:100\n+#: searx/templates/legacy/preferences.html:97\n+#: searx/templates/oscar/preferences.html:162\n+#: searx/templates/oscar/preferences.html:176\n+#: searx/templates/pix-art/preferences.html:53\n+#: searx/templates/simple/preferences.html:87\n+msgid \"Engine name\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:101\n+#: searx/templates/legacy/preferences.html:98\n+msgid \"Category\"\n+msgstr \"Categori\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:113\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:110\n+#: searx/templates/oscar/preferences.html:161\n+#: searx/templates/oscar/preferences.html:177\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:64\n+#: searx/templates/simple/preferences.html:86\n+msgid \"Allow\"\n+msgstr \"Caniatáu\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:114\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:111\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:65\n+msgid \"Block\"\n+msgstr \"Rhwystro\"\n+\n+#: searx/templates/courgette/preferences.html:122\n+#: searx/templates/legacy/preferences.html:119\n+#: searx/templates/oscar/preferences.html:297\n+#: searx/templates/pix-art/preferences.html:73\n+#: searx/templates/simple/preferences.html:180\n+msgid \"\"\n+\"These settings are stored in your cookies, this allows us not to store this \"\n+\"data about you.\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:124\n+#: searx/templates/legacy/preferences.html:121\n+#: searx/templates/oscar/preferences.html:299\n+#: searx/templates/pix-art/preferences.html:75\n+#: searx/templates/simple/preferences.html:182\n+msgid \"\"\n+\"These cookies serve your sole convenience, we don't use these cookies to \"\n+\"track you.\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:127\n+#: searx/templates/legacy/preferences.html:124\n+#: searx/templates/oscar/preferences.html:305\n+#: searx/templates/pix-art/preferences.html:78\n+#: searx/templates/simple/preferences.html:185\n+msgid \"save\"\n+msgstr \"cadw\"\n+\n+#: searx/templates/courgette/preferences.html:128\n+#: searx/templates/legacy/preferences.html:125\n+#: searx/templates/oscar/preferences.html:307\n+#: searx/templates/simple/preferences.html:186\n+msgid \"Reset defaults\"\n+msgstr \"Ailosod rhagosodiadau\"\n+\n+#: searx/templates/courgette/preferences.html:129\n+#: searx/templates/legacy/preferences.html:126\n+#: searx/templates/oscar/preferences.html:306\n+#: searx/templates/pix-art/preferences.html:79\n+#: searx/templates/simple/preferences.html:187\n+msgid \"back\"\n+msgstr \"nôl\"\n+\n+#: searx/templates/courgette/results.html:12\n+#: searx/templates/legacy/results.html:13\n+#: searx/templates/oscar/results.html:136\n+#: searx/templates/simple/results.html:58\n+msgid \"Search URL\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/results.html:16\n+#: searx/templates/legacy/results.html:17\n+#: searx/templates/oscar/results.html:141\n+#: searx/templates/simple/results.html:62\n+msgid \"Download results\"\n+msgstr \"Lawrlwytho'r 
canlyniadau\"\n+\n+#: searx/templates/courgette/results.html:34\n+#: searx/templates/legacy/results.html:35\n+#: searx/templates/simple/results.html:10\n+msgid \"Answers\"\n+msgstr \"Atebion\"\n+\n+#: searx/templates/courgette/results.html:42\n+#: searx/templates/legacy/results.html:43\n+#: searx/templates/oscar/results.html:116\n+#: searx/templates/simple/results.html:42\n+msgid \"Suggestions\"\n+msgstr \"Awgrymiadau\"\n+\n+#: searx/templates/courgette/results.html:70\n+#: searx/templates/legacy/results.html:81\n+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78\n+#: searx/templates/simple/results.html:130\n+msgid \"previous page\"\n+msgstr \"tudalen ddiwethaf\"\n+\n+#: searx/templates/courgette/results.html:81\n+#: searx/templates/legacy/results.html:92\n+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84\n+#: searx/templates/simple/results.html:145\n+msgid \"next page\"\n+msgstr \"tudalen nesaf\"\n+\n+#: searx/templates/courgette/search.html:3\n+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6\n+#: searx/templates/oscar/search_full.html:9\n+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4\n+msgid \"Search for...\"\n+msgstr \"Chwilio am...\"\n+\n+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4\n+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4\n+#: searx/templates/simple/stats.html:7\n+msgid \"Engine stats\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/result_templates/images.html:4\n+#: searx/templates/legacy/result_templates/images.html:4\n+#: searx/templates/pix-art/result_templates/images.html:4\n+msgid \"original context\"\n+msgstr \"cyd-destun gwreiddiol\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Seeder\"\n+msgstr \"Hadau\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Leecher\"\n+msgstr \"Lawrlwythwyr\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:9\n+#: searx/templates/legacy/result_templates/torrent.html:9\n+#: searx/templates/oscar/macros.html:23\n+#: searx/templates/simple/result_templates/torrent.html:6\n+msgid \"magnet link\"\n+msgstr \"dolen magnet\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:10\n+#: searx/templates/legacy/result_templates/torrent.html:10\n+#: searx/templates/oscar/macros.html:24\n+#: searx/templates/simple/result_templates/torrent.html:7\n+msgid \"torrent file\"\n+msgstr \"ffeil torrent\"\n+\n+#: searx/templates/legacy/categories.html:8\n+#: searx/templates/simple/categories.html:6\n+msgid \"Click on the magnifier to perform search\"\n+msgstr \"Cliciwch ar y chwyddwydr i berfformio chwiliad\"\n+\n+#: searx/templates/legacy/preferences.html:84\n+#: searx/templates/oscar/preferences.html:113\n+#: searx/templates/simple/preferences.html:142\n+msgid \"Results on new tabs\"\n+msgstr \"Canlyniadau mewn tabiau newydd\"\n+\n+#: searx/templates/legacy/preferences.html:87\n+#: searx/templates/oscar/preferences.html:117\n+#: searx/templates/simple/preferences.html:145\n+msgid \"On\"\n+msgstr \"Ymlaen\"\n+\n+#: 
searx/templates/legacy/preferences.html:88\n+#: searx/templates/oscar/preferences.html:118\n+#: searx/templates/simple/preferences.html:146\n+msgid \"Off\"\n+msgstr \"I ffwrdd\"\n+\n+#: searx/templates/legacy/result_templates/code.html:3\n+#: searx/templates/legacy/result_templates/default.html:3\n+#: searx/templates/legacy/result_templates/map.html:9\n+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48\n+#: searx/templates/simple/macros.html:43\n+msgid \"cached\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/advanced.html:4\n+msgid \"Advanced settings\"\n+msgstr \"Gosodiadau uwch\"\n+\n+#: searx/templates/oscar/base.html:62\n+#: searx/templates/oscar/messages/first_time.html:4\n+#: searx/templates/oscar/messages/save_settings_successfull.html:5\n+#: searx/templates/oscar/messages/unknow_error.html:5\n+msgid \"Close\"\n+msgstr \"Cau\"\n+\n+#: searx/templates/oscar/base.html:64\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+#: searx/templates/simple/results.html:25\n+msgid \"Error!\"\n+msgstr \"Gwall!\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"Powered by\"\n+msgstr \"Pwerwyd gan\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"a privacy-respecting, hackable metasearch engine\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n+#: searx/templates/simple/macros.html:43\n+msgid \"proxied\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/macros.html:92\n+msgid \"supported\"\n+msgstr \"cefnogir\"\n+\n+#: searx/templates/oscar/macros.html:96\n+msgid \"not supported\"\n+msgstr \"ni chefnogir\"\n+\n+#: searx/templates/oscar/preferences.html:13\n+#: searx/templates/oscar/preferences.html:22\n+#: searx/templates/simple/preferences.html:32\n+msgid \"General\"\n+msgstr \"Cyffredin\"\n+\n+#: searx/templates/oscar/preferences.html:14\n+#: searx/templates/oscar/preferences.html:146\n+#: searx/templates/simple/preferences.html:76\n+msgid \"Engines\"\n+msgstr \"Peiriannau\"\n+\n+#: searx/templates/oscar/preferences.html:15\n+#: searx/templates/oscar/preferences.html:219\n+msgid \"Plugins\"\n+msgstr \"Ategolion\"\n+\n+#: searx/templates/oscar/preferences.html:16\n+#: searx/templates/oscar/preferences.html:245\n+msgid \"Answerers\"\n+msgstr \"Atebwyr\"\n+\n+#: searx/templates/oscar/preferences.html:17\n+#: searx/templates/oscar/preferences.html:272\n+msgid \"Cookies\"\n+msgstr \"Cwcis\"\n+\n+#: searx/templates/oscar/preferences.html:42\n+#: searx/templates/simple/preferences.html:48\n+msgid \"What language do you prefer for search?\"\n+msgstr \"Ym mha iaith wyt ti'n ffafrio chwilio?\"\n+\n+#: searx/templates/oscar/preferences.html:48\n+#: searx/templates/simple/preferences.html:128\n+msgid \"Change the language of the layout\"\n+msgstr \"Newid iaith rhyngwyneb searX\"\n+\n+#: searx/templates/oscar/preferences.html:58\n+#: searx/templates/simple/preferences.html:60\n+msgid \"Find stuff as you type\"\n+msgstr \"Darganfod pethau wrth i chi deipio\"\n+\n+#: searx/templates/oscar/preferences.html:69\n+#: searx/templates/simple/preferences.html:173\n+msgid \"Proxying image results through searx\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:78\n+msgid \"\"\n+\"Change how forms are submited, <a href=\\\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\\\" rel=\\\"external\\\">learn more about request methods</a>\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:87\n+#: searx/templates/simple/preferences.html:71\n+msgid \"Filter content\"\n+msgstr \"\"\n+\n+#: 
searx/templates/oscar/preferences.html:97\n+#: searx/templates/simple/preferences.html:139\n+msgid \"Change searx layout\"\n+msgstr \"Newid cynllun searX\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Choose style for this theme\"\n+msgstr \"Dewis arddull ar gyfer y thema hon\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Style\"\n+msgstr \"Arddull\"\n+\n+#: searx/templates/oscar/preferences.html:122\n+msgid \"Open Access DOI resolver\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:123\n+msgid \"\"\n+\"Redirect to open-access versions of publications when available (plugin \"\n+\"required)\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:163\n+#: searx/templates/oscar/preferences.html:175\n+#: searx/templates/simple/preferences.html:88\n+msgid \"Shortcut\"\n+msgstr \"Llwybr Byr\"\n+\n+#: searx/templates/oscar/preferences.html:164\n+#: searx/templates/oscar/preferences.html:174\n+msgid \"Selected language\"\n+msgstr \"Iaith a ddewiswyd\"\n+\n+#: searx/templates/oscar/preferences.html:166\n+#: searx/templates/oscar/preferences.html:172\n+#: searx/templates/simple/preferences.html:91\n+msgid \"Time range\"\n+msgstr \"Cyfnod amser\"\n+\n+#: searx/templates/oscar/preferences.html:167\n+#: searx/templates/oscar/preferences.html:171\n+#: searx/templates/simple/preferences.html:92\n+msgid \"Avg. time\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:168\n+#: searx/templates/oscar/preferences.html:170\n+#: searx/templates/simple/preferences.html:93\n+msgid \"Max time\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:248\n+msgid \"This is the list of searx's instant answering modules.\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:252\n+msgid \"Name\"\n+msgstr \"Enw\"\n+\n+#: searx/templates/oscar/preferences.html:253\n+msgid \"Keywords\"\n+msgstr \"Allweddeiriau\"\n+\n+#: searx/templates/oscar/preferences.html:254\n+msgid \"Description\"\n+msgstr \"Disgrifiad\"\n+\n+#: searx/templates/oscar/preferences.html:255\n+msgid \"Examples\"\n+msgstr \"Enghreifftiau\"\n+\n+#: searx/templates/oscar/preferences.html:275\n+msgid \"\"\n+\"This is the list of cookies and their values searx is storing on your \"\n+\"computer.\"\n+msgstr \"Dyma restr y cwcis, a'u gwerthoedd, mae searX yn eu cadw ar eich dyfais.\"\n+\n+#: searx/templates/oscar/preferences.html:276\n+msgid \"With that list, you can assess searx transparency.\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:281\n+msgid \"Cookie name\"\n+msgstr \"Enw cwci\"\n+\n+#: searx/templates/oscar/preferences.html:282\n+msgid \"Value\"\n+msgstr \"Gwerth\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"Search URL of the currently saved preferences\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"\"\n+\"Note: specifying custom settings in the search URL can reduce privacy by \"\n+\"leaking data to the clicked result sites.\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/results.html:17\n+msgid \"Search results\"\n+msgstr \"Canlyniadau chwilio\"\n+\n+#: searx/templates/oscar/results.html:21\n+#: searx/templates/simple/results.html:84\n+msgid \"Try searching for:\"\n+msgstr \"Rho gynnig ar chwilio am:\"\n+\n+#: searx/templates/oscar/results.html:100\n+#: searx/templates/simple/results.html:25\n+msgid \"Engines cannot retrieve results\"\n+msgstr \"Ni all y peiriannau cael canlyniadau\"\n+\n+#: 
searx/templates/oscar/results.html:131\n+msgid \"Links\"\n+msgstr \"Dolenni\"\n+\n+#: searx/templates/oscar/search.html:8\n+#: searx/templates/oscar/search_full.html:11\n+#: searx/templates/simple/search.html:5\n+msgid \"Start search\"\n+msgstr \"Dechrau chwilio\"\n+\n+#: searx/templates/oscar/stats.html:2\n+msgid \"stats\"\n+msgstr \"ystadegau\"\n+\n+#: searx/templates/oscar/time-range.html:3\n+#: searx/templates/simple/time-range.html:3\n+msgid \"Anytime\"\n+msgstr \"Unrhyw amser\"\n+\n+#: searx/templates/oscar/time-range.html:6\n+#: searx/templates/simple/time-range.html:6\n+msgid \"Last day\"\n+msgstr \"Y diwrnod diwethaf\"\n+\n+#: searx/templates/oscar/time-range.html:9\n+#: searx/templates/simple/time-range.html:9\n+msgid \"Last week\"\n+msgstr \"Yr wythnos diwethaf\"\n+\n+#: searx/templates/oscar/time-range.html:12\n+#: searx/templates/simple/time-range.html:12\n+msgid \"Last month\"\n+msgstr \"Y mis diwethaf\"\n+\n+#: searx/templates/oscar/time-range.html:15\n+#: searx/templates/simple/time-range.html:15\n+msgid \"Last year\"\n+msgstr \"Y flwyddyn ddiwethaf\"\n+\n+#: searx/templates/oscar/messages/first_time.html:6\n+#: searx/templates/oscar/messages/no_data_available.html:3\n+msgid \"Heads up!\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/messages/first_time.html:7\n+msgid \"It look like you are using searx first time.\"\n+msgstr \"Mae'n ymddangos eich bod yn defnyddio searx am y tro cyntaf.\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:3\n+msgid \"Information!\"\n+msgstr \"Gwybodaeth!\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:4\n+msgid \"currently, there are no cookies defined.\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/messages/no_data_available.html:4\n+msgid \"There is currently no data available. \"\n+msgstr \"Does dim data ar gael ar hyn o bryd.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+msgid \"Engines cannot retrieve results.\"\n+msgstr \"Ni all y peiriannau cael canlyniadau.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:10\n+#: searx/templates/simple/messages/no_results.html:10\n+msgid \"Please, try again later or find another searx instance.\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/messages/no_results.html:14\n+#: searx/templates/simple/messages/no_results.html:14\n+msgid \"Sorry!\"\n+msgstr \"Sori!\"\n+\n+#: searx/templates/oscar/messages/no_results.html:15\n+#: searx/templates/simple/messages/no_results.html:15\n+msgid \"\"\n+\"we didn't find any results. Please use another query or search in more \"\n+\"categories.\"\n+msgstr \"Ni ddaethpwyd o hyd i unrhyw ganlyniadau. 
Defnyddiwch derm(au) chwilio gwahanol neu ehangu'r chwilio i ragor o gategorïau.\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:7\n+msgid \"Well done!\"\n+msgstr \"Da iawn!\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:8\n+msgid \"Settings saved successfully.\"\n+msgstr \"Cadwyd y gosodiadau yn iawn!\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:7\n+msgid \"Oh snap!\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:8\n+msgid \"Something went wrong.\"\n+msgstr \"Aeth rhywbeth o'i le.\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"show media\"\n+msgstr \"dangos cyfryngau\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"hide media\"\n+msgstr \"cuddio cyfryngau\"\n+\n+#: searx/templates/oscar/result_templates/images.html:30\n+msgid \"Get image\"\n+msgstr \"Cael y ddelwedd\"\n+\n+#: searx/templates/oscar/result_templates/images.html:33\n+msgid \"View source\"\n+msgstr \"Gweld y ffynhonnell\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"show map\"\n+msgstr \"dangos map\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"hide map\"\n+msgstr \"cuddio map\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"show details\"\n+msgstr \"dangos manylion\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"hide details\"\n+msgstr \"cuddio manylion\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:7\n+#: searx/templates/simple/result_templates/torrent.html:11\n+msgid \"Filesize\"\n+msgstr \"Maint ffeil\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:9\n+#: searx/templates/simple/result_templates/torrent.html:12\n+msgid \"Bytes\"\n+msgstr \"Beitiau\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:10\n+#: searx/templates/simple/result_templates/torrent.html:13\n+msgid \"kiB\"\n+msgstr \"kiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:11\n+#: searx/templates/simple/result_templates/torrent.html:14\n+msgid \"MiB\"\n+msgstr \"MiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:12\n+#: searx/templates/simple/result_templates/torrent.html:15\n+msgid \"GiB\"\n+msgstr \"GiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:13\n+#: searx/templates/simple/result_templates/torrent.html:16\n+msgid \"TiB\"\n+msgstr \"TiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:15\n+#: searx/templates/simple/result_templates/torrent.html:20\n+msgid \"Number of Files\"\n+msgstr \"Nifer o Ffeiliau\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"show video\"\n+msgstr \"dangos fideo\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"hide video\"\n+msgstr \"cuddio fideo\"\n+\n+#: searx/templates/pix-art/results.html:28\n+msgid \"Load more...\"\n+msgstr \"Dysgu mwy...\"\n+\n+#: searx/templates/simple/base.html:31\n+msgid \"No item found\"\n+msgstr \"Ni chanfuwyd eitem\"\n+\n+#: 
searx/templates/simple/preferences.html:89\n+msgid \"Supports selected language\"\n+msgstr \"Cefnogir yr iaith a ddewiswyd\"\n+\n+#: searx/templates/simple/preferences.html:118\n+msgid \"User interface\"\n+msgstr \"Rhyngwyneb defnyddiwr\"\n+\n+#: searx/templates/simple/preferences.html:154\n+msgid \"Privacy\"\n+msgstr \"Preifatrwydd\"\ndiff --git a/searx/translations/da/LC_MESSAGES/messages.mo b/searx/translations/da/LC_MESSAGES/messages.mo\nindex 21cea9ef7a..8813c0779a 100644\nBinary files a/searx/translations/da/LC_MESSAGES/messages.mo and b/searx/translations/da/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/de/LC_MESSAGES/messages.mo b/searx/translations/de/LC_MESSAGES/messages.mo\nindex d6458785db..a525fbf1e8 100644\nBinary files a/searx/translations/de/LC_MESSAGES/messages.mo and b/searx/translations/de/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/de/LC_MESSAGES/messages.po b/searx/translations/de/LC_MESSAGES/messages.po\nindex 2bf22911e6..64f253ebd9 100644\n--- a/searx/translations/de/LC_MESSAGES/messages.po\n+++ b/searx/translations/de/LC_MESSAGES/messages.po\n@@ -12,6 +12,7 @@\n # Max , 2015\n # pointhi, 2014\n # rike, 2014\n+# S R , 2018\n # stf , 2014\n # stf , 2014\n # Thomas Pointhuber, 2016-2017\n@@ -21,8 +22,8 @@ msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2017-12-09 13:14+0000\\n\"\n-\"Last-Translator: Mario Siegmann \\n\"\n+\"PO-Revision-Date: 2018-12-28 11:02+0000\\n\"\n+\"Last-Translator: S R \\n\"\n \"Language-Team: German (http://www.transifex.com/asciimoo/searx/language/de/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -159,25 +160,25 @@ msgstr \"Keine Zusammenfassung für die Veröffentlichung verfügbar.\"\n \n #: searx/plugins/https_rewrite.py:32\n msgid \"Rewrite HTTP links to HTTPS if possible\"\n-msgstr \"Wandelt wenn möglich HTTP Links in HTTPS Links um\"\n+msgstr \"Wandelt wenn möglich HTTP-Links in HTTPS-Links um\"\n \n #: searx/plugins/infinite_scroll.py:3\n msgid \"Infinite scroll\"\n-msgstr \"Undendliches Scrollen\"\n+msgstr \"Unendliches Scrollen\"\n \n #: searx/plugins/infinite_scroll.py:4\n msgid \"Automatically load next page when scrolling to bottom of current page\"\n-msgstr \"Lädt automatisch die nächste Seite wenn das Ende der aktuellen Seite erreicht wurde\"\n+msgstr \"Lädt automatisch die nächste Seite, wenn das Ende der aktuellen Seite erreicht wurde\"\n \n #: searx/plugins/oa_doi_rewrite.py:9\n msgid \"Open Access DOI rewrite\"\n-msgstr \"Open Access DOI rewrite\"\n+msgstr \"Open-Access-DOI umschreiben\"\n \n #: searx/plugins/oa_doi_rewrite.py:10\n msgid \"\"\n \"Avoid paywalls by redirecting to open-access versions of publications when \"\n \"available\"\n-msgstr \"Bezahlbeschränkungen durch die Weiterleitung zu der verfügbaren Open-Access Version vermeiden\"\n+msgstr \"Bezahlbeschränkungen durch die Weiterleitung zu der verfügbaren Open-Access-Version vermeiden\"\n \n #: searx/plugins/open_results_on_new_tab.py:18\n #: searx/templates/oscar/preferences.html:114\n@@ -223,7 +224,7 @@ msgstr \"An Vim angelehnte Tastenkombinationen\"\n msgid \"\"\n \"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n \"\\\"h\\\" key on main or result page to get help.\"\n-msgstr \"Navigiere in der Ergebnisseite mit Vim ähnlichen Tastataurkombinationen (es wird JavaScript benötigt).\\nDrücke \\\"h\\\" auf der Start bzw. 
Ergebnisseite um eine Hifefenster anzuzeigen\"\n+msgstr \"Navigiere in der Ergebnisseite mit Vim ähnlichen Tastaturkombinationen (es wird JavaScript benötigt).\\nDrücke \\\"h\\\" auf der Start- bzw. Ergebnisseite, um ein Hifefenster anzuzeigen\"\n \n #: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n #: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\ndiff --git a/searx/translations/el_GR/LC_MESSAGES/messages.mo b/searx/translations/el_GR/LC_MESSAGES/messages.mo\nindex c9798d318b..2bc6a57a37 100644\nBinary files a/searx/translations/el_GR/LC_MESSAGES/messages.mo and b/searx/translations/el_GR/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/en/LC_MESSAGES/messages.mo b/searx/translations/en/LC_MESSAGES/messages.mo\nindex 9632e6ca50..0c270c06dd 100644\nBinary files a/searx/translations/en/LC_MESSAGES/messages.mo and b/searx/translations/en/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/eo/LC_MESSAGES/messages.mo b/searx/translations/eo/LC_MESSAGES/messages.mo\nindex cb0bc55702..cfad4d49ed 100644\nBinary files a/searx/translations/eo/LC_MESSAGES/messages.mo and b/searx/translations/eo/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/eo/LC_MESSAGES/messages.po b/searx/translations/eo/LC_MESSAGES/messages.po\nindex de31c9a4bf..cd7ffe80eb 100644\n--- a/searx/translations/eo/LC_MESSAGES/messages.po\n+++ b/searx/translations/eo/LC_MESSAGES/messages.po\n@@ -6,13 +6,14 @@\n # Jack Stehn , 2017\n # juanda097 , 2015-2016\n # pizzaiolo, 2016\n+# Václav Zouzalík , 2018\n msgid \"\"\n msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2017-11-01 20:31+0000\\n\"\n-\"Last-Translator: Adam Tauber \\n\"\n+\"PO-Revision-Date: 2018-11-22 08:33+0000\\n\"\n+\"Last-Translator: Václav Zouzalík \\n\"\n \"Language-Team: Esperanto (http://www.transifex.com/asciimoo/searx/language/eo/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -23,15 +24,15 @@ msgstr \"\"\n \n #: searx/search.py:137 searx/search.py:182\n msgid \"timeout\"\n-msgstr \"\"\n+msgstr \"tempolimo\"\n \n #: searx/search.py:144\n msgid \"request exception\"\n-msgstr \"\"\n+msgstr \"escepto de peto\"\n \n #: searx/search.py:151\n msgid \"unexpected crash\"\n-msgstr \"\"\n+msgstr \"neatendita paneo\"\n \n #: searx/webapp.py:136\n msgid \"files\"\n@@ -47,7 +48,7 @@ msgstr \"muziko\"\n \n #: searx/webapp.py:139\n msgid \"social media\"\n-msgstr \"sociaj retservoj\"\n+msgstr \"sociaj retoj\"\n \n #: searx/webapp.py:140\n msgid \"images\"\n@@ -75,11 +76,11 @@ msgstr \"scienco\"\n \n #: searx/webapp.py:399 searx/webapp.py:658\n msgid \"Invalid settings, please edit your preferences\"\n-msgstr \"\"\n+msgstr \"Nevalidaj agordoj, bonvolu redakti viajn agordojn\"\n \n #: searx/webapp.py:415\n msgid \"Invalid settings\"\n-msgstr \"\"\n+msgstr \"Nevalidaj agordoj\"\n \n #: searx/webapp.py:449 searx/webapp.py:493\n msgid \"search error\"\n@@ -95,23 +96,23 @@ msgstr \"antaŭ {hours} horo(j), {minutes} minuto(j)\"\n \n #: searx/answerers/random/answerer.py:53\n msgid \"Random value generator\"\n-msgstr \"\"\n+msgstr \"Hazardvalora generilo\"\n \n #: searx/answerers/random/answerer.py:54\n msgid \"Generate different random values\"\n-msgstr \"\"\n+msgstr \"Ĝi generas diversajn hazardajn valorojn\"\n \n #: searx/answerers/statistics/answerer.py:53\n msgid \"Statistics functions\"\n-msgstr \"\"\n+msgstr \"Statistikaj funkcioj\"\n \n #: 
searx/answerers/statistics/answerer.py:54\n msgid \"Compute {functions} of the arguments\"\n-msgstr \"\"\n+msgstr \"Kalkulas {functions} el la argumentoj\"\n \n #: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n msgid \"Engine time (sec)\"\n-msgstr \"\"\n+msgstr \"Motora tempo (s)\"\n \n #: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n msgid \"Page loads (sec)\"\n@@ -137,19 +138,19 @@ msgstr \"Eraroj\"\n \n #: searx/engines/pdbe.py:87\n msgid \"{title}&nbsp;(OBSOLETE)\"\n-msgstr \"\"\n+msgstr \"{title}&nbsp;(MALNOVA)\"\n \n #: searx/engines/pdbe.py:91\n msgid \"This entry has been superseded by\"\n-msgstr \"\"\n+msgstr \"Tiu ĉi enigo estis anstataŭigita per\"\n \n #: searx/engines/pubmed.py:74\n msgid \"No abstract is available for this publication.\"\n-msgstr \"\"\n+msgstr \"Neniu resumo atingeblas por tiu ĉi eldonaĵo.\"\n \n #: searx/plugins/https_rewrite.py:32\n msgid \"Rewrite HTTP links to HTTPS if possible\"\n-msgstr \"Reverki HTTP ligiloj HTTP se eble\"\n+msgstr \"Ŝanĝi HTTP-ligilojn al HTTPS, se eblas\"\n \n #: searx/plugins/infinite_scroll.py:3\n msgid \"Infinite scroll\"\n@@ -157,17 +158,17 @@ msgstr \"Senfina rulumado\"\n \n #: searx/plugins/infinite_scroll.py:4\n msgid \"Automatically load next page when scrolling to bottom of current page\"\n-msgstr \"Aŭtomate ŝarĝi la sekvan paĝon kiam rulumante al la subo de la nuna paĝo\"\n+msgstr \"Aŭtomate ŝarĝi sekvan paĝon rulumante al la subo de la nuna paĝo\"\n \n #: searx/plugins/oa_doi_rewrite.py:9\n msgid \"Open Access DOI rewrite\"\n-msgstr \"\"\n+msgstr \"Malfermalira COI-ŝanĝo\"\n \n #: searx/plugins/oa_doi_rewrite.py:10\n msgid \"\"\n \"Avoid paywalls by redirecting to open-access versions of publications when \"\n \"available\"\n-msgstr \"Eviti pagomurojn alidirektante al liberaj versioj de eldonaĵoj kiam eblas\"\n+msgstr \"Eviti pagomurojn per direkto al malfermaliraj versioj de eldonaĵoj, se eblas\"\n \n #: searx/plugins/open_results_on_new_tab.py:18\n #: searx/templates/oscar/preferences.html:114\n@@ -179,7 +180,7 @@ msgstr \"Malfermi rezultligilojn en novaj retumilaj langetoj\"\n msgid \"\"\n \"Results are opened in the same window by default. This plugin overwrites the\"\n \" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n-msgstr \"Oni malfermas rezultojn en la sama langeto defaŭlte. Ĉi tiu aldonaĵo ŝanĝas la kutima agmaniero por malfermi ligilojn en novaj langetoj/fenestroj. (ĜavaSkripto bezonata)\"\n+msgstr \"Oni malfermas rezultojn en la sama langeto defaŭlte. Ĉi tiu aldonaĵo ŝanĝas la kutiman agmanieron por malfermi ligilojn en novaj langetoj/fenestroj. (ĜavoSkripto bezonata)\"\n \n #: searx/plugins/search_on_category_select.py:18\n msgid \"Search on category select\"\n@@ -189,7 +190,7 @@ msgstr \"Serĉi en elektita kategorio\"\n msgid \"\"\n \"Perform search immediately if a category selected. Disable to select \"\n \"multiple categories. (JavaScript required)\"\n-msgstr \"Serĉi tuj se oni elektas kategorion. Malŝaltu ĝin por elekti plurajn kategoriojn (ĜavaSkripto bezonata)\"\n+msgstr \"Serĉi tuj se oni elektas kategorion. 
Malŝaltu ĝin por elekti plurajn kategoriojn (ĜavoSkripto bezonata)\"\n \n #: searx/plugins/self_info.py:20\n msgid \"\"\n@@ -203,7 +204,7 @@ msgstr \"Forigilo de URL-spuriloj\"\n \n #: searx/plugins/tracker_url_remover.py:27\n msgid \"Remove trackers arguments from the returned URL\"\n-msgstr \"Forviŝi spuraj esprimoj de la URL\"\n+msgstr \"Forviŝi spurajn argumentojn el la ricevita URL\"\n \n #: searx/plugins/vim_hotkeys.py:3\n msgid \"Vim-like hotkeys\"\n@@ -213,7 +214,7 @@ msgstr \"Vim-ŝajnaj klavkomandoj\"\n msgid \"\"\n \"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n \"\\\"h\\\" key on main or result page to get help.\"\n-msgstr \"Tranavigi serĉrezultojn per Vim-ŝajnaj klavkomandoj (ĜavaSkripto bezonata). Premu \\\"h\\\" por helptekstaro en rezultpaĝo.\"\n+msgstr \"Tranavigi serĉrezultojn per Vim-ŝajnaj klavkomandoj (ĜavoSkripto bezonata). Premu \\\"h\\\" por helptekstaro en ĉef- aŭ rezultpaĝo.\"\n \n #: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n #: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n@@ -242,7 +243,7 @@ msgstr \"Serĉopaĝo\"\n #: searx/templates/oscar/preferences.html:3\n #: searx/templates/pix-art/index.html:8\n msgid \"preferences\"\n-msgstr \"preferoj\"\n+msgstr \"agordoj\"\n \n #: searx/templates/courgette/index.html:11\n #: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2\n@@ -256,7 +257,7 @@ msgstr \"pri\"\n #: searx/templates/pix-art/preferences.html:5\n #: searx/templates/simple/preferences.html:26\n msgid \"Preferences\"\n-msgstr \"Preferoj\"\n+msgstr \"Agordoj\"\n \n #: searx/templates/courgette/preferences.html:9\n #: searx/templates/legacy/preferences.html:9\n@@ -282,7 +283,7 @@ msgstr \"Serĉolingvo\"\n #: searx/templates/simple/languages.html:2\n #: searx/templates/simple/preferences.html:42\n msgid \"Default language\"\n-msgstr \"\"\n+msgstr \"Defaŭlta lingvo\"\n \n #: searx/templates/courgette/preferences.html:24\n #: searx/templates/legacy/preferences.html:25\n@@ -400,7 +401,7 @@ msgstr \"Ruĝa\"\n #: searx/templates/pix-art/preferences.html:49\n #: searx/templates/simple/preferences.html:77\n msgid \"Currently used search engines\"\n-msgstr \" Aktuale uzitajn serĉilojn\"\n+msgstr \" Aktuale uzataj serĉiloj\"\n \n #: searx/templates/courgette/preferences.html:100\n #: searx/templates/legacy/preferences.html:97\n@@ -445,7 +446,7 @@ msgstr \"Bloki\"\n msgid \"\"\n \"These settings are stored in your cookies, this allows us not to store this \"\n \"data about you.\"\n-msgstr \"Tiuj agordoj estas konservitaj en viaj kuketoj kaj tio eblas, ke ni ne konservu tiujn datumojn pri vi.\"\n+msgstr \"Tiuj ĉi agordoj estas konservitaj en viaj kuketoj, kio ebligas al ni ne konservi tiujn datumojn pri vi en nia servilo.\"\n \n #: searx/templates/courgette/preferences.html:124\n #: searx/templates/legacy/preferences.html:121\n@@ -478,7 +479,7 @@ msgstr \"Reagordi al defaŭlto\"\n #: searx/templates/pix-art/preferences.html:79\n #: searx/templates/simple/preferences.html:187\n msgid \"back\"\n-msgstr \"antaŭe\"\n+msgstr \"antaŭen\"\n \n #: searx/templates/courgette/results.html:12\n #: searx/templates/legacy/results.html:13\n@@ -492,7 +493,7 @@ msgstr \"Serĉi URL\"\n #: searx/templates/oscar/results.html:141\n #: searx/templates/simple/results.html:62\n msgid \"Download results\"\n-msgstr \"Alŝutaj rezultoj\"\n+msgstr \"Elŝuti rezultojn\"\n \n #: searx/templates/courgette/results.html:34\n #: searx/templates/legacy/results.html:35\n@@ -519,7 +520,7 @@ msgstr \" antaŭa 
paĝo\"\n #: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84\n #: searx/templates/simple/results.html:145\n msgid \"next page\"\n-msgstr \" sekvanta paĝo\"\n+msgstr \" sekva paĝo\"\n \n #: searx/templates/courgette/search.html:3\n #: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6\n@@ -615,11 +616,11 @@ msgstr \"Fermi\"\n #: searx/templates/simple/messages/no_results.html:4\n #: searx/templates/simple/results.html:25\n msgid \"Error!\"\n-msgstr \"\"\n+msgstr \"Eraro!\"\n \n #: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n msgid \"Powered by\"\n-msgstr \"Funkciigita de\"\n+msgstr \"Funkciigita per\"\n \n #: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n msgid \"a privacy-respecting, hackable metasearch engine\"\n@@ -628,15 +629,15 @@ msgstr \"kodumebla metaserĉilo kiu respektas vian privatecon\"\n #: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n #: searx/templates/simple/macros.html:43\n msgid \"proxied\"\n-msgstr \"\"\n+msgstr \"prokurata\"\n \n #: searx/templates/oscar/macros.html:92\n msgid \"supported\"\n-msgstr \"\"\n+msgstr \"subtenata\"\n \n #: searx/templates/oscar/macros.html:96\n msgid \"not supported\"\n-msgstr \"\"\n+msgstr \"nesubtenata\"\n \n #: searx/templates/oscar/preferences.html:13\n #: searx/templates/oscar/preferences.html:22\n@@ -658,7 +659,7 @@ msgstr \"Aldonaĵoj\"\n #: searx/templates/oscar/preferences.html:16\n #: searx/templates/oscar/preferences.html:245\n msgid \"Answerers\"\n-msgstr \"\"\n+msgstr \"Respondiloj\"\n \n #: searx/templates/oscar/preferences.html:17\n #: searx/templates/oscar/preferences.html:272\n@@ -673,7 +674,7 @@ msgstr \"Kiun lingvon vi pli ŝatas por serĉi?\"\n #: searx/templates/oscar/preferences.html:48\n #: searx/templates/simple/preferences.html:128\n msgid \"Change the language of the layout\"\n-msgstr \"Ŝanĝi la fasonadan lingvon\"\n+msgstr \"Ŝanĝi lingvon de la fasono\"\n \n #: searx/templates/oscar/preferences.html:58\n #: searx/templates/simple/preferences.html:60\n@@ -690,7 +691,7 @@ msgid \"\"\n \"Change how forms are submited, learn more about request methods\"\n-msgstr \" Ŝanĝi kiel formoj estas senditaj, < href=\\\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\\\" rel=\\\"external\\\"> Lerni pli pri peto-metodoj \"\n+msgstr \"Ŝanĝi kiel formoj estas sendataj, sciu pli pri peto-metodoj \"\n \n #: searx/templates/oscar/preferences.html:87\n #: searx/templates/simple/preferences.html:71\n@@ -700,7 +701,7 @@ msgstr \"Filtri enhavon\"\n #: searx/templates/oscar/preferences.html:97\n #: searx/templates/simple/preferences.html:139\n msgid \"Change searx layout\"\n-msgstr \"Ŝanĝi searx-fasonadon\"\n+msgstr \"Ŝanĝi fasonon de Searx\"\n \n #: searx/templates/oscar/preferences.html:106\n #: searx/templates/oscar/preferences.html:111\n@@ -714,13 +715,13 @@ msgstr \"Stilo\"\n \n #: searx/templates/oscar/preferences.html:122\n msgid \"Open Access DOI resolver\"\n-msgstr \"\"\n+msgstr \"Malfermalira COI-solvilo\"\n \n #: searx/templates/oscar/preferences.html:123\n msgid \"\"\n \"Redirect to open-access versions of publications when available (plugin \"\n \"required)\"\n-msgstr \"\"\n+msgstr \"Direkti al malfermaliraj versioj de eldonaĵoj, se eblas (aldonaĵo necesas)\"\n \n #: searx/templates/oscar/preferences.html:163\n #: searx/templates/oscar/preferences.html:175\n@@ -731,13 +732,13 @@ msgstr \"Fulmoklavo\"\n #: searx/templates/oscar/preferences.html:164\n #: 
searx/templates/oscar/preferences.html:174\n msgid \"Selected language\"\n-msgstr \"\"\n+msgstr \"Elekti lingvon\"\n \n #: searx/templates/oscar/preferences.html:166\n #: searx/templates/oscar/preferences.html:172\n #: searx/templates/simple/preferences.html:91\n msgid \"Time range\"\n-msgstr \"\"\n+msgstr \"Tempa intervalo\"\n \n #: searx/templates/oscar/preferences.html:167\n #: searx/templates/oscar/preferences.html:171\n@@ -753,29 +754,29 @@ msgstr \"Maksimuma tempo\"\n \n #: searx/templates/oscar/preferences.html:248\n msgid \"This is the list of searx's instant answering modules.\"\n-msgstr \"\"\n+msgstr \"Tio ĉi estas listo de tuje respondantaj moduloj de Searx.\"\n \n #: searx/templates/oscar/preferences.html:252\n msgid \"Name\"\n-msgstr \"\"\n+msgstr \"Nomo\"\n \n #: searx/templates/oscar/preferences.html:253\n msgid \"Keywords\"\n-msgstr \"\"\n+msgstr \"Ŝlosilvortoj\"\n \n #: searx/templates/oscar/preferences.html:254\n msgid \"Description\"\n-msgstr \"\"\n+msgstr \"Priskribo\"\n \n #: searx/templates/oscar/preferences.html:255\n msgid \"Examples\"\n-msgstr \"\"\n+msgstr \"Ekzemploj\"\n \n #: searx/templates/oscar/preferences.html:275\n msgid \"\"\n \"This is the list of cookies and their values searx is storing on your \"\n \"computer.\"\n-msgstr \"Ĉi tiu estas la listo de kuketoj kaj siaj valoroj, kiujn searx konservas en via komputilo.\"\n+msgstr \"Ĉi tio estas listo de kuketoj kaj iliaj valoroj, kiujn searx konservas en via komputilo.\"\n \n #: searx/templates/oscar/preferences.html:276\n msgid \"With that list, you can assess searx transparency.\"\n@@ -791,13 +792,13 @@ msgstr \"Valoro\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"Search URL of the currently saved preferences\"\n-msgstr \"\"\n+msgstr \"Serĉo-URL kun aktuale konservitaj agordoj\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"\"\n \"Note: specifying custom settings in the search URL can reduce privacy by \"\n \"leaking data to the clicked result sites.\"\n-msgstr \"\"\n+msgstr \"Rimarko: Precizigo de propraj agordoj en la serĉo-URL povas malaltigi privatecon per nevola diskonigo de la datumoj al alklikantaj retejoj.\"\n \n #: searx/templates/oscar/results.html:17\n msgid \"Search results\"\n@@ -806,12 +807,12 @@ msgstr \"Serĉrezultoj\"\n #: searx/templates/oscar/results.html:21\n #: searx/templates/simple/results.html:84\n msgid \"Try searching for:\"\n-msgstr \"\"\n+msgstr \"Provu serĉi:\"\n \n #: searx/templates/oscar/results.html:100\n #: searx/templates/simple/results.html:25\n msgid \"Engines cannot retrieve results\"\n-msgstr \"\"\n+msgstr \"Motoroj ne povas trovi rezultojn\"\n \n #: searx/templates/oscar/results.html:131\n msgid \"Links\"\n@@ -830,7 +831,7 @@ msgstr \"statistikoj\"\n #: searx/templates/oscar/time-range.html:3\n #: searx/templates/simple/time-range.html:3\n msgid \"Anytime\"\n-msgstr \"Ie\"\n+msgstr \"Iam ajn\"\n \n #: searx/templates/oscar/time-range.html:6\n #: searx/templates/simple/time-range.html:6\n@@ -850,7 +851,7 @@ msgstr \"Pasinta monato\"\n #: searx/templates/oscar/time-range.html:15\n #: searx/templates/simple/time-range.html:15\n msgid \"Last year\"\n-msgstr \"\"\n+msgstr \"Pasinta jaro\"\n \n #: searx/templates/oscar/messages/first_time.html:6\n #: searx/templates/oscar/messages/no_data_available.html:3\n@@ -859,7 +860,7 @@ msgstr \"Atentu!\"\n \n #: searx/templates/oscar/messages/first_time.html:7\n msgid \"It look like you are using searx first time.\"\n-msgstr \"Ŝajnas, ke ĉi tiu estas via unua fojo uzante searx\"\n+msgstr \"Ŝajnas, ke ĉi 
tio estas via unua fojo, kiam vi uzas searx.\"\n \n #: searx/templates/oscar/messages/no_cookies.html:3\n msgid \"Information!\"\n@@ -867,7 +868,7 @@ msgstr \"Informoj!\"\n \n #: searx/templates/oscar/messages/no_cookies.html:4\n msgid \"currently, there are no cookies defined.\"\n-msgstr \"ĉi-momente, ne estas kuketoj difinitaj.\"\n+msgstr \"nun ne estas ajnaj kuketoj difinitaj.\"\n \n #: searx/templates/oscar/messages/no_data_available.html:4\n msgid \"There is currently no data available. \"\n@@ -876,17 +877,17 @@ msgstr \"Nun ne estas datumoj disponeblaj.\"\n #: searx/templates/oscar/messages/no_results.html:4\n #: searx/templates/simple/messages/no_results.html:4\n msgid \"Engines cannot retrieve results.\"\n-msgstr \"\"\n+msgstr \"Motoroj ne povas trovi rezultojn.\"\n \n #: searx/templates/oscar/messages/no_results.html:10\n #: searx/templates/simple/messages/no_results.html:10\n msgid \"Please, try again later or find another searx instance.\"\n-msgstr \"\"\n+msgstr \"Bonvolu provi ĝin poste aŭ trovi aliajn searx-instancon.\"\n \n #: searx/templates/oscar/messages/no_results.html:14\n #: searx/templates/simple/messages/no_results.html:14\n msgid \"Sorry!\"\n-msgstr \"Mizera!\"\n+msgstr \"Pardonu!\"\n \n #: searx/templates/oscar/messages/no_results.html:15\n #: searx/templates/simple/messages/no_results.html:15\n@@ -952,7 +953,7 @@ msgstr \"kaŝi detalojn\"\n #: searx/templates/oscar/result_templates/torrent.html:7\n #: searx/templates/simple/result_templates/torrent.html:11\n msgid \"Filesize\"\n-msgstr \"Dosiergrando\"\n+msgstr \"Dosiergrandeco\"\n \n #: searx/templates/oscar/result_templates/torrent.html:9\n #: searx/templates/simple/result_templates/torrent.html:12\n@@ -1000,16 +1001,16 @@ msgstr \"Ŝarĝi pli...\"\n \n #: searx/templates/simple/base.html:31\n msgid \"No item found\"\n-msgstr \"\"\n+msgstr \"Nenio trovita\"\n \n #: searx/templates/simple/preferences.html:89\n msgid \"Supports selected language\"\n-msgstr \"\"\n+msgstr \"Subtenas elektitan lingvon\"\n \n #: searx/templates/simple/preferences.html:118\n msgid \"User interface\"\n-msgstr \"\"\n+msgstr \"Fasado\"\n \n #: searx/templates/simple/preferences.html:154\n msgid \"Privacy\"\n-msgstr \"\"\n+msgstr \"Privateco\"\ndiff --git a/searx/translations/es/LC_MESSAGES/messages.mo b/searx/translations/es/LC_MESSAGES/messages.mo\nindex c8c08316e9..358cce8cbd 100644\nBinary files a/searx/translations/es/LC_MESSAGES/messages.mo and b/searx/translations/es/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/eu/LC_MESSAGES/messages.mo b/searx/translations/eu/LC_MESSAGES/messages.mo\nnew file mode 100644\nindex 0000000000..db58fdc84a\nBinary files /dev/null and b/searx/translations/eu/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/eu/LC_MESSAGES/messages.po b/searx/translations/eu/LC_MESSAGES/messages.po\nnew file mode 100644\nindex 0000000000..b6fa194e10\n--- /dev/null\n+++ b/searx/translations/eu/LC_MESSAGES/messages.po\n@@ -0,0 +1,1015 @@\n+# Translations template for PROJECT.\n+# Copyright (C) 2017 ORGANIZATION\n+# This file is distributed under the same license as the PROJECT project.\n+# \n+# Translators:\n+# beriain , 2018\n+# beriain , 2018\n+# Txopi , 2016\n+msgid \"\"\n+msgstr \"\"\n+\"Project-Id-Version: searx\\n\"\n+\"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n+\"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n+\"PO-Revision-Date: 2018-11-06 16:39+0000\\n\"\n+\"Last-Translator: beriain \\n\"\n+\"Language-Team: Basque 
(http://www.transifex.com/asciimoo/searx/language/eu/)\\n\"\n+\"MIME-Version: 1.0\\n\"\n+\"Content-Type: text/plain; charset=UTF-8\\n\"\n+\"Content-Transfer-Encoding: 8bit\\n\"\n+\"Generated-By: Babel 2.3.4\\n\"\n+\"Language: eu\\n\"\n+\"Plural-Forms: nplurals=2; plural=(n != 1);\\n\"\n+\n+#: searx/search.py:137 searx/search.py:182\n+msgid \"timeout\"\n+msgstr \"denbora agortzea\"\n+\n+#: searx/search.py:144\n+msgid \"request exception\"\n+msgstr \"salbuespena eskaeran\"\n+\n+#: searx/search.py:151\n+msgid \"unexpected crash\"\n+msgstr \"ustekabeko gelditzea\"\n+\n+#: searx/webapp.py:136\n+msgid \"files\"\n+msgstr \"fitxategiak\"\n+\n+#: searx/webapp.py:137\n+msgid \"general\"\n+msgstr \"orokorra\"\n+\n+#: searx/webapp.py:138\n+msgid \"music\"\n+msgstr \"musika\"\n+\n+#: searx/webapp.py:139\n+msgid \"social media\"\n+msgstr \"multimedia soziala\"\n+\n+#: searx/webapp.py:140\n+msgid \"images\"\n+msgstr \"irudiak\"\n+\n+#: searx/webapp.py:141\n+msgid \"videos\"\n+msgstr \"bideoak\"\n+\n+#: searx/webapp.py:142\n+msgid \"it\"\n+msgstr \"it\"\n+\n+#: searx/webapp.py:143\n+msgid \"news\"\n+msgstr \"berriak\"\n+\n+#: searx/webapp.py:144\n+msgid \"map\"\n+msgstr \"mapa\"\n+\n+#: searx/webapp.py:145\n+msgid \"science\"\n+msgstr \"zientzia\"\n+\n+#: searx/webapp.py:399 searx/webapp.py:658\n+msgid \"Invalid settings, please edit your preferences\"\n+msgstr \"Ezarpen ez baliodunak, mesedez editatu zure hobespenak\"\n+\n+#: searx/webapp.py:415\n+msgid \"Invalid settings\"\n+msgstr \"Ezarpen ez baliodunak\"\n+\n+#: searx/webapp.py:449 searx/webapp.py:493\n+msgid \"search error\"\n+msgstr \"bilaketa akatsa\"\n+\n+#: searx/webapp.py:530\n+msgid \"{minutes} minute(s) ago\"\n+msgstr \"duela {minutes} minutu\"\n+\n+#: searx/webapp.py:532\n+msgid \"{hours} hour(s), {minutes} minute(s) ago\"\n+msgstr \"duela {hours} ordu eta {minutes} minutu\"\n+\n+#: searx/answerers/random/answerer.py:53\n+msgid \"Random value generator\"\n+msgstr \"Ausazko balio sortzailea\"\n+\n+#: searx/answerers/random/answerer.py:54\n+msgid \"Generate different random values\"\n+msgstr \"Ausazko balio ezberdinak sortu\"\n+\n+#: searx/answerers/statistics/answerer.py:53\n+msgid \"Statistics functions\"\n+msgstr \"Funtzio estatistikoak\"\n+\n+#: searx/answerers/statistics/answerer.py:54\n+msgid \"Compute {functions} of the arguments\"\n+msgstr \"Parametroen {functions} zenbatu\"\n+\n+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n+msgid \"Engine time (sec)\"\n+msgstr \"Bilatzailearen denbora (seg)\"\n+\n+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n+msgid \"Page loads (sec)\"\n+msgstr \"Orri kargak (seg)\"\n+\n+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n+#: searx/templates/oscar/results.html:95\n+#: searx/templates/simple/results.html:20\n+msgid \"Number of results\"\n+msgstr \"Emaitza kopurua\"\n+\n+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213\n+msgid \"Scores\"\n+msgstr \"Balorazioak\"\n+\n+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217\n+msgid \"Scores per result\"\n+msgstr \"Balorazioak emaitza bakoitzeko\"\n+\n+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n+msgid \"Errors\"\n+msgstr \"Erroreak\"\n+\n+#: searx/engines/pdbe.py:87\n+msgid \"{title}&nbsp;(OBSOLETE)\"\n+msgstr \"{title}&nbsp;(ZAHARKITUA)\"\n+\n+#: searx/engines/pdbe.py:91\n+msgid \"This entry has been superseded by\"\n+msgstr \"Sarrera hau hurrengoarekin ordezkatu da\"\n+\n+#: searx/engines/pubmed.py:74\n+msgid 
\"No abstract is available for this publication.\"\n+msgstr \"Ez dago abstrakturik eskuragarri argitalpen honetarako.\"\n+\n+#: searx/plugins/https_rewrite.py:32\n+msgid \"Rewrite HTTP links to HTTPS if possible\"\n+msgstr \"HTTP loturak HTTPS bihurtu ahal denean\"\n+\n+#: searx/plugins/infinite_scroll.py:3\n+msgid \"Infinite scroll\"\n+msgstr \"Korritze amaigabea\"\n+\n+#: searx/plugins/infinite_scroll.py:4\n+msgid \"Automatically load next page when scrolling to bottom of current page\"\n+msgstr \"Hurrengo orria automatikoki kargatu uneko orriaren behekaldera mugitzerakoan\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:9\n+msgid \"Open Access DOI rewrite\"\n+msgstr \"Berridatzi Open Access DOI\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:10\n+msgid \"\"\n+\"Avoid paywalls by redirecting to open-access versions of publications when \"\n+\"available\"\n+msgstr \"Ordainketa hormak sahiestu argitalpenen sartze-askeko bertsioetara berbidaliz ahal denean\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:18\n+#: searx/templates/oscar/preferences.html:114\n+#: searx/templates/simple/preferences.html:149\n+msgid \"Open result links on new browser tabs\"\n+msgstr \"Emaitzen estekak nabigatzailearen fitxa berrietan ireki\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:19\n+msgid \"\"\n+\"Results are opened in the same window by default. This plugin overwrites the\"\n+\" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n+msgstr \"Emaitzak leiho berdinean irekitzen dira lehenetsi bezala. Plugin honek lehenetsitako jokabidea aldatzen du estekak fitxa/leiho berrietan irekitzeko. (JavaScript behar du)\"\n+\n+#: searx/plugins/search_on_category_select.py:18\n+msgid \"Search on category select\"\n+msgstr \"Bilatu kategoria hautatzerakoan\"\n+\n+#: searx/plugins/search_on_category_select.py:19\n+msgid \"\"\n+\"Perform search immediately if a category selected. Disable to select \"\n+\"multiple categories. (JavaScript required)\"\n+msgstr \"Bilaketa egin kategoria hautatu bezain laster. Ezgaitu ezazu hainbat kategoria hautatu ahal izateko. (JavaScript behar du)\"\n+\n+#: searx/plugins/self_info.py:20\n+msgid \"\"\n+\"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n+\"contains \\\"user agent\\\".\"\n+msgstr \"Zure IPa erakutsi bilatutakoa \\\"ip\\\" bada eta zure user agenta bilatutakoa \\\"user agent\\\" bada.\"\n+\n+#: searx/plugins/tracker_url_remover.py:26\n+msgid \"Tracker URL remover\"\n+msgstr \"URL aztarnariak kendu\"\n+\n+#: searx/plugins/tracker_url_remover.py:27\n+msgid \"Remove trackers arguments from the returned URL\"\n+msgstr \"Aztarnarien argumentuak kendu itzulitako URLtik\"\n+\n+#: searx/plugins/vim_hotkeys.py:3\n+msgid \"Vim-like hotkeys\"\n+msgstr \"Vim antzeko laster-teklak\"\n+\n+#: searx/plugins/vim_hotkeys.py:4\n+msgid \"\"\n+\"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n+\"\\\"h\\\" key on main or result page to get help.\"\n+msgstr \"Emaitzetan zehar Vim bezalako tekla azkarrekin nabigatu (JavaScript behar du). 
Sakatu \\\"h\\\" tekla orri nagusian edo emaitzen orrian laguntza ikusteko.\"\n+\n+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n+#: searx/templates/simple/404.html:4\n+msgid \"Page not found\"\n+msgstr \"Orria ez da aurkitu\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+#, python-format\n+msgid \"Go to %(search_page)s.\"\n+msgstr \"%(search_page)s(e)ra joan.\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+msgid \"search page\"\n+msgstr \"bilaketa orria\"\n+\n+#: searx/templates/courgette/index.html:9\n+#: searx/templates/courgette/index.html:13\n+#: searx/templates/courgette/results.html:5\n+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12\n+#: searx/templates/oscar/navbar.html:7\n+#: searx/templates/oscar/preferences.html:3\n+#: searx/templates/pix-art/index.html:8\n+msgid \"preferences\"\n+msgstr \"hobespenak\"\n+\n+#: searx/templates/courgette/index.html:11\n+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2\n+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7\n+msgid \"about\"\n+msgstr \"honi buruz\"\n+\n+#: searx/templates/courgette/preferences.html:5\n+#: searx/templates/legacy/preferences.html:5\n+#: searx/templates/oscar/preferences.html:8\n+#: searx/templates/pix-art/preferences.html:5\n+#: searx/templates/simple/preferences.html:26\n+msgid \"Preferences\"\n+msgstr \"Hobespenak\"\n+\n+#: searx/templates/courgette/preferences.html:9\n+#: searx/templates/legacy/preferences.html:9\n+#: searx/templates/oscar/preferences.html:33\n+#: searx/templates/oscar/preferences.html:35\n+#: searx/templates/simple/preferences.html:34\n+msgid \"Default categories\"\n+msgstr \"Lehenetsitako kategoriak\"\n+\n+#: searx/templates/courgette/preferences.html:13\n+#: searx/templates/legacy/preferences.html:14\n+#: searx/templates/oscar/preferences.html:41\n+#: searx/templates/pix-art/preferences.html:9\n+#: searx/templates/simple/preferences.html:39\n+#: searx/templates/simple/preferences.html:163\n+msgid \"Search language\"\n+msgstr \"Bilaketaren hizkuntza\"\n+\n+#: searx/templates/courgette/preferences.html:16\n+#: searx/templates/legacy/preferences.html:17\n+#: searx/templates/oscar/languages.html:6\n+#: searx/templates/pix-art/preferences.html:12\n+#: searx/templates/simple/languages.html:2\n+#: searx/templates/simple/preferences.html:42\n+msgid \"Default language\"\n+msgstr \"Lehenetsitako hizkuntza\"\n+\n+#: searx/templates/courgette/preferences.html:24\n+#: searx/templates/legacy/preferences.html:25\n+#: searx/templates/oscar/preferences.html:47\n+#: searx/templates/pix-art/preferences.html:20\n+#: searx/templates/simple/preferences.html:120\n+msgid \"Interface language\"\n+msgstr \"Interfazearen hizkuntza\"\n+\n+#: searx/templates/courgette/preferences.html:34\n+#: searx/templates/legacy/preferences.html:35\n+#: searx/templates/oscar/preferences.html:57\n+#: searx/templates/simple/preferences.html:51\n+msgid \"Autocomplete\"\n+msgstr \"Osatze automatikoa\"\n+\n+#: searx/templates/courgette/preferences.html:45\n+#: searx/templates/legacy/preferences.html:46\n+#: searx/templates/oscar/preferences.html:68\n+#: 
searx/templates/simple/preferences.html:166\n+msgid \"Image proxy\"\n+msgstr \"Irudietarako proxya\"\n+\n+#: searx/templates/courgette/preferences.html:48\n+#: searx/templates/legacy/preferences.html:49\n+#: searx/templates/oscar/preferences.html:72\n+#: searx/templates/simple/preferences.html:169\n+msgid \"Enabled\"\n+msgstr \"Gaituta\"\n+\n+#: searx/templates/courgette/preferences.html:49\n+#: searx/templates/legacy/preferences.html:50\n+#: searx/templates/oscar/preferences.html:73\n+#: searx/templates/simple/preferences.html:170\n+msgid \"Disabled\"\n+msgstr \"Desgaituta\"\n+\n+#: searx/templates/courgette/preferences.html:54\n+#: searx/templates/legacy/preferences.html:55\n+#: searx/templates/oscar/preferences.html:77\n+#: searx/templates/pix-art/preferences.html:30\n+#: searx/templates/simple/preferences.html:156\n+msgid \"Method\"\n+msgstr \"Metodoa\"\n+\n+#: searx/templates/courgette/preferences.html:63\n+#: searx/templates/legacy/preferences.html:64\n+#: searx/templates/oscar/preferences.html:86\n+#: searx/templates/oscar/preferences.html:165\n+#: searx/templates/oscar/preferences.html:173\n+#: searx/templates/simple/preferences.html:63\n+#: searx/templates/simple/preferences.html:90\n+msgid \"SafeSearch\"\n+msgstr \"Bilaketa segurua\"\n+\n+#: searx/templates/courgette/preferences.html:66\n+#: searx/templates/legacy/preferences.html:67\n+#: searx/templates/oscar/preferences.html:90\n+#: searx/templates/simple/preferences.html:66\n+msgid \"Strict\"\n+msgstr \"Zorrotza\"\n+\n+#: searx/templates/courgette/preferences.html:67\n+#: searx/templates/legacy/preferences.html:68\n+#: searx/templates/oscar/preferences.html:91\n+#: searx/templates/simple/preferences.html:67\n+msgid \"Moderate\"\n+msgstr \"Moderatua\"\n+\n+#: searx/templates/courgette/preferences.html:68\n+#: searx/templates/legacy/preferences.html:69\n+#: searx/templates/oscar/preferences.html:92\n+#: searx/templates/simple/preferences.html:68\n+msgid \"None\"\n+msgstr \"Bat ere ez\"\n+\n+#: searx/templates/courgette/preferences.html:73\n+#: searx/templates/legacy/preferences.html:74\n+#: searx/templates/oscar/preferences.html:96\n+#: searx/templates/pix-art/preferences.html:39\n+#: searx/templates/simple/preferences.html:131\n+msgid \"Themes\"\n+msgstr \"Itxurak\"\n+\n+#: searx/templates/courgette/preferences.html:83\n+msgid \"Color\"\n+msgstr \"Kolorea\"\n+\n+#: searx/templates/courgette/preferences.html:86\n+msgid \"Blue (default)\"\n+msgstr \"Urdina (lehenetsia)\"\n+\n+#: searx/templates/courgette/preferences.html:87\n+msgid \"Violet\"\n+msgstr \"Bioleta\"\n+\n+#: searx/templates/courgette/preferences.html:88\n+msgid \"Green\"\n+msgstr \"Berdea\"\n+\n+#: searx/templates/courgette/preferences.html:89\n+msgid \"Cyan\"\n+msgstr \"Zian\"\n+\n+#: searx/templates/courgette/preferences.html:90\n+msgid \"Orange\"\n+msgstr \"Laranja\"\n+\n+#: searx/templates/courgette/preferences.html:91\n+msgid \"Red\"\n+msgstr \"Gorria\"\n+\n+#: searx/templates/courgette/preferences.html:96\n+#: searx/templates/legacy/preferences.html:93\n+#: searx/templates/pix-art/preferences.html:49\n+#: searx/templates/simple/preferences.html:77\n+msgid \"Currently used search engines\"\n+msgstr \"Erabiliak izaten ari diren bilatzaileak\"\n+\n+#: searx/templates/courgette/preferences.html:100\n+#: searx/templates/legacy/preferences.html:97\n+#: searx/templates/oscar/preferences.html:162\n+#: searx/templates/oscar/preferences.html:176\n+#: searx/templates/pix-art/preferences.html:53\n+#: searx/templates/simple/preferences.html:87\n+msgid \"Engine 
name\"\n+msgstr \"Bilatzailearen izena\"\n+\n+#: searx/templates/courgette/preferences.html:101\n+#: searx/templates/legacy/preferences.html:98\n+msgid \"Category\"\n+msgstr \"Kategoria\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:113\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:110\n+#: searx/templates/oscar/preferences.html:161\n+#: searx/templates/oscar/preferences.html:177\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:64\n+#: searx/templates/simple/preferences.html:86\n+msgid \"Allow\"\n+msgstr \"Baimendu\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:114\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:111\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:65\n+msgid \"Block\"\n+msgstr \"Blokeatu\"\n+\n+#: searx/templates/courgette/preferences.html:122\n+#: searx/templates/legacy/preferences.html:119\n+#: searx/templates/oscar/preferences.html:297\n+#: searx/templates/pix-art/preferences.html:73\n+#: searx/templates/simple/preferences.html:180\n+msgid \"\"\n+\"These settings are stored in your cookies, this allows us not to store this \"\n+\"data about you.\"\n+msgstr \"Ezarpen hauek zure cookietan gurdetzen dira, honek zuri buruzko informaziorik ez gordetzea baimentzen digu.\"\n+\n+#: searx/templates/courgette/preferences.html:124\n+#: searx/templates/legacy/preferences.html:121\n+#: searx/templates/oscar/preferences.html:299\n+#: searx/templates/pix-art/preferences.html:75\n+#: searx/templates/simple/preferences.html:182\n+msgid \"\"\n+\"These cookies serve your sole convenience, we don't use these cookies to \"\n+\"track you.\"\n+msgstr \"Cookie hauek zure onurarako besterik ez dira, ez ditugu zure jarraipenik egiteko erabiltzen.\"\n+\n+#: searx/templates/courgette/preferences.html:127\n+#: searx/templates/legacy/preferences.html:124\n+#: searx/templates/oscar/preferences.html:305\n+#: searx/templates/pix-art/preferences.html:78\n+#: searx/templates/simple/preferences.html:185\n+msgid \"save\"\n+msgstr \"gorde\"\n+\n+#: searx/templates/courgette/preferences.html:128\n+#: searx/templates/legacy/preferences.html:125\n+#: searx/templates/oscar/preferences.html:307\n+#: searx/templates/simple/preferences.html:186\n+msgid \"Reset defaults\"\n+msgstr \"Berrezarri lehenetsiak\"\n+\n+#: searx/templates/courgette/preferences.html:129\n+#: searx/templates/legacy/preferences.html:126\n+#: searx/templates/oscar/preferences.html:306\n+#: searx/templates/pix-art/preferences.html:79\n+#: searx/templates/simple/preferences.html:187\n+msgid \"back\"\n+msgstr \"atzera\"\n+\n+#: searx/templates/courgette/results.html:12\n+#: searx/templates/legacy/results.html:13\n+#: searx/templates/oscar/results.html:136\n+#: searx/templates/simple/results.html:58\n+msgid \"Search URL\"\n+msgstr \"Bilaketa URLa\"\n+\n+#: searx/templates/courgette/results.html:16\n+#: searx/templates/legacy/results.html:17\n+#: searx/templates/oscar/results.html:141\n+#: searx/templates/simple/results.html:62\n+msgid \"Download results\"\n+msgstr \"Emaitzak deskargatu\"\n+\n+#: searx/templates/courgette/results.html:34\n+#: searx/templates/legacy/results.html:35\n+#: searx/templates/simple/results.html:10\n+msgid \"Answers\"\n+msgstr \"Erantzunak\"\n+\n+#: searx/templates/courgette/results.html:42\n+#: 
searx/templates/legacy/results.html:43\n+#: searx/templates/oscar/results.html:116\n+#: searx/templates/simple/results.html:42\n+msgid \"Suggestions\"\n+msgstr \"Iradokizunak\"\n+\n+#: searx/templates/courgette/results.html:70\n+#: searx/templates/legacy/results.html:81\n+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78\n+#: searx/templates/simple/results.html:130\n+msgid \"previous page\"\n+msgstr \"aurreko orrialdea\"\n+\n+#: searx/templates/courgette/results.html:81\n+#: searx/templates/legacy/results.html:92\n+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84\n+#: searx/templates/simple/results.html:145\n+msgid \"next page\"\n+msgstr \"hurrengo orrialdea\"\n+\n+#: searx/templates/courgette/search.html:3\n+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6\n+#: searx/templates/oscar/search_full.html:9\n+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4\n+msgid \"Search for...\"\n+msgstr \"Bilatu...\"\n+\n+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4\n+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4\n+#: searx/templates/simple/stats.html:7\n+msgid \"Engine stats\"\n+msgstr \"Bilatzaileen estatistikak\"\n+\n+#: searx/templates/courgette/result_templates/images.html:4\n+#: searx/templates/legacy/result_templates/images.html:4\n+#: searx/templates/pix-art/result_templates/images.html:4\n+msgid \"original context\"\n+msgstr \"jatorrizko testuingurua\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Seeder\"\n+msgstr \"Ereilea\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Leecher\"\n+msgstr \"Izaina\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:9\n+#: searx/templates/legacy/result_templates/torrent.html:9\n+#: searx/templates/oscar/macros.html:23\n+#: searx/templates/simple/result_templates/torrent.html:6\n+msgid \"magnet link\"\n+msgstr \"magnet lotura\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:10\n+#: searx/templates/legacy/result_templates/torrent.html:10\n+#: searx/templates/oscar/macros.html:24\n+#: searx/templates/simple/result_templates/torrent.html:7\n+msgid \"torrent file\"\n+msgstr \"torrent fitxategia\"\n+\n+#: searx/templates/legacy/categories.html:8\n+#: searx/templates/simple/categories.html:6\n+msgid \"Click on the magnifier to perform search\"\n+msgstr \"Lupan sakatu bilaketa egiteko\"\n+\n+#: searx/templates/legacy/preferences.html:84\n+#: searx/templates/oscar/preferences.html:113\n+#: searx/templates/simple/preferences.html:142\n+msgid \"Results on new tabs\"\n+msgstr \"Emaitzak fitxa berrietan\"\n+\n+#: searx/templates/legacy/preferences.html:87\n+#: searx/templates/oscar/preferences.html:117\n+#: searx/templates/simple/preferences.html:145\n+msgid \"On\"\n+msgstr \"Piztuta\"\n+\n+#: searx/templates/legacy/preferences.html:88\n+#: searx/templates/oscar/preferences.html:118\n+#: searx/templates/simple/preferences.html:146\n+msgid \"Off\"\n+msgstr \"Itzalita\"\n+\n+#: searx/templates/legacy/result_templates/code.html:3\n+#: 
searx/templates/legacy/result_templates/default.html:3\n+#: searx/templates/legacy/result_templates/map.html:9\n+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48\n+#: searx/templates/simple/macros.html:43\n+msgid \"cached\"\n+msgstr \"cacheatuta\"\n+\n+#: searx/templates/oscar/advanced.html:4\n+msgid \"Advanced settings\"\n+msgstr \"Ezarpen aurreratuak\"\n+\n+#: searx/templates/oscar/base.html:62\n+#: searx/templates/oscar/messages/first_time.html:4\n+#: searx/templates/oscar/messages/save_settings_successfull.html:5\n+#: searx/templates/oscar/messages/unknow_error.html:5\n+msgid \"Close\"\n+msgstr \"Itxi\"\n+\n+#: searx/templates/oscar/base.html:64\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+#: searx/templates/simple/results.html:25\n+msgid \"Error!\"\n+msgstr \"Errorea!\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"Powered by\"\n+msgstr \"Honek bultzatua\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"a privacy-respecting, hackable metasearch engine\"\n+msgstr \"pribatutasun-errespetatzaile, metabilaketa motor hackeagarri bat\"\n+\n+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n+#: searx/templates/simple/macros.html:43\n+msgid \"proxied\"\n+msgstr \"proxyatuta\"\n+\n+#: searx/templates/oscar/macros.html:92\n+msgid \"supported\"\n+msgstr \"onartua\"\n+\n+#: searx/templates/oscar/macros.html:96\n+msgid \"not supported\"\n+msgstr \"ez onartua\"\n+\n+#: searx/templates/oscar/preferences.html:13\n+#: searx/templates/oscar/preferences.html:22\n+#: searx/templates/simple/preferences.html:32\n+msgid \"General\"\n+msgstr \"Orokorra\"\n+\n+#: searx/templates/oscar/preferences.html:14\n+#: searx/templates/oscar/preferences.html:146\n+#: searx/templates/simple/preferences.html:76\n+msgid \"Engines\"\n+msgstr \"Bilatzaileak\"\n+\n+#: searx/templates/oscar/preferences.html:15\n+#: searx/templates/oscar/preferences.html:219\n+msgid \"Plugins\"\n+msgstr \"Pluginak\"\n+\n+#: searx/templates/oscar/preferences.html:16\n+#: searx/templates/oscar/preferences.html:245\n+msgid \"Answerers\"\n+msgstr \"Erantzun emaileak\"\n+\n+#: searx/templates/oscar/preferences.html:17\n+#: searx/templates/oscar/preferences.html:272\n+msgid \"Cookies\"\n+msgstr \"Cookieak\"\n+\n+#: searx/templates/oscar/preferences.html:42\n+#: searx/templates/simple/preferences.html:48\n+msgid \"What language do you prefer for search?\"\n+msgstr \"Zein hizkuntzan egin nahi duzu bilaketa?\"\n+\n+#: searx/templates/oscar/preferences.html:48\n+#: searx/templates/simple/preferences.html:128\n+msgid \"Change the language of the layout\"\n+msgstr \"Interfazearen hizkuntza aldatu\"\n+\n+#: searx/templates/oscar/preferences.html:58\n+#: searx/templates/simple/preferences.html:60\n+msgid \"Find stuff as you type\"\n+msgstr \"Gauzak aurkitu idatzi bitartean\"\n+\n+#: searx/templates/oscar/preferences.html:69\n+#: searx/templates/simple/preferences.html:173\n+msgid \"Proxying image results through searx\"\n+msgstr \"Irudien emaitzak searx bitartez proxyatu\"\n+\n+#: searx/templates/oscar/preferences.html:78\n+msgid \"\"\n+\"Change how forms are submited, learn more about request methods\"\n+msgstr \"Aldatu inprimakiak nola bidaltzen diren, ikasi gehiago eskaera metodoen inguruan\"\n+\n+#: searx/templates/oscar/preferences.html:87\n+#: searx/templates/simple/preferences.html:71\n+msgid \"Filter content\"\n+msgstr \"Edukia iragazi\"\n+\n+#: 
searx/templates/oscar/preferences.html:97\n+#: searx/templates/simple/preferences.html:139\n+msgid \"Change searx layout\"\n+msgstr \"Searxen diseinua aldatu\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Choose style for this theme\"\n+msgstr \"Gai honetarako estiloa hautatu\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Style\"\n+msgstr \"Estiloa\"\n+\n+#: searx/templates/oscar/preferences.html:122\n+msgid \"Open Access DOI resolver\"\n+msgstr \"Open Access DOI ebatzi\"\n+\n+#: searx/templates/oscar/preferences.html:123\n+msgid \"\"\n+\"Redirect to open-access versions of publications when available (plugin \"\n+\"required)\"\n+msgstr \"Argitalpenen sartze-askeko bertsioetara berbidali ahal denean (plugina behar du)\"\n+\n+#: searx/templates/oscar/preferences.html:163\n+#: searx/templates/oscar/preferences.html:175\n+#: searx/templates/simple/preferences.html:88\n+msgid \"Shortcut\"\n+msgstr \"Lasterbidea\"\n+\n+#: searx/templates/oscar/preferences.html:164\n+#: searx/templates/oscar/preferences.html:174\n+msgid \"Selected language\"\n+msgstr \"Hautatutako hizkuntza\"\n+\n+#: searx/templates/oscar/preferences.html:166\n+#: searx/templates/oscar/preferences.html:172\n+#: searx/templates/simple/preferences.html:91\n+msgid \"Time range\"\n+msgstr \"Denbora tartea\"\n+\n+#: searx/templates/oscar/preferences.html:167\n+#: searx/templates/oscar/preferences.html:171\n+#: searx/templates/simple/preferences.html:92\n+msgid \"Avg. time\"\n+msgstr \" Batezbesteko denbora\"\n+\n+#: searx/templates/oscar/preferences.html:168\n+#: searx/templates/oscar/preferences.html:170\n+#: searx/templates/simple/preferences.html:93\n+msgid \"Max time\"\n+msgstr \"Gehienezko denbora\"\n+\n+#: searx/templates/oscar/preferences.html:248\n+msgid \"This is the list of searx's instant answering modules.\"\n+msgstr \"Hau da searxen berehalako erantzunen moduluen zerrenda.\"\n+\n+#: searx/templates/oscar/preferences.html:252\n+msgid \"Name\"\n+msgstr \"Izena\"\n+\n+#: searx/templates/oscar/preferences.html:253\n+msgid \"Keywords\"\n+msgstr \"Gako-hitzak\"\n+\n+#: searx/templates/oscar/preferences.html:254\n+msgid \"Description\"\n+msgstr \"Deskripzioa\"\n+\n+#: searx/templates/oscar/preferences.html:255\n+msgid \"Examples\"\n+msgstr \"Adibideak\"\n+\n+#: searx/templates/oscar/preferences.html:275\n+msgid \"\"\n+\"This is the list of cookies and their values searx is storing on your \"\n+\"computer.\"\n+msgstr \"Hau searxek zure ordenagailuan gordetzen ari den cookien eta haien balioen zerrenda bat da.\"\n+\n+#: searx/templates/oscar/preferences.html:276\n+msgid \"With that list, you can assess searx transparency.\"\n+msgstr \"Zerrenda horrekin, searxen gardentasuna balioztatu dezakezu.\"\n+\n+#: searx/templates/oscar/preferences.html:281\n+msgid \"Cookie name\"\n+msgstr \"Cookiearen izena\"\n+\n+#: searx/templates/oscar/preferences.html:282\n+msgid \"Value\"\n+msgstr \"Balioa\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"Search URL of the currently saved preferences\"\n+msgstr \"Une honetan gordetako hobespenen bilaketa URLa\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"\"\n+\"Note: specifying custom settings in the search URL can reduce privacy by \"\n+\"leaking data to the clicked result sites.\"\n+msgstr \"Oharra: bilaketa URLan ezarpen pertsonalizatuak zehazteak pribatutasuna txikiagotu dezake klikatutako erantzun guneetara datuak emanez\"\n+\n+#: 
searx/templates/oscar/results.html:17\n+msgid \"Search results\"\n+msgstr \"Bilaketaren emaitzak\"\n+\n+#: searx/templates/oscar/results.html:21\n+#: searx/templates/simple/results.html:84\n+msgid \"Try searching for:\"\n+msgstr \"Saiatu hau bilatzen:\"\n+\n+#: searx/templates/oscar/results.html:100\n+#: searx/templates/simple/results.html:25\n+msgid \"Engines cannot retrieve results\"\n+msgstr \"Bilatzaileek ezin dute emaitzik lortu\"\n+\n+#: searx/templates/oscar/results.html:131\n+msgid \"Links\"\n+msgstr \"Estekak\"\n+\n+#: searx/templates/oscar/search.html:8\n+#: searx/templates/oscar/search_full.html:11\n+#: searx/templates/simple/search.html:5\n+msgid \"Start search\"\n+msgstr \"Bilaketa hasi\"\n+\n+#: searx/templates/oscar/stats.html:2\n+msgid \"stats\"\n+msgstr \"estatistikak\"\n+\n+#: searx/templates/oscar/time-range.html:3\n+#: searx/templates/simple/time-range.html:3\n+msgid \"Anytime\"\n+msgstr \"Edonoiz\"\n+\n+#: searx/templates/oscar/time-range.html:6\n+#: searx/templates/simple/time-range.html:6\n+msgid \"Last day\"\n+msgstr \"Azken eguna\"\n+\n+#: searx/templates/oscar/time-range.html:9\n+#: searx/templates/simple/time-range.html:9\n+msgid \"Last week\"\n+msgstr \"Azken astea\"\n+\n+#: searx/templates/oscar/time-range.html:12\n+#: searx/templates/simple/time-range.html:12\n+msgid \"Last month\"\n+msgstr \"Azken hilabetea\"\n+\n+#: searx/templates/oscar/time-range.html:15\n+#: searx/templates/simple/time-range.html:15\n+msgid \"Last year\"\n+msgstr \"Azken urtea\"\n+\n+#: searx/templates/oscar/messages/first_time.html:6\n+#: searx/templates/oscar/messages/no_data_available.html:3\n+msgid \"Heads up!\"\n+msgstr \"Kasu!\"\n+\n+#: searx/templates/oscar/messages/first_time.html:7\n+msgid \"It look like you are using searx first time.\"\n+msgstr \"Searx lehen aldiz erabiltzen ari zarela ematen du.\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:3\n+msgid \"Information!\"\n+msgstr \"Informazioa!\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:4\n+msgid \"currently, there are no cookies defined.\"\n+msgstr \"une honetan, ez dago cookierik definitutik.\"\n+\n+#: searx/templates/oscar/messages/no_data_available.html:4\n+msgid \"There is currently no data available. \"\n+msgstr \"Une honetan ez dago daturik eskuragarri.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+msgid \"Engines cannot retrieve results.\"\n+msgstr \"Bilatzaileek ezin dute emaitzik lortu.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:10\n+#: searx/templates/simple/messages/no_results.html:10\n+msgid \"Please, try again later or find another searx instance.\"\n+msgstr \"Mesedez, saiatu berriz beranduago edo bila ezazu beste searx instantzia bat.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:14\n+#: searx/templates/simple/messages/no_results.html:14\n+msgid \"Sorry!\"\n+msgstr \"Barkatu!\"\n+\n+#: searx/templates/oscar/messages/no_results.html:15\n+#: searx/templates/simple/messages/no_results.html:15\n+msgid \"\"\n+\"we didn't find any results. Please use another query or search in more \"\n+\"categories.\"\n+msgstr \"ez dugu emaitzik aurkitu. 
Mesedez beste kontsulta bat egin edo bilatu kategoria gehiagotan.\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:7\n+msgid \"Well done!\"\n+msgstr \"Ondo egina!\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:8\n+msgid \"Settings saved successfully.\"\n+msgstr \"Ezarpenak ongi gorde dira.\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:7\n+msgid \"Oh snap!\"\n+msgstr \"Hara!\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:8\n+msgid \"Something went wrong.\"\n+msgstr \"Zerbait gaizki joan da.\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"show media\"\n+msgstr \"media erakutsi\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"hide media\"\n+msgstr \"media ezkutatu\"\n+\n+#: searx/templates/oscar/result_templates/images.html:30\n+msgid \"Get image\"\n+msgstr \"Irudia eskuratu\"\n+\n+#: searx/templates/oscar/result_templates/images.html:33\n+msgid \"View source\"\n+msgstr \"Iturria ikusi\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"show map\"\n+msgstr \"mapa erakutsi\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"hide map\"\n+msgstr \"mapa ezkutatu\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"show details\"\n+msgstr \"xehetasunak erakutsi\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"hide details\"\n+msgstr \"xehetasunak ezkutatu\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:7\n+#: searx/templates/simple/result_templates/torrent.html:11\n+msgid \"Filesize\"\n+msgstr \"Fitxategi neurria\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:9\n+#: searx/templates/simple/result_templates/torrent.html:12\n+msgid \"Bytes\"\n+msgstr \"Byteak\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:10\n+#: searx/templates/simple/result_templates/torrent.html:13\n+msgid \"kiB\"\n+msgstr \"kiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:11\n+#: searx/templates/simple/result_templates/torrent.html:14\n+msgid \"MiB\"\n+msgstr \"MiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:12\n+#: searx/templates/simple/result_templates/torrent.html:15\n+msgid \"GiB\"\n+msgstr \"GiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:13\n+#: searx/templates/simple/result_templates/torrent.html:16\n+msgid \"TiB\"\n+msgstr \"TiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:15\n+#: searx/templates/simple/result_templates/torrent.html:20\n+msgid \"Number of Files\"\n+msgstr \"Fitxategi kopurua\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"show video\"\n+msgstr \"erakutsi bideoa\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"hide video\"\n+msgstr \"ezkutatu bideoa\"\n+\n+#: searx/templates/pix-art/results.html:28\n+msgid \"Load more...\"\n+msgstr \"Kargatu gehiago...\"\n+\n+#: searx/templates/simple/base.html:31\n+msgid \"No item found\"\n+msgstr \"Ez da elementurik aurkitu\"\n+\n+#: 
searx/templates/simple/preferences.html:89\n+msgid \"Supports selected language\"\n+msgstr \"Hautatutako hizkuntza onartzen du\"\n+\n+#: searx/templates/simple/preferences.html:118\n+msgid \"User interface\"\n+msgstr \"Erabiltzailearen interfazea\"\n+\n+#: searx/templates/simple/preferences.html:154\n+msgid \"Privacy\"\n+msgstr \"Pribatutasuna\"\ndiff --git a/searx/translations/fa_IR/LC_MESSAGES/messages.mo b/searx/translations/fa_IR/LC_MESSAGES/messages.mo\nnew file mode 100644\nindex 0000000000..4ef71416b3\nBinary files /dev/null and b/searx/translations/fa_IR/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/fa_IR/LC_MESSAGES/messages.po b/searx/translations/fa_IR/LC_MESSAGES/messages.po\nnew file mode 100644\nindex 0000000000..0e568e1db8\n--- /dev/null\n+++ b/searx/translations/fa_IR/LC_MESSAGES/messages.po\n@@ -0,0 +1,1015 @@\n+# Translations template for PROJECT.\n+# Copyright (C) 2017 ORGANIZATION\n+# This file is distributed under the same license as the PROJECT project.\n+# \n+# Translators:\n+# Aurora, 2018\n+# Jim , 2017\n+# Mostafa Ahangarha , 2018\n+msgid \"\"\n+msgstr \"\"\n+\"Project-Id-Version: searx\\n\"\n+\"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n+\"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n+\"PO-Revision-Date: 2018-04-16 16:41+0000\\n\"\n+\"Last-Translator: Aurora\\n\"\n+\"Language-Team: Persian (Iran) (http://www.transifex.com/asciimoo/searx/language/fa_IR/)\\n\"\n+\"MIME-Version: 1.0\\n\"\n+\"Content-Type: text/plain; charset=UTF-8\\n\"\n+\"Content-Transfer-Encoding: 8bit\\n\"\n+\"Generated-By: Babel 2.3.4\\n\"\n+\"Language: fa_IR\\n\"\n+\"Plural-Forms: nplurals=2; plural=(n > 1);\\n\"\n+\n+#: searx/search.py:137 searx/search.py:182\n+msgid \"timeout\"\n+msgstr \"پایان زمان
\"\n+\n+#: searx/search.py:144\n+msgid \"request exception\"\n+msgstr \"خطا در درخواست\"\n+\n+#: searx/search.py:151\n+msgid \"unexpected crash\"\n+msgstr \"ایست ناگهانی\"\n+\n+#: searx/webapp.py:136\n+msgid \"files\"\n+msgstr \"فایل ها
\"\n+\n+#: searx/webapp.py:137\n+msgid \"general\"\n+msgstr \"فراگیر\"\n+\n+#: searx/webapp.py:138\n+msgid \"music\"\n+msgstr \"موسیقی\"\n+\n+#: searx/webapp.py:139\n+msgid \"social media\"\n+msgstr \"رسانه اجتماعی\"\n+\n+#: searx/webapp.py:140\n+msgid \"images\"\n+msgstr \"تصاویر
\"\n+\n+#: searx/webapp.py:141\n+msgid \"videos\"\n+msgstr \"ویدیو ها
\"\n+\n+#: searx/webapp.py:142\n+msgid \"it\"\n+msgstr \"فناوری اطلاعات\"\n+\n+#: searx/webapp.py:143\n+msgid \"news\"\n+msgstr \"اخبار\"\n+\n+#: searx/webapp.py:144\n+msgid \"map\"\n+msgstr \"نقشه\"\n+\n+#: searx/webapp.py:145\n+msgid \"science\"\n+msgstr \"دانش
\"\n+\n+#: searx/webapp.py:399 searx/webapp.py:658\n+msgid \"Invalid settings, please edit your preferences\"\n+msgstr \"تنظیمات نادرست است، لطفا اولویت‌های جستجو را تغییر دهید\"\n+\n+#: searx/webapp.py:415\n+msgid \"Invalid settings\"\n+msgstr \"تنظیمات اشتباه\"\n+\n+#: searx/webapp.py:449 searx/webapp.py:493\n+msgid \"search error\"\n+msgstr \"خطای جستجو\"\n+\n+#: searx/webapp.py:530\n+msgid \"{minutes} minute(s) ago\"\n+msgstr \"{minutes} دقیقه پیش\"\n+\n+#: searx/webapp.py:532\n+msgid \"{hours} hour(s), {minutes} minute(s) ago\"\n+msgstr \"{hours} ساعت و {minutes} دقیقه پیش\"\n+\n+#: searx/answerers/random/answerer.py:53\n+msgid \"Random value generator\"\n+msgstr \"ایجاد کننده ی مقدار تصادفی\"\n+\n+#: searx/answerers/random/answerer.py:54\n+msgid \"Generate different random values\"\n+msgstr \"ایجاد مقادیر تصادفی متفاوت\"\n+\n+#: searx/answerers/statistics/answerer.py:53\n+msgid \"Statistics functions\"\n+msgstr \"توابع آماری\"\n+\n+#: searx/answerers/statistics/answerer.py:54\n+msgid \"Compute {functions} of the arguments\"\n+msgstr \"پردازش {عملکرد های} نشانوند ها
\"\n+\n+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n+msgid \"Engine time (sec)\"\n+msgstr \"زمان موتور(ثانیه)
\"\n+\n+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n+msgid \"Page loads (sec)\"\n+msgstr \"زمان بارگذاری صفحه (ثانیه)\"\n+\n+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n+#: searx/templates/oscar/results.html:95\n+#: searx/templates/simple/results.html:20\n+msgid \"Number of results\"\n+msgstr \"تعداد نتایج\"\n+\n+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213\n+msgid \"Scores\"\n+msgstr \"امتیازات
\"\n+\n+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217\n+msgid \"Scores per result\"\n+msgstr \"امتیازات بر نتیجه
\"\n+\n+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n+msgid \"Errors\"\n+msgstr \"خطاها\"\n+\n+#: searx/engines/pdbe.py:87\n+msgid \"{title}&nbsp;(OBSOLETE)\"\n+msgstr \"{title}&nbsp;(OBSOLETE)\"\n+\n+#: searx/engines/pdbe.py:91\n+msgid \"This entry has been superseded by\"\n+msgstr \"این ورودی معلق شده است توسط\"\n+\n+#: searx/engines/pubmed.py:74\n+msgid \"No abstract is available for this publication.\"\n+msgstr \"هیچ چکید ای برای این نشریه در دسترس نیست.
\"\n+\n+#: searx/plugins/https_rewrite.py:32\n+msgid \"Rewrite HTTP links to HTTPS if possible\"\n+msgstr \"تغییر پیوند های HTTP به HTTPS در صورت امکان\"\n+\n+#: searx/plugins/infinite_scroll.py:3\n+msgid \"Infinite scroll\"\n+msgstr \"پایین رفتن بی‌پایان\"\n+\n+#: searx/plugins/infinite_scroll.py:4\n+msgid \"Automatically load next page when scrolling to bottom of current page\"\n+msgstr \"بارگذاری خودکار صفحه بعد در صورت پیمایش تا پایین صفحه کنونی\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:9\n+msgid \"Open Access DOI rewrite\"\n+msgstr \"بازنویسی Open Access DOI
\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:10\n+msgid \"\"\n+\"Avoid paywalls by redirecting to open-access versions of publications when \"\n+\"available\"\n+msgstr \"امتناع از منابع غیر رایگان با تغییر مسیر به نسخه ی رایگان نشریات اگر در دسترس باشد
\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:18\n+#: searx/templates/oscar/preferences.html:114\n+#: searx/templates/simple/preferences.html:149\n+msgid \"Open result links on new browser tabs\"\n+msgstr \"باز کردن لینک های نتیجه در برگه‌ی جدید مرورگر\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:19\n+msgid \"\"\n+\"Results are opened in the same window by default. This plugin overwrites the\"\n+\" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n+msgstr \"به طور پیش‌فرض، نتایج در پنجره ی کنونی باز می‌شوند. این افزونه، رفتار پیش‌فرض را برای بازشدن پیوند در پنجره/برگه جدید تغییر می‌دهد. (نیازمند جاوااسکریپت)\"\n+\n+#: searx/plugins/search_on_category_select.py:18\n+msgid \"Search on category select\"\n+msgstr \"جستجو به محض انتخاب یک دسته
\"\n+\n+#: searx/plugins/search_on_category_select.py:19\n+msgid \"\"\n+\"Perform search immediately if a category selected. Disable to select \"\n+\"multiple categories. (JavaScript required)\"\n+msgstr \"جستجو به محض انتخاب یک دسته. برای انتخاب چند دسته این گزینه را غیرفعال کنید. (نیازمند جاواسکریپت)
\"\n+\n+#: searx/plugins/self_info.py:20\n+msgid \"\"\n+\"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n+\"contains \\\"user agent\\\".\"\n+msgstr \"اگر آی پی شما در صورت جستجو برای 'ip' و نشان دادن عامل کاربر در صورت جستجو برای 'user agent'.
\"\n+\n+#: searx/plugins/tracker_url_remover.py:26\n+msgid \"Tracker URL remover\"\n+msgstr \"از بین برنده ی آدرس ردیاب ها
\"\n+\n+#: searx/plugins/tracker_url_remover.py:27\n+msgid \"Remove trackers arguments from the returned URL\"\n+msgstr \"حذف نشانوند های ردیاب ها از آدرس برگشتی\"\n+\n+#: searx/plugins/vim_hotkeys.py:3\n+msgid \"Vim-like hotkeys\"\n+msgstr \"کلیدهای میانبر شبیه Vim
\"\n+\n+#: searx/plugins/vim_hotkeys.py:4\n+msgid \"\"\n+\"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n+\"\\\"h\\\" key on main or result page to get help.\"\n+msgstr \"جابجایی در نتایج با کلیدهای میان‌بر مشابه Vim (نیازمند جاوااسکریپت). در صفحه اصلی و یا صفحه نتیجه، دکمه h را برای نمایش راهنما بفشارید.\"\n+\n+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n+#: searx/templates/simple/404.html:4\n+msgid \"Page not found\"\n+msgstr \"صفحه پیدا نشد\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+#, python-format\n+msgid \"Go to %(search_page)s.\"\n+msgstr \"برو به%(search_page)s.\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+msgid \"search page\"\n+msgstr \"صفحه جستجو
\"\n+\n+#: searx/templates/courgette/index.html:9\n+#: searx/templates/courgette/index.html:13\n+#: searx/templates/courgette/results.html:5\n+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12\n+#: searx/templates/oscar/navbar.html:7\n+#: searx/templates/oscar/preferences.html:3\n+#: searx/templates/pix-art/index.html:8\n+msgid \"preferences\"\n+msgstr \"تنظیمات
\"\n+\n+#: searx/templates/courgette/index.html:11\n+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2\n+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7\n+msgid \"about\"\n+msgstr \"درباره
\"\n+\n+#: searx/templates/courgette/preferences.html:5\n+#: searx/templates/legacy/preferences.html:5\n+#: searx/templates/oscar/preferences.html:8\n+#: searx/templates/pix-art/preferences.html:5\n+#: searx/templates/simple/preferences.html:26\n+msgid \"Preferences\"\n+msgstr \"تنظیمات
\"\n+\n+#: searx/templates/courgette/preferences.html:9\n+#: searx/templates/legacy/preferences.html:9\n+#: searx/templates/oscar/preferences.html:33\n+#: searx/templates/oscar/preferences.html:35\n+#: searx/templates/simple/preferences.html:34\n+msgid \"Default categories\"\n+msgstr \"دسته‌بندی های پیش‌فرض\"\n+\n+#: searx/templates/courgette/preferences.html:13\n+#: searx/templates/legacy/preferences.html:14\n+#: searx/templates/oscar/preferences.html:41\n+#: searx/templates/pix-art/preferences.html:9\n+#: searx/templates/simple/preferences.html:39\n+#: searx/templates/simple/preferences.html:163\n+msgid \"Search language\"\n+msgstr \"زبان جستجو\"\n+\n+#: searx/templates/courgette/preferences.html:16\n+#: searx/templates/legacy/preferences.html:17\n+#: searx/templates/oscar/languages.html:6\n+#: searx/templates/pix-art/preferences.html:12\n+#: searx/templates/simple/languages.html:2\n+#: searx/templates/simple/preferences.html:42\n+msgid \"Default language\"\n+msgstr \"زبان پیش‌فرض\"\n+\n+#: searx/templates/courgette/preferences.html:24\n+#: searx/templates/legacy/preferences.html:25\n+#: searx/templates/oscar/preferences.html:47\n+#: searx/templates/pix-art/preferences.html:20\n+#: searx/templates/simple/preferences.html:120\n+msgid \"Interface language\"\n+msgstr \"زبان رابط کاربری\"\n+\n+#: searx/templates/courgette/preferences.html:34\n+#: searx/templates/legacy/preferences.html:35\n+#: searx/templates/oscar/preferences.html:57\n+#: searx/templates/simple/preferences.html:51\n+msgid \"Autocomplete\"\n+msgstr \"تکمیل خودکار
\"\n+\n+#: searx/templates/courgette/preferences.html:45\n+#: searx/templates/legacy/preferences.html:46\n+#: searx/templates/oscar/preferences.html:68\n+#: searx/templates/simple/preferences.html:166\n+msgid \"Image proxy\"\n+msgstr \"پراکسی تصویر
\"\n+\n+#: searx/templates/courgette/preferences.html:48\n+#: searx/templates/legacy/preferences.html:49\n+#: searx/templates/oscar/preferences.html:72\n+#: searx/templates/simple/preferences.html:169\n+msgid \"Enabled\"\n+msgstr \"فعال
\"\n+\n+#: searx/templates/courgette/preferences.html:49\n+#: searx/templates/legacy/preferences.html:50\n+#: searx/templates/oscar/preferences.html:73\n+#: searx/templates/simple/preferences.html:170\n+msgid \"Disabled\"\n+msgstr \"غیرفعال\"\n+\n+#: searx/templates/courgette/preferences.html:54\n+#: searx/templates/legacy/preferences.html:55\n+#: searx/templates/oscar/preferences.html:77\n+#: searx/templates/pix-art/preferences.html:30\n+#: searx/templates/simple/preferences.html:156\n+msgid \"Method\"\n+msgstr \"روش
\"\n+\n+#: searx/templates/courgette/preferences.html:63\n+#: searx/templates/legacy/preferences.html:64\n+#: searx/templates/oscar/preferences.html:86\n+#: searx/templates/oscar/preferences.html:165\n+#: searx/templates/oscar/preferences.html:173\n+#: searx/templates/simple/preferences.html:63\n+#: searx/templates/simple/preferences.html:90\n+msgid \"SafeSearch\"\n+msgstr \"جستجوی امن\"\n+\n+#: searx/templates/courgette/preferences.html:66\n+#: searx/templates/legacy/preferences.html:67\n+#: searx/templates/oscar/preferences.html:90\n+#: searx/templates/simple/preferences.html:66\n+msgid \"Strict\"\n+msgstr \"سخت گیر
\"\n+\n+#: searx/templates/courgette/preferences.html:67\n+#: searx/templates/legacy/preferences.html:68\n+#: searx/templates/oscar/preferences.html:91\n+#: searx/templates/simple/preferences.html:67\n+msgid \"Moderate\"\n+msgstr \"متوسط
\"\n+\n+#: searx/templates/courgette/preferences.html:68\n+#: searx/templates/legacy/preferences.html:69\n+#: searx/templates/oscar/preferences.html:92\n+#: searx/templates/simple/preferences.html:68\n+msgid \"None\"\n+msgstr \"هیچ
\"\n+\n+#: searx/templates/courgette/preferences.html:73\n+#: searx/templates/legacy/preferences.html:74\n+#: searx/templates/oscar/preferences.html:96\n+#: searx/templates/pix-art/preferences.html:39\n+#: searx/templates/simple/preferences.html:131\n+msgid \"Themes\"\n+msgstr \"تم ها
\"\n+\n+#: searx/templates/courgette/preferences.html:83\n+msgid \"Color\"\n+msgstr \"رنگ\"\n+\n+#: searx/templates/courgette/preferences.html:86\n+msgid \"Blue (default)\"\n+msgstr \"آبی (پیش‌فرض)\"\n+\n+#: searx/templates/courgette/preferences.html:87\n+msgid \"Violet\"\n+msgstr \"بنفش\"\n+\n+#: searx/templates/courgette/preferences.html:88\n+msgid \"Green\"\n+msgstr \"سبز
\"\n+\n+#: searx/templates/courgette/preferences.html:89\n+msgid \"Cyan\"\n+msgstr \"فیروزه‌ای\"\n+\n+#: searx/templates/courgette/preferences.html:90\n+msgid \"Orange\"\n+msgstr \"نارنجی\"\n+\n+#: searx/templates/courgette/preferences.html:91\n+msgid \"Red\"\n+msgstr \"قرمز\"\n+\n+#: searx/templates/courgette/preferences.html:96\n+#: searx/templates/legacy/preferences.html:93\n+#: searx/templates/pix-art/preferences.html:49\n+#: searx/templates/simple/preferences.html:77\n+msgid \"Currently used search engines\"\n+msgstr \"موتورهای جستجوی در حال استفاده\"\n+\n+#: searx/templates/courgette/preferences.html:100\n+#: searx/templates/legacy/preferences.html:97\n+#: searx/templates/oscar/preferences.html:162\n+#: searx/templates/oscar/preferences.html:176\n+#: searx/templates/pix-art/preferences.html:53\n+#: searx/templates/simple/preferences.html:87\n+msgid \"Engine name\"\n+msgstr \"نام موتور\"\n+\n+#: searx/templates/courgette/preferences.html:101\n+#: searx/templates/legacy/preferences.html:98\n+msgid \"Category\"\n+msgstr \"دسته\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:113\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:110\n+#: searx/templates/oscar/preferences.html:161\n+#: searx/templates/oscar/preferences.html:177\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:64\n+#: searx/templates/simple/preferences.html:86\n+msgid \"Allow\"\n+msgstr \"اجازه\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:114\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:111\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:65\n+msgid \"Block\"\n+msgstr \"انسداد
\"\n+\n+#: searx/templates/courgette/preferences.html:122\n+#: searx/templates/legacy/preferences.html:119\n+#: searx/templates/oscar/preferences.html:297\n+#: searx/templates/pix-art/preferences.html:73\n+#: searx/templates/simple/preferences.html:180\n+msgid \"\"\n+\"These settings are stored in your cookies, this allows us not to store this \"\n+\"data about you.\"\n+msgstr \"این تنظیمات در کوکی های شما ذخیره شده اند، این به ما اجازه می دهد این اطلاعات را درباره شما ذخیره نکنیم.
\"\n+\n+#: searx/templates/courgette/preferences.html:124\n+#: searx/templates/legacy/preferences.html:121\n+#: searx/templates/oscar/preferences.html:299\n+#: searx/templates/pix-art/preferences.html:75\n+#: searx/templates/simple/preferences.html:182\n+msgid \"\"\n+\"These cookies serve your sole convenience, we don't use these cookies to \"\n+\"track you.\"\n+msgstr \"این کوکی ها برای راحتی شماست، ما از این کوکی برای ردیابی شما استفاده نمیکنیم.
\"\n+\n+#: searx/templates/courgette/preferences.html:127\n+#: searx/templates/legacy/preferences.html:124\n+#: searx/templates/oscar/preferences.html:305\n+#: searx/templates/pix-art/preferences.html:78\n+#: searx/templates/simple/preferences.html:185\n+msgid \"save\"\n+msgstr \"ذخیره\"\n+\n+#: searx/templates/courgette/preferences.html:128\n+#: searx/templates/legacy/preferences.html:125\n+#: searx/templates/oscar/preferences.html:307\n+#: searx/templates/simple/preferences.html:186\n+msgid \"Reset defaults\"\n+msgstr \"بازنشانی پیشفرض ها
\"\n+\n+#: searx/templates/courgette/preferences.html:129\n+#: searx/templates/legacy/preferences.html:126\n+#: searx/templates/oscar/preferences.html:306\n+#: searx/templates/pix-art/preferences.html:79\n+#: searx/templates/simple/preferences.html:187\n+msgid \"back\"\n+msgstr \"عقب
\"\n+\n+#: searx/templates/courgette/results.html:12\n+#: searx/templates/legacy/results.html:13\n+#: searx/templates/oscar/results.html:136\n+#: searx/templates/simple/results.html:58\n+msgid \"Search URL\"\n+msgstr \"آدرس جستجو
\"\n+\n+#: searx/templates/courgette/results.html:16\n+#: searx/templates/legacy/results.html:17\n+#: searx/templates/oscar/results.html:141\n+#: searx/templates/simple/results.html:62\n+msgid \"Download results\"\n+msgstr \"نتایج دانلود
\"\n+\n+#: searx/templates/courgette/results.html:34\n+#: searx/templates/legacy/results.html:35\n+#: searx/templates/simple/results.html:10\n+msgid \"Answers\"\n+msgstr \"پاسخ ها
\"\n+\n+#: searx/templates/courgette/results.html:42\n+#: searx/templates/legacy/results.html:43\n+#: searx/templates/oscar/results.html:116\n+#: searx/templates/simple/results.html:42\n+msgid \"Suggestions\"\n+msgstr \"پیشنهادات\"\n+\n+#: searx/templates/courgette/results.html:70\n+#: searx/templates/legacy/results.html:81\n+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78\n+#: searx/templates/simple/results.html:130\n+msgid \"previous page\"\n+msgstr \"صفحه پیش\"\n+\n+#: searx/templates/courgette/results.html:81\n+#: searx/templates/legacy/results.html:92\n+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84\n+#: searx/templates/simple/results.html:145\n+msgid \"next page\"\n+msgstr \"صفحه بعد\"\n+\n+#: searx/templates/courgette/search.html:3\n+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6\n+#: searx/templates/oscar/search_full.html:9\n+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4\n+msgid \"Search for...\"\n+msgstr \"جستجو برای …\"\n+\n+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4\n+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4\n+#: searx/templates/simple/stats.html:7\n+msgid \"Engine stats\"\n+msgstr \"آمار موتور
\"\n+\n+#: searx/templates/courgette/result_templates/images.html:4\n+#: searx/templates/legacy/result_templates/images.html:4\n+#: searx/templates/pix-art/result_templates/images.html:4\n+msgid \"original context\"\n+msgstr \"متن اصلی
\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Seeder\"\n+msgstr \"سیدر
\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Leecher\"\n+msgstr \"لیچر
\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:9\n+#: searx/templates/legacy/result_templates/torrent.html:9\n+#: searx/templates/oscar/macros.html:23\n+#: searx/templates/simple/result_templates/torrent.html:6\n+msgid \"magnet link\"\n+msgstr \"لینک مگنت
\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:10\n+#: searx/templates/legacy/result_templates/torrent.html:10\n+#: searx/templates/oscar/macros.html:24\n+#: searx/templates/simple/result_templates/torrent.html:7\n+msgid \"torrent file\"\n+msgstr \"فایل تورنت
\"\n+\n+#: searx/templates/legacy/categories.html:8\n+#: searx/templates/simple/categories.html:6\n+msgid \"Click on the magnifier to perform search\"\n+msgstr \"برای اجرای جستجو روی ذره بین کلیک کنید
\"\n+\n+#: searx/templates/legacy/preferences.html:84\n+#: searx/templates/oscar/preferences.html:113\n+#: searx/templates/simple/preferences.html:142\n+msgid \"Results on new tabs\"\n+msgstr \"نتایج در برگه جدید\"\n+\n+#: searx/templates/legacy/preferences.html:87\n+#: searx/templates/oscar/preferences.html:117\n+#: searx/templates/simple/preferences.html:145\n+msgid \"On\"\n+msgstr \"روشن
\"\n+\n+#: searx/templates/legacy/preferences.html:88\n+#: searx/templates/oscar/preferences.html:118\n+#: searx/templates/simple/preferences.html:146\n+msgid \"Off\"\n+msgstr \"خاموش
\"\n+\n+#: searx/templates/legacy/result_templates/code.html:3\n+#: searx/templates/legacy/result_templates/default.html:3\n+#: searx/templates/legacy/result_templates/map.html:9\n+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48\n+#: searx/templates/simple/macros.html:43\n+msgid \"cached\"\n+msgstr \"ذخیره شده
\"\n+\n+#: searx/templates/oscar/advanced.html:4\n+msgid \"Advanced settings\"\n+msgstr \"تنظیمات پیشرفته
\"\n+\n+#: searx/templates/oscar/base.html:62\n+#: searx/templates/oscar/messages/first_time.html:4\n+#: searx/templates/oscar/messages/save_settings_successfull.html:5\n+#: searx/templates/oscar/messages/unknow_error.html:5\n+msgid \"Close\"\n+msgstr \"بستن
\"\n+\n+#: searx/templates/oscar/base.html:64\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+#: searx/templates/simple/results.html:25\n+msgid \"Error!\"\n+msgstr \"خطا!
\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"Powered by\"\n+msgstr \"قدرت گرفته از
\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"a privacy-respecting, hackable metasearch engine\"\n+msgstr \"یک ابر موتور جستجوی حافظ حریم شخصی\"\n+\n+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n+#: searx/templates/simple/macros.html:43\n+msgid \"proxied\"\n+msgstr \"پراکسی شده
\"\n+\n+#: searx/templates/oscar/macros.html:92\n+msgid \"supported\"\n+msgstr \"پشتیبانی شده
\"\n+\n+#: searx/templates/oscar/macros.html:96\n+msgid \"not supported\"\n+msgstr \"پشتیبانی نشده
\"\n+\n+#: searx/templates/oscar/preferences.html:13\n+#: searx/templates/oscar/preferences.html:22\n+#: searx/templates/simple/preferences.html:32\n+msgid \"General\"\n+msgstr \"کلی
\"\n+\n+#: searx/templates/oscar/preferences.html:14\n+#: searx/templates/oscar/preferences.html:146\n+#: searx/templates/simple/preferences.html:76\n+msgid \"Engines\"\n+msgstr \"موتور ها
\"\n+\n+#: searx/templates/oscar/preferences.html:15\n+#: searx/templates/oscar/preferences.html:219\n+msgid \"Plugins\"\n+msgstr \"افزونه ها\"\n+\n+#: searx/templates/oscar/preferences.html:16\n+#: searx/templates/oscar/preferences.html:245\n+msgid \"Answerers\"\n+msgstr \"پاسخگو ها
\"\n+\n+#: searx/templates/oscar/preferences.html:17\n+#: searx/templates/oscar/preferences.html:272\n+msgid \"Cookies\"\n+msgstr \"کوکی ها
\"\n+\n+#: searx/templates/oscar/preferences.html:42\n+#: searx/templates/simple/preferences.html:48\n+msgid \"What language do you prefer for search?\"\n+msgstr \"چه زبانی را برای جستجو ترجیح می‌دهید؟\"\n+\n+#: searx/templates/oscar/preferences.html:48\n+#: searx/templates/simple/preferences.html:128\n+msgid \"Change the language of the layout\"\n+msgstr \"تغییر زبان رابط کاربری\"\n+\n+#: searx/templates/oscar/preferences.html:58\n+#: searx/templates/simple/preferences.html:60\n+msgid \"Find stuff as you type\"\n+msgstr \"یافتن مطالب حین نوشتن\"\n+\n+#: searx/templates/oscar/preferences.html:69\n+#: searx/templates/simple/preferences.html:173\n+msgid \"Proxying image results through searx\"\n+msgstr \"پراکسی کردن نتایج تصویری از طریق searx
\"\n+\n+#: searx/templates/oscar/preferences.html:78\n+msgid \"\"\n+\"Change how forms are submited, learn more about request methods\"\n+msgstr \"چگونگی ثبت فرم ها را تغییر دهید، درباره ی متود های درخواست بیشتر بدانید\"\n+\n+#: searx/templates/oscar/preferences.html:87\n+#: searx/templates/simple/preferences.html:71\n+msgid \"Filter content\"\n+msgstr \"فیلتر کردن محتوا\"\n+\n+#: searx/templates/oscar/preferences.html:97\n+#: searx/templates/simple/preferences.html:139\n+msgid \"Change searx layout\"\n+msgstr \"رابط کاربری searx را تغییر دهید
\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Choose style for this theme\"\n+msgstr \"سبک این پوسته را انتخاب کنید\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Style\"\n+msgstr \"سبک\"\n+\n+#: searx/templates/oscar/preferences.html:122\n+msgid \"Open Access DOI resolver\"\n+msgstr \"حل کننده ی Open Access DOI
\"\n+\n+#: searx/templates/oscar/preferences.html:123\n+msgid \"\"\n+\"Redirect to open-access versions of publications when available (plugin \"\n+\"required)\"\n+msgstr \"هدایت به نسخه رایگان نشریات اگر در دسترس باشد(نیازمند به افزونه)
\"\n+\n+#: searx/templates/oscar/preferences.html:163\n+#: searx/templates/oscar/preferences.html:175\n+#: searx/templates/simple/preferences.html:88\n+msgid \"Shortcut\"\n+msgstr \"میانبر
\"\n+\n+#: searx/templates/oscar/preferences.html:164\n+#: searx/templates/oscar/preferences.html:174\n+msgid \"Selected language\"\n+msgstr \"زبان انتخابی
\"\n+\n+#: searx/templates/oscar/preferences.html:166\n+#: searx/templates/oscar/preferences.html:172\n+#: searx/templates/simple/preferences.html:91\n+msgid \"Time range\"\n+msgstr \"بازه ی زمانی
\"\n+\n+#: searx/templates/oscar/preferences.html:167\n+#: searx/templates/oscar/preferences.html:171\n+#: searx/templates/simple/preferences.html:92\n+msgid \"Avg. time\"\n+msgstr \"زمان میانگین\"\n+\n+#: searx/templates/oscar/preferences.html:168\n+#: searx/templates/oscar/preferences.html:170\n+#: searx/templates/simple/preferences.html:93\n+msgid \"Max time\"\n+msgstr \"حداکثر زمان\"\n+\n+#: searx/templates/oscar/preferences.html:248\n+msgid \"This is the list of searx's instant answering modules.\"\n+msgstr \"این، فهرست ماژول‌های پاسخ بلادرنگ searx است.\"\n+\n+#: searx/templates/oscar/preferences.html:252\n+msgid \"Name\"\n+msgstr \"نام\"\n+\n+#: searx/templates/oscar/preferences.html:253\n+msgid \"Keywords\"\n+msgstr \"کلیدواژه ها
\"\n+\n+#: searx/templates/oscar/preferences.html:254\n+msgid \"Description\"\n+msgstr \"شرح
\"\n+\n+#: searx/templates/oscar/preferences.html:255\n+msgid \"Examples\"\n+msgstr \"مثال ها
\"\n+\n+#: searx/templates/oscar/preferences.html:275\n+msgid \"\"\n+\"This is the list of cookies and their values searx is storing on your \"\n+\"computer.\"\n+msgstr \"این، لیست کوکی‌ها و مقادیری است که searx روی دستگاه شما ذخیره می‌کند.\"\n+\n+#: searx/templates/oscar/preferences.html:276\n+msgid \"With that list, you can assess searx transparency.\"\n+msgstr \"با آن لیست، می‌توانید شفافیت searx را ارزیابی کنید.\"\n+\n+#: searx/templates/oscar/preferences.html:281\n+msgid \"Cookie name\"\n+msgstr \"نام کوکی
\"\n+\n+#: searx/templates/oscar/preferences.html:282\n+msgid \"Value\"\n+msgstr \"مقدار
\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"Search URL of the currently saved preferences\"\n+msgstr \"آدرس جستجو بر اساس تنظیمات ذخیره شده
\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"\"\n+\"Note: specifying custom settings in the search URL can reduce privacy by \"\n+\"leaking data to the clicked result sites.\"\n+msgstr \"هشدار: تعیین تنظیمات شخصی در آدرس جستجو میتواند حریم شخصی شما را به خطر بیاندازد با درز کردن اطلاعات به سایت های نتایج انتخاب شده.
\"\n+\n+#: searx/templates/oscar/results.html:17\n+msgid \"Search results\"\n+msgstr \"نتایج جستجو
\"\n+\n+#: searx/templates/oscar/results.html:21\n+#: searx/templates/simple/results.html:84\n+msgid \"Try searching for:\"\n+msgstr \"تلاش کنید برای جستجوی:\"\n+\n+#: searx/templates/oscar/results.html:100\n+#: searx/templates/simple/results.html:25\n+msgid \"Engines cannot retrieve results\"\n+msgstr \"موتور ها قادر به دریافت نتایج نیستند
\"\n+\n+#: searx/templates/oscar/results.html:131\n+msgid \"Links\"\n+msgstr \"لینک ها
\"\n+\n+#: searx/templates/oscar/search.html:8\n+#: searx/templates/oscar/search_full.html:11\n+#: searx/templates/simple/search.html:5\n+msgid \"Start search\"\n+msgstr \"شروع جستجو
\"\n+\n+#: searx/templates/oscar/stats.html:2\n+msgid \"stats\"\n+msgstr \"آمار
\"\n+\n+#: searx/templates/oscar/time-range.html:3\n+#: searx/templates/simple/time-range.html:3\n+msgid \"Anytime\"\n+msgstr \"هر زمانی
\"\n+\n+#: searx/templates/oscar/time-range.html:6\n+#: searx/templates/simple/time-range.html:6\n+msgid \"Last day\"\n+msgstr \"روز گذشته\"\n+\n+#: searx/templates/oscar/time-range.html:9\n+#: searx/templates/simple/time-range.html:9\n+msgid \"Last week\"\n+msgstr \"هفته گذشته\"\n+\n+#: searx/templates/oscar/time-range.html:12\n+#: searx/templates/simple/time-range.html:12\n+msgid \"Last month\"\n+msgstr \"ماه گذشته\"\n+\n+#: searx/templates/oscar/time-range.html:15\n+#: searx/templates/simple/time-range.html:15\n+msgid \"Last year\"\n+msgstr \"سال گذشته\"\n+\n+#: searx/templates/oscar/messages/first_time.html:6\n+#: searx/templates/oscar/messages/no_data_available.html:3\n+msgid \"Heads up!\"\n+msgstr \"بالاخره!
\"\n+\n+#: searx/templates/oscar/messages/first_time.html:7\n+msgid \"It look like you are using searx first time.\"\n+msgstr \"به نظر می‌رسد اولین باری است که از searx استفاده می‌کنید.\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:3\n+msgid \"Information!\"\n+msgstr \"اطلاعات!\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:4\n+msgid \"currently, there are no cookies defined.\"\n+msgstr \"در حال حاضر کوکی‌ای تعریف نشده است.\"\n+\n+#: searx/templates/oscar/messages/no_data_available.html:4\n+msgid \"There is currently no data available. \"\n+msgstr \"در حال حاضر هیچ داده‌ای در دسترس نیست.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+msgid \"Engines cannot retrieve results.\"\n+msgstr \"موتورها قادر به دریافت نتایج نیستند.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:10\n+#: searx/templates/simple/messages/no_results.html:10\n+msgid \"Please, try again later or find another searx instance.\"\n+msgstr \"لطفا بعدا دوباره تلاش کنید و یا به دنبال نمونه‌ای دیگری از searx بگردید.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:14\n+#: searx/templates/simple/messages/no_results.html:14\n+msgid \"Sorry!\"\n+msgstr \"ببخشید!\"\n+\n+#: searx/templates/oscar/messages/no_results.html:15\n+#: searx/templates/simple/messages/no_results.html:15\n+msgid \"\"\n+\"we didn't find any results. Please use another query or search in more \"\n+\"categories.\"\n+msgstr \"چیزی پیدا نشد. لطفا جستار دیگری را امتحان و یا در دسته‌ های بیشتری جستجو کنید.\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:7\n+msgid \"Well done!\"\n+msgstr \"آفرین!
\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:8\n+msgid \"Settings saved successfully.\"\n+msgstr \"تنظیمات با موفقیت ذخیره شد!
\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:7\n+msgid \"Oh snap!\"\n+msgstr \"ای وای! خراب شد!
\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:8\n+msgid \"Something went wrong.\"\n+msgstr \"مشکلی رخ داد.\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"show media\"\n+msgstr \"نمایش رسانه
\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"hide media\"\n+msgstr \"پنهان سازی رسانه
\"\n+\n+#: searx/templates/oscar/result_templates/images.html:30\n+msgid \"Get image\"\n+msgstr \"دریافت تصویر\"\n+\n+#: searx/templates/oscar/result_templates/images.html:33\n+msgid \"View source\"\n+msgstr \"نمایش منبع\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"show map\"\n+msgstr \"نمایش نقشه\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"hide map\"\n+msgstr \"پنهان‌سازی نقشه\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"show details\"\n+msgstr \"نمایش جزئیات\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"hide details\"\n+msgstr \"پنهان‌سازی جزئیات\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:7\n+#: searx/templates/simple/result_templates/torrent.html:11\n+msgid \"Filesize\"\n+msgstr \"اندازه فایل
\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:9\n+#: searx/templates/simple/result_templates/torrent.html:12\n+msgid \"Bytes\"\n+msgstr \"بایت\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:10\n+#: searx/templates/simple/result_templates/torrent.html:13\n+msgid \"kiB\"\n+msgstr \"کیلوبایت\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:11\n+#: searx/templates/simple/result_templates/torrent.html:14\n+msgid \"MiB\"\n+msgstr \"مگابایت\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:12\n+#: searx/templates/simple/result_templates/torrent.html:15\n+msgid \"GiB\"\n+msgstr \"گیگابایت\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:13\n+#: searx/templates/simple/result_templates/torrent.html:16\n+msgid \"TiB\"\n+msgstr \"ترابایت\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:15\n+#: searx/templates/simple/result_templates/torrent.html:20\n+msgid \"Number of Files\"\n+msgstr \"تعداد فایل ها\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"show video\"\n+msgstr \"نمایش ویدئو\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"hide video\"\n+msgstr \"پنهان‌سازی ویدئو\"\n+\n+#: searx/templates/pix-art/results.html:28\n+msgid \"Load more...\"\n+msgstr \"بیشتر…
\"\n+\n+#: searx/templates/simple/base.html:31\n+msgid \"No item found\"\n+msgstr \"چیزی پیدا نشد\"\n+\n+#: searx/templates/simple/preferences.html:89\n+msgid \"Supports selected language\"\n+msgstr \"زبان انتخاب شده را پشتیبانی می‌کند\"\n+\n+#: searx/templates/simple/preferences.html:118\n+msgid \"User interface\"\n+msgstr \"رابط کاربری\"\n+\n+#: searx/templates/simple/preferences.html:154\n+msgid \"Privacy\"\n+msgstr \"حریم خصوصی\"\ndiff --git a/searx/translations/fi/LC_MESSAGES/messages.mo b/searx/translations/fi/LC_MESSAGES/messages.mo\nindex c6aa15235d..b3905ca2f4 100644\nBinary files a/searx/translations/fi/LC_MESSAGES/messages.mo and b/searx/translations/fi/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/fil/LC_MESSAGES/messages.mo b/searx/translations/fil/LC_MESSAGES/messages.mo\nindex df14318fbf..f1b4e75c77 100644\nBinary files a/searx/translations/fil/LC_MESSAGES/messages.mo and b/searx/translations/fil/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/fr/LC_MESSAGES/messages.mo b/searx/translations/fr/LC_MESSAGES/messages.mo\nindex f9a72717a6..7fd3ee8917 100644\nBinary files a/searx/translations/fr/LC_MESSAGES/messages.mo and b/searx/translations/fr/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/fr/LC_MESSAGES/messages.po b/searx/translations/fr/LC_MESSAGES/messages.po\nindex 60c8038059..4c076ddbf7 100644\n--- a/searx/translations/fr/LC_MESSAGES/messages.po\n+++ b/searx/translations/fr/LC_MESSAGES/messages.po\n@@ -3,7 +3,7 @@\n # This file is distributed under the same license as the PROJECT project.\n # \n # Translators:\n-# Alexandre Flament , 2017\n+# Alexandre Flament , 2017-2018\n # Benjamin Sonntag , 2014\n # Cqoicebordel , 2014\n # Cqoicebordel , 2014-2017\n@@ -16,8 +16,8 @@ msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2017-11-03 12:00+0000\\n\"\n-\"Last-Translator: Cqoicebordel \\n\"\n+\"PO-Revision-Date: 2018-04-03 08:18+0000\\n\"\n+\"Last-Translator: Alexandre Flament \\n\"\n \"Language-Team: French (http://www.transifex.com/asciimoo/searx/language/fr/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -84,7 +84,7 @@ msgstr \"Paramètres non valides, veuillez éditer vos préférences\"\n \n #: searx/webapp.py:415\n msgid \"Invalid settings\"\n-msgstr \"Réglages non valides\"\n+msgstr \"Paramètres non valides\"\n \n #: searx/webapp.py:449 searx/webapp.py:493\n msgid \"search error\"\n@@ -100,7 +100,7 @@ msgstr \"il y a {hours} heure(s), {minutes} minute(s)\"\n \n #: searx/answerers/random/answerer.py:53\n msgid \"Random value generator\"\n-msgstr \"Générateur aléatoire\"\n+msgstr \"Générateur de valeur aléatoire\"\n \n #: searx/answerers/random/answerer.py:54\n msgid \"Generate different random values\"\n@@ -138,7 +138,7 @@ msgstr \"Score par résultat\"\n \n #: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n msgid \"Errors\"\n-msgstr \"Erreurs\"\n+msgstr \"Erreur\"\n \n #: searx/engines/pdbe.py:87\n msgid \"{title}&nbsp;(OBSOLETE)\"\n@@ -663,7 +663,7 @@ msgstr \"Plugins\"\n #: searx/templates/oscar/preferences.html:16\n #: searx/templates/oscar/preferences.html:245\n msgid \"Answerers\"\n-msgstr \"Répondants\"\n+msgstr \"Réponses instantanées\"\n \n #: searx/templates/oscar/preferences.html:17\n #: searx/templates/oscar/preferences.html:272\ndiff --git a/searx/translations/gl/LC_MESSAGES/messages.mo b/searx/translations/gl/LC_MESSAGES/messages.mo\nnew 
index 0000000000..30d7b0dd60
Binary files /dev/null and b/searx/translations/gl/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/gl/LC_MESSAGES/messages.po b/searx/translations/gl/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..3e1e2230fd
--- /dev/null
+++ b/searx/translations/gl/LC_MESSAGES/messages.po
@@ -0,0 +1,1013 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+# 
+# Translators:
+# Xosé M. Lamas , 2018
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2018-04-20 11:00+0000\n"
+"Last-Translator: Xosé M. Lamas \n"
+"Language-Team: Galician (http://www.transifex.com/asciimoo/searx/language/gl/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: gl\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr "caducidade"
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr "excepción na petición"
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr "fallo non agardado"
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "ficheiros"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "xeral"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "música"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "Medios sociais"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "imaxes"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "vídeos"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "TIC"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "novas"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "mapa"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "ciencia"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "Axustes non válidos, por favor edite a configuración"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "Axustes non válidos"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "fallo na busca"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "hai {minutes} minuto(s)"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "hai {hours} hora(s), {minutes} minuto(s)"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "Xerador de valor aleatorio"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "Xerar diferentes valores aleatorios"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "Funcións de estatística"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "Calcule {functions} dos argumentos"
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr "Tempo de busca (sec)"
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr "Cargou en (seg)"
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "Número de resultados"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "Puntuacións"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr "Puntuacións por resultado"
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "Fallos"
+
+#: searx/engines/pdbe.py:87
+msgid "{title}&nbsp;(OBSOLETE)"
+msgstr "{title}&nbsp;(OBSOLETO)"
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr "Esta entrada foi proporcionada por"
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr "Non hai dispoñible un extracto para esta publicación."
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr "Reescribir ligazóns HTTP a HTTPS si fose posible"
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr "Desplazamento infinito"
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr "Cargar automáticamente a seguinte páxina ao desplazarse ao fondo da páxina actual"
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr "Reescritura Open Access DOI"
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr "Evitar muros de pago redirecciionando a versións públicas das publicacións cando estén dispoñibles"
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "Abrir ligazóns de resultados en novas lapelas do navegador"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "Por omisión, os resultados ábrense na mesma lapela. Este engadido sobreescribe o comportamento por omisión para abrir as ligazóns en novas lapelas/ventás. (Require JavaScript)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr "Busca en categoría seleccionada"
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr "Busca ao momento si hai unha categoría seleccionada. Desactivar para seleccionar múltiples categorías. (Require JavaScript)"
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr "Mostra o seu IP si a consulta é \"ip\" e o seu Use Agent si a consulta contén \"user agent\"."
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr "Eliminador de rastrexadores na URL"
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr "Elimina os argumentos de rastrexo da URL devolta"
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr "Atallos como os de Vim"
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr "Navegar nos resultados da busca con atallos como os de Vim (require JavaScript). Pulse \"h\" na pantalla principal ou de resultados para obter axuda."
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "Páxina non atopada"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "Ir a %(search_page)s"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "páxina de busca"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "axustes"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "Sobre"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "Axustes"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "Categorías por omisión"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "Idioma de busca"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "Idioma por omisión"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "Idioma da interface"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "Autocompletar"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr "Proxy de imaxes"
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "Activado"
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "Desactivado"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "Método"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "Busca segura"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "Stricta"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "Moderada"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "Ningunha"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "Decorado"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "Cor"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "Azul (por omisión)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "Violeta"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "Verde"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "Cian"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "Laranxa"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "Vermello"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "Motores de busca utilizados actualmente"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "Nome do motor"
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "Categoría"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "Permitir"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "Bloquear"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr "Estos axustes gárdanse en testemuños, esto permítenos non ter que gardar ningún datos sobre vostede."
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr "Estos testemuños son para a súa conveniencia, non utilizamos estos testemuños para rastrexala."
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "gardar"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "Restablecer"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "atrás"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr "Busca URL"
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "Descargar resultados"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "Respostas"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "Suxestións"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "páxina anterior"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "páxina seguinte"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "Buscar por..."
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr "Estatísticas do buscador"
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "contexto orixinal"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "Sementadora"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "Cliente"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "ligazón magnet"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "ficheiro torrent"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "Pulse na lupa para realizar a busca"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "Resultados en novas lapelas"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "On"
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "Off"
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr "en memoria"
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "Axustes avanzados"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "Pechar"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "Fallo!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "Proporcionado por"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr "un metabuscador configurable e respetuoso coa intimidade"
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr "a través de proxy"
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr "soportado"
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr "non soportado"
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "Xeral"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "Motores"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "Engadidos"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "Respostas"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "Testemuños"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "Qué idioma prefire para buscar?"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "Cambiar o idioma da disposición"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "Buscar nas cousas metras escribe"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr "Utilizar o proxy de searx para as imaxes dos resultados"
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, learn more about request methods"
+msgstr "Cambiar cómo se envían os formularios, coñeza máis sobre os métodos de consulta"
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "Filtro de contido"
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr "Cambiar a disposición de searx"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr "Escolla o estilo para este decorado"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "Estilo"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr "Resolutor Open Access DOI"
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr "Redireccionar a versións abertas das publicacións cando estén dispoñibles (require o engadido)"
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "Atallo"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "Idioma seleccionado"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "Rango temporal"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr "Tempo medio"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "Tempo máx."
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr "Este é o listado dos módulos de respostas instantáneas de searx"
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "Nome"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr "Palabras chave"
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "Descrición"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "Exemplos"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr "Este é o listados dos testemuños e os seus valores que searx almacena na súa computadora."
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr "Con esta lista vostede pode comprobar a transparencia de searx."
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "Nome do testemuño"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "Valor"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr "URL de busca dos axustes gardados actualmente."
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr "Nota: indicando axustes personalizados na URL de busca pode reducir a súa intimidade ao filtrar datos aos sitios web dos resultados."
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr "Resultados da busca"
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "Intente buscar:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "Os buscadores non obtiveron resultados"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "Ligazóns"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "Iniciar busca"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "estatísticas"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "Calquer momento"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "Último día"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "Última semana"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "Último mes"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "Último ano"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "Heads up!"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr "Semella que é a primeira vez que utiliza searx."
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "Información!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr "actualmente non hai testemuños establecidos."
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr "Non hai datos dispoñibles."
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "Os buscadores non poden obter resultados."
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr "Por favor, inténteo de novo máis tarde ou busque outra instancia de searx."
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "Sentímolo!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr "non atopamos ningún resultado. Por favor, utilice outra consulta ou busque en máis categorías."
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "Ben feito!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "Gardáronse correctamente os Axustes."
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "Vaia!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "Algo fallou."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "mostrar medios"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "agochar medios"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "Obter imaxe"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "Ver fonte"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "mostrar mapa"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "agochar mapa"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "mostrar detalles"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "agochar detalles"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "Tamaño do ficheiro"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "Bytes"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "kiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "MiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "GiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "TiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "Número de ficheiros"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "mostrar vídeo"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "agochar vídeo"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "Cargar máis..."
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "Non se atoparon elementos"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "Soporta o idioma seleccionado"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr "Interface de usuaria"
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "Intimidade"
diff --git a/searx/translations/he/LC_MESSAGES/messages.mo b/searx/translations/he/LC_MESSAGES/messages.mo
index 10bb25c50c..c034025007 100644
Binary files a/searx/translations/he/LC_MESSAGES/messages.mo and b/searx/translations/he/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/hr/LC_MESSAGES/messages.mo b/searx/translations/hr/LC_MESSAGES/messages.mo
index ee986d5c9d..9e232a1513 100644
Binary files a/searx/translations/hr/LC_MESSAGES/messages.mo and b/searx/translations/hr/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/hu/LC_MESSAGES/messages.mo b/searx/translations/hu/LC_MESSAGES/messages.mo
index 4e06f2fd37..392dc99dff 100644
Binary files a/searx/translations/hu/LC_MESSAGES/messages.mo and b/searx/translations/hu/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/it/LC_MESSAGES/messages.mo b/searx/translations/it/LC_MESSAGES/messages.mo
index 49dabadcf1..53eca9e674 100644
Binary files a/searx/translations/it/LC_MESSAGES/messages.mo and b/searx/translations/it/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/it/LC_MESSAGES/messages.po b/searx/translations/it/LC_MESSAGES/messages.po
index b2db9b3f7f..c2d5c85887 100644
--- a/searx/translations/it/LC_MESSAGES/messages.po
+++ b/searx/translations/it/LC_MESSAGES/messages.po
@@ -5,18 +5,20 @@
 # Translators:
 # Adam Tauber , 2018
 # caoswave, 2016
-# caoswave, 2016-2017
+# caoswave, 2016-2018
 # dp , 2014
 # dp , 2014,2017
+# Federico , 2018
 # Luca C , 2017
 # Luc , 2015
+# Random_R, 2018
 msgid ""
 msgstr ""
 "Project-Id-Version: searx\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2018-02-01 22:21+0000\\n\"\n-\"Last-Translator: Adam Tauber \\n\"\n+\"PO-Revision-Date: 2018-10-16 15:53+0000\\n\"\n+\"Last-Translator: caoswave\\n\"\n \"Language-Team: Italian (http://www.transifex.com/asciimoo/searx/language/it/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -31,7 +33,7 @@ msgstr \"tempo scaduto\"\n \n #: searx/search.py:144\n msgid \"request exception\"\n-msgstr \"\"\n+msgstr \"eccezione della richiesta\"\n \n #: searx/search.py:151\n msgid \"unexpected crash\"\n@@ -149,7 +151,7 @@ msgstr \"Questa voce è stata sostituita da\"\n \n #: searx/engines/pubmed.py:74\n msgid \"No abstract is available for this publication.\"\n-msgstr \"\"\n+msgstr \"Nessun sommario disponibile per questa pubblicazione\"\n \n #: searx/plugins/https_rewrite.py:32\n msgid \"Rewrite HTTP links to HTTPS if possible\"\n@@ -199,7 +201,7 @@ msgstr \"Esegui la ricerca immediatamente se una categoria è selezionata. Disabi\n msgid \"\"\n \"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n \"contains \\\"user agent\\\".\"\n-msgstr \"Mostra il tuo IP se hai cercato \\\\\\\"ip\\\\\\\" ed il tuo user agent se hai cercato \\\\\\\"user agent\\\\\\\".\"\n+msgstr \"Mostra il tuo IP se hai cercato \\\"ip\\\" ed il tuo user agent se hai cercato \\\"user agent\\\".\"\n \n #: searx/plugins/tracker_url_remover.py:26\n msgid \"Tracker URL remover\"\n@@ -217,7 +219,7 @@ msgstr \"Scorciatoie in stile Vim\"\n msgid \"\"\n \"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n \"\\\"h\\\" key on main or result page to get help.\"\n-msgstr \"Usa comandi in stile Vim per navigare tra i risultati (JavaScript necessario). Premi il tasto \\\\\\\"h\\\\\\\" per visualizzare la finestra d'aiuto.\"\n+msgstr \"Usa comandi in stile Vim per navigare tra i risultati (JavaScript necessario). 
Premi il tasto \\\"h\\\" per visualizzare la finestra d'aiuto.\"\n \n #: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n #: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n@@ -718,13 +720,13 @@ msgstr \"Stile\"\n \n #: searx/templates/oscar/preferences.html:122\n msgid \"Open Access DOI resolver\"\n-msgstr \"\"\n+msgstr \"Resolver Open Access DOI\"\n \n #: searx/templates/oscar/preferences.html:123\n msgid \"\"\n \"Redirect to open-access versions of publications when available (plugin \"\n \"required)\"\n-msgstr \"\"\n+msgstr \"Indirizza a versioni open-access delle pubblicazioni quando disponibili (plugin richiesto)\"\n \n #: searx/templates/oscar/preferences.html:163\n #: searx/templates/oscar/preferences.html:175\n@@ -795,13 +797,13 @@ msgstr \"Valore\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"Search URL of the currently saved preferences\"\n-msgstr \"\"\n+msgstr \"Cerca URL delle preferenze attualmente salvate\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"\"\n \"Note: specifying custom settings in the search URL can reduce privacy by \"\n \"leaking data to the clicked result sites.\"\n-msgstr \"\"\n+msgstr \"Nota: specificando impostazioni personalizzate nella ricerca URL può ridurre la privacy facendo traperlare dati ai siti cliccati\"\n \n #: searx/templates/oscar/results.html:17\n msgid \"Search results\"\n@@ -815,7 +817,7 @@ msgstr \"Prova a cercare:\"\n #: searx/templates/oscar/results.html:100\n #: searx/templates/simple/results.html:25\n msgid \"Engines cannot retrieve results\"\n-msgstr \"\"\n+msgstr \"I motori di ricerca non riescono a recuperare risultati\"\n \n #: searx/templates/oscar/results.html:131\n msgid \"Links\"\n@@ -880,7 +882,7 @@ msgstr \"Non ci sono dati attualmente disponibili.\"\n #: searx/templates/oscar/messages/no_results.html:4\n #: searx/templates/simple/messages/no_results.html:4\n msgid \"Engines cannot retrieve results.\"\n-msgstr \"\"\n+msgstr \"I motori di ricerca non riescono a recuperare risultati\"\n \n #: searx/templates/oscar/messages/no_results.html:10\n #: searx/templates/simple/messages/no_results.html:10\ndiff --git a/searx/translations/ja/LC_MESSAGES/messages.mo b/searx/translations/ja/LC_MESSAGES/messages.mo\nindex 39ea698cbc..eb1267287a 100644\nBinary files a/searx/translations/ja/LC_MESSAGES/messages.mo and b/searx/translations/ja/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/ja/LC_MESSAGES/messages.po b/searx/translations/ja/LC_MESSAGES/messages.po\nindex 11a297ca28..24d2c359ef 100644\n--- a/searx/translations/ja/LC_MESSAGES/messages.po\n+++ b/searx/translations/ja/LC_MESSAGES/messages.po\n@@ -3,11 +3,12 @@\n # This file is distributed under the same license as the PROJECT project.\n # \n # Translators:\n-# Akio Nishimura , 2016-2017\n+# Akio Nishimura , 2016-2018\n # Thomas Pointhuber, 2014-2015\n # FIRST AUTHOR , 2014,2016\n # Lucas Phillips , 2015\n # Max , 2015\n+# Nobuhiro Iwamatsu , 2018\n # pointhi, 2014\n # Thomas Pointhuber, 2015-2016\n msgid \"\"\n@@ -15,8 +16,8 @@ msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2017-11-01 20:31+0000\\n\"\n-\"Last-Translator: Adam Tauber \\n\"\n+\"PO-Revision-Date: 2018-08-13 03:03+0000\\n\"\n+\"Last-Translator: Nobuhiro Iwamatsu \\n\"\n \"Language-Team: Japanese (http://www.transifex.com/asciimoo/searx/language/ja/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; 
charset=UTF-8\\n\"\n@@ -27,15 +28,15 @@ msgstr \"\"\n \n #: searx/search.py:137 searx/search.py:182\n msgid \"timeout\"\n-msgstr \"\"\n+msgstr \"タイムアウト\"\n \n #: searx/search.py:144\n msgid \"request exception\"\n-msgstr \"\"\n+msgstr \"例外要求\"\n \n #: searx/search.py:151\n msgid \"unexpected crash\"\n-msgstr \"\"\n+msgstr \"予期しないクラッシュ\"\n \n #: searx/webapp.py:136\n msgid \"files\"\n@@ -83,7 +84,7 @@ msgstr \"不正な設定です。設定を編集してください。\"\n \n #: searx/webapp.py:415\n msgid \"Invalid settings\"\n-msgstr \"\"\n+msgstr \"不正な設定\"\n \n #: searx/webapp.py:449 searx/webapp.py:493\n msgid \"search error\"\n@@ -149,11 +150,11 @@ msgstr \"このエントリーの優先\"\n \n #: searx/engines/pubmed.py:74\n msgid \"No abstract is available for this publication.\"\n-msgstr \"\"\n+msgstr \"この出版物には要約がありません。\"\n \n #: searx/plugins/https_rewrite.py:32\n msgid \"Rewrite HTTP links to HTTPS if possible\"\n-msgstr \"可能ならばHTTPリンクをHTTPSリンクに書き換える\"\n+msgstr \"可能ならば HTTP リンクを HTTPS リンクに書き換える\"\n \n #: searx/plugins/infinite_scroll.py:3\n msgid \"Infinite scroll\"\n@@ -165,7 +166,7 @@ msgstr \"現在のページの下端でスクロールすると自動的に次の\n \n #: searx/plugins/oa_doi_rewrite.py:9\n msgid \"Open Access DOI rewrite\"\n-msgstr \"\"\n+msgstr \"オープンアクセス DOI リライト\"\n \n #: searx/plugins/oa_doi_rewrite.py:10\n msgid \"\"\n@@ -183,7 +184,7 @@ msgstr \"検索結果のリンクを新しいタブで開く\"\n msgid \"\"\n \"Results are opened in the same window by default. This plugin overwrites the\"\n \" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n-msgstr \"デフォルトでは結果は同じウィンドウで開きます。このプラグインはデフォルトの動作を書き換えて新しいタブ/ウィンドウで開くようにします。(JavaScriptが必要です)\"\n+msgstr \"デフォルトでは結果は同じウィンドウで開きます。このプラグインはデフォルトの動作を書き換えて新しいタブ/ウィンドウで開くようにします。(JavaScript が必要です)\"\n \n #: searx/plugins/search_on_category_select.py:18\n msgid \"Search on category select\"\n@@ -193,37 +194,37 @@ msgstr \"カテゴリ選択したら検索を実行\"\n msgid \"\"\n \"Perform search immediately if a category selected. Disable to select \"\n \"multiple categories. (JavaScript required)\"\n-msgstr \"カテゴリが選択されたときに検索を実行します。複数のカテゴリを選択する場合は無効にします。(JavaScriptが必要です)\"\n+msgstr \"カテゴリが選択されたときに検索を実行します。複数のカテゴリを選択する場合は無効にします。(JavaScript が必要です)\"\n \n #: searx/plugins/self_info.py:20\n msgid \"\"\n \"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n \"contains \\\"user agent\\\".\"\n-msgstr \"クエリが \\\"ip\\\" の場合にあなたのIPを、クエリに\\\"user agent\\\"が含まれる場合にあなたのユーザーエージェントを表示します。\"\n+msgstr \"クエリが \\\"ip\\\" の場合にあなたのIPを、クエリに \\\"user agent\\\" が含まれる場合にあなたのユーザーエージェントを表示します。\"\n \n #: searx/plugins/tracker_url_remover.py:26\n msgid \"Tracker URL remover\"\n-msgstr \"トラッカーURLリムーバー\"\n+msgstr \"トラッカー URL リムーバー\"\n \n #: searx/plugins/tracker_url_remover.py:27\n msgid \"Remove trackers arguments from the returned URL\"\n-msgstr \"返されたURLからトラッカー引数を消去します。\"\n+msgstr \"返された URL からトラッカー引数を消去する\"\n \n #: searx/plugins/vim_hotkeys.py:3\n msgid \"Vim-like hotkeys\"\n-msgstr \"Vim風のホットキー\"\n+msgstr \"Vim 風のホットキー\"\n \n #: searx/plugins/vim_hotkeys.py:4\n msgid \"\"\n \"Navigate search results with Vim-like hotkeys (JavaScript required). 
Press \"\n \"\\\"h\\\" key on main or result page to get help.\"\n-msgstr \"検索結果をVim風のホットキーで操作します(JavaScriptが必要)。メインページまたは検索結果ページで\\\"h\\\"キーを押してヘルプを表示します。\"\n+msgstr \"検索結果をVim 風のホットキーで操作します(JavaScript が必要)。メインページまたは検索結果ページで \\\"h\\\" キーを押してヘルプを表示します。\"\n \n #: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n #: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n #: searx/templates/simple/404.html:4\n msgid \"Page not found\"\n-msgstr \"ページが見付かりません\"\n+msgstr \"ページが見つかりません\"\n \n #: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n #: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n@@ -315,14 +316,14 @@ msgstr \"画像プロキシ\"\n #: searx/templates/oscar/preferences.html:72\n #: searx/templates/simple/preferences.html:169\n msgid \"Enabled\"\n-msgstr \"有効にする\"\n+msgstr \"有効\"\n \n #: searx/templates/courgette/preferences.html:49\n #: searx/templates/legacy/preferences.html:50\n #: searx/templates/oscar/preferences.html:73\n #: searx/templates/simple/preferences.html:170\n msgid \"Disabled\"\n-msgstr \"使用不可能にする\"\n+msgstr \"無効\"\n \n #: searx/templates/courgette/preferences.html:54\n #: searx/templates/legacy/preferences.html:55\n@@ -489,7 +490,7 @@ msgstr \"戻る\"\n #: searx/templates/oscar/results.html:136\n #: searx/templates/simple/results.html:58\n msgid \"Search URL\"\n-msgstr \"URLを検索する\"\n+msgstr \"URL を検索する\"\n \n #: searx/templates/courgette/results.html:16\n #: searx/templates/legacy/results.html:17\n@@ -623,7 +624,7 @@ msgstr \"エラー!\"\n \n #: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n msgid \"Powered by\"\n-msgstr \"提供:\"\n+msgstr \"Powered by\"\n \n #: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n msgid \"a privacy-respecting, hackable metasearch engine\"\n@@ -636,11 +637,11 @@ msgstr \"プロキシされている\"\n \n #: searx/templates/oscar/macros.html:92\n msgid \"supported\"\n-msgstr \"\"\n+msgstr \"サポート\"\n \n #: searx/templates/oscar/macros.html:96\n msgid \"not supported\"\n-msgstr \"\"\n+msgstr \"未サポート\"\n \n #: searx/templates/oscar/preferences.html:13\n #: searx/templates/oscar/preferences.html:22\n@@ -672,12 +673,12 @@ msgstr \"クッキー\"\n #: searx/templates/oscar/preferences.html:42\n #: searx/templates/simple/preferences.html:48\n msgid \"What language do you prefer for search?\"\n-msgstr \"検索に使う言語はどれが良いですか?\"\n+msgstr \"検索に使う言語はどれが良いですか?\"\n \n #: searx/templates/oscar/preferences.html:48\n #: searx/templates/simple/preferences.html:128\n msgid \"Change the language of the layout\"\n-msgstr \"表示する言語を変更できます\"\n+msgstr \"表示する言語を変更\"\n \n #: searx/templates/oscar/preferences.html:58\n #: searx/templates/simple/preferences.html:60\n@@ -687,7 +688,7 @@ msgstr \"入力補助に使う検索エンジン\"\n #: searx/templates/oscar/preferences.html:69\n #: searx/templates/simple/preferences.html:173\n msgid \"Proxying image results through searx\"\n-msgstr \"画像検索結果をsearxでプロキシする\"\n+msgstr \"画像検索結果を searx でプロキシする\"\n \n #: searx/templates/oscar/preferences.html:78\n msgid \"\"\n@@ -704,7 +705,7 @@ msgstr \"コンテンツをフィルタリングする\"\n #: searx/templates/oscar/preferences.html:97\n #: searx/templates/simple/preferences.html:139\n msgid \"Change searx layout\"\n-msgstr \"searxのレイアウトの変更\"\n+msgstr \"Searx のレイアウトの変更\"\n \n #: searx/templates/oscar/preferences.html:106\n #: searx/templates/oscar/preferences.html:111\n@@ -718,13 +719,13 @@ msgstr \"スタイル\"\n \n #: searx/templates/oscar/preferences.html:122\n msgid \"Open Access DOI resolver\"\n-msgstr \"\"\n+msgstr \"オープンアクセス DOI リゾルバー\"\n \n #: 
searx/templates/oscar/preferences.html:123\n msgid \"\"\n \"Redirect to open-access versions of publications when available (plugin \"\n \"required)\"\n-msgstr \"\"\n+msgstr \"利用可能な場合(プラグインが必要)、オープンアクセス版の出版物にリダイレクトする\"\n \n #: searx/templates/oscar/preferences.html:163\n #: searx/templates/oscar/preferences.html:175\n@@ -735,7 +736,7 @@ msgstr \"ショートカット\"\n #: searx/templates/oscar/preferences.html:164\n #: searx/templates/oscar/preferences.html:174\n msgid \"Selected language\"\n-msgstr \"\"\n+msgstr \"選択された言語\"\n \n #: searx/templates/oscar/preferences.html:166\n #: searx/templates/oscar/preferences.html:172\n@@ -757,7 +758,7 @@ msgstr \"最大時間\"\n \n #: searx/templates/oscar/preferences.html:248\n msgid \"This is the list of searx's instant answering modules.\"\n-msgstr \"これはsearxの即席回答モジュールのリストです。\"\n+msgstr \"これは searx の即席回答モジュールのリストです。\"\n \n #: searx/templates/oscar/preferences.html:252\n msgid \"Name\"\n@@ -783,7 +784,7 @@ msgstr \"これはクッキーのリストで、これらの値はあなたのコ\n \n #: searx/templates/oscar/preferences.html:276\n msgid \"With that list, you can assess searx transparency.\"\n-msgstr \"このリストによって、あなたはsearxの透明性を評価できます。\"\n+msgstr \"このリストによって、あなたは searx の透明性を評価できます。\"\n \n #: searx/templates/oscar/preferences.html:281\n msgid \"Cookie name\"\n@@ -795,13 +796,13 @@ msgstr \"値\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"Search URL of the currently saved preferences\"\n-msgstr \"\"\n+msgstr \"現在保存されている設定の検索 URL\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"\"\n \"Note: specifying custom settings in the search URL can reduce privacy by \"\n \"leaking data to the clicked result sites.\"\n-msgstr \"\"\n+msgstr \"注意: 検索 URL にカスタム設定を指定すると、クリックした結果サイトにデータが漏洩し、プライバシーが低下する恐れがあります。\"\n \n #: searx/templates/oscar/results.html:17\n msgid \"Search results\"\n@@ -810,12 +811,12 @@ msgstr \"検索結果\"\n #: searx/templates/oscar/results.html:21\n #: searx/templates/simple/results.html:84\n msgid \"Try searching for:\"\n-msgstr \"\"\n+msgstr \"検索:\"\n \n #: searx/templates/oscar/results.html:100\n #: searx/templates/simple/results.html:25\n msgid \"Engines cannot retrieve results\"\n-msgstr \"\"\n+msgstr \"エンジンは結果を取得できません\"\n \n #: searx/templates/oscar/results.html:131\n msgid \"Links\"\n@@ -859,15 +860,15 @@ msgstr \"去年\"\n #: searx/templates/oscar/messages/first_time.html:6\n #: searx/templates/oscar/messages/no_data_available.html:3\n msgid \"Heads up!\"\n-msgstr \"お知らせ\"\n+msgstr \"気をつけて!\"\n \n #: searx/templates/oscar/messages/first_time.html:7\n msgid \"It look like you are using searx first time.\"\n-msgstr \"searxを使うのは初めてようですね。\"\n+msgstr \"Searxを使うのは初めてようですね。\"\n \n #: searx/templates/oscar/messages/no_cookies.html:3\n msgid \"Information!\"\n-msgstr \"お知らせ\"\n+msgstr \"お知らせ!\"\n \n #: searx/templates/oscar/messages/no_cookies.html:4\n msgid \"currently, there are no cookies defined.\"\n@@ -880,12 +881,12 @@ msgstr \"現在データがありません。\"\n #: searx/templates/oscar/messages/no_results.html:4\n #: searx/templates/simple/messages/no_results.html:4\n msgid \"Engines cannot retrieve results.\"\n-msgstr \"\"\n+msgstr \"エンジンは結果を取得できません。\"\n \n #: searx/templates/oscar/messages/no_results.html:10\n #: searx/templates/simple/messages/no_results.html:10\n msgid \"Please, try again later or find another searx instance.\"\n-msgstr \"\"\n+msgstr \"後でやり直すか、別の searx インスタンスを探して下さい。\"\n \n #: searx/templates/oscar/messages/no_results.html:14\n #: searx/templates/simple/messages/no_results.html:14\n@@ -901,7 +902,7 @@ msgstr \"検索結果はありませんでした。別カテゴリで、他のク\n \n #: 
searx/templates/oscar/messages/save_settings_successfull.html:7\n msgid \"Well done!\"\n-msgstr \"あっぱれ。\"\n+msgstr \"あっぱれ!\"\n \n #: searx/templates/oscar/messages/save_settings_successfull.html:8\n msgid \"Settings saved successfully.\"\n@@ -909,7 +910,7 @@ msgstr \"設定の保存に成功しました。\"\n \n #: searx/templates/oscar/messages/unknow_error.html:7\n msgid \"Oh snap!\"\n-msgstr \"ちぇっ\"\n+msgstr \"ちぇっ!\"\n \n #: searx/templates/oscar/messages/unknow_error.html:8\n msgid \"Something went wrong.\"\n@@ -956,7 +957,7 @@ msgstr \"詳細を隠す\"\n #: searx/templates/oscar/result_templates/torrent.html:7\n #: searx/templates/simple/result_templates/torrent.html:11\n msgid \"Filesize\"\n-msgstr \"ファイル・サイズ\"\n+msgstr \"ファイルサイズ\"\n \n #: searx/templates/oscar/result_templates/torrent.html:9\n #: searx/templates/simple/result_templates/torrent.html:12\n@@ -1004,16 +1005,16 @@ msgstr \"もっと見る…\"\n \n #: searx/templates/simple/base.html:31\n msgid \"No item found\"\n-msgstr \"\"\n+msgstr \"アイテムが見つかりません\"\n \n #: searx/templates/simple/preferences.html:89\n msgid \"Supports selected language\"\n-msgstr \"選択している言語のサポート\"\n+msgstr \"選択された言語のサポート\"\n \n #: searx/templates/simple/preferences.html:118\n msgid \"User interface\"\n-msgstr \"\"\n+msgstr \"ユーザーインターフェース\"\n \n #: searx/templates/simple/preferences.html:154\n msgid \"Privacy\"\n-msgstr \"\"\n+msgstr \"プライバシー\"\ndiff --git a/searx/translations/nl/LC_MESSAGES/messages.mo b/searx/translations/nl/LC_MESSAGES/messages.mo\nindex 15fcacbc4a..057853be52 100644\nBinary files a/searx/translations/nl/LC_MESSAGES/messages.mo and b/searx/translations/nl/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/nl/LC_MESSAGES/messages.po b/searx/translations/nl/LC_MESSAGES/messages.po\nindex 1fc8700f95..b4e0614025 100644\n--- a/searx/translations/nl/LC_MESSAGES/messages.po\n+++ b/searx/translations/nl/LC_MESSAGES/messages.po\n@@ -3,7 +3,7 @@\n # This file is distributed under the same license as the PROJECT project.\n # \n # Translators:\n-# André Koot , 2014-2017\n+# André Koot , 2014-2018\n # Nathan Follens, 2015-2018\n # Rejo Zenger , 2016-2017\n msgid \"\"\n@@ -11,8 +11,8 @@ msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2018-02-15 22:43+0000\\n\"\n-\"Last-Translator: Nathan Follens\\n\"\n+\"PO-Revision-Date: 2018-09-22 06:46+0000\\n\"\n+\"Last-Translator: André Koot \\n\"\n \"Language-Team: Dutch (http://www.transifex.com/asciimoo/searx/language/nl/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -835,7 +835,7 @@ msgstr \"Altijd\"\n #: searx/templates/oscar/time-range.html:6\n #: searx/templates/simple/time-range.html:6\n msgid \"Last day\"\n-msgstr \"Voorbije dag\"\n+msgstr \"Gisteren\"\n \n #: searx/templates/oscar/time-range.html:9\n #: searx/templates/simple/time-range.html:9\n@@ -881,7 +881,7 @@ msgstr \"Zoekmachines konden geen resultaten ophalen.\"\n #: searx/templates/oscar/messages/no_results.html:10\n #: searx/templates/simple/messages/no_results.html:10\n msgid \"Please, try again later or find another searx instance.\"\n-msgstr \"Probeer het later opnieuw, of gebruik een andere instantie van searx.\"\n+msgstr \"Probeer het later opnieuw, of gebruik een andere searx server.\"\n \n #: searx/templates/oscar/messages/no_results.html:14\n #: searx/templates/simple/messages/no_results.html:14\ndiff --git a/searx/translations/nl_BE/LC_MESSAGES/messages.mo b/searx/translations/nl_BE/LC_MESSAGES/messages.mo\nnew 
file mode 100644\nindex 0000000000..e233f5c897\nBinary files /dev/null and b/searx/translations/nl_BE/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/nl_BE/LC_MESSAGES/messages.po b/searx/translations/nl_BE/LC_MESSAGES/messages.po\nnew file mode 100644\nindex 0000000000..c4ef0228ac\n--- /dev/null\n+++ b/searx/translations/nl_BE/LC_MESSAGES/messages.po\n@@ -0,0 +1,1013 @@\n+# Translations template for PROJECT.\n+# Copyright (C) 2017 ORGANIZATION\n+# This file is distributed under the same license as the PROJECT project.\n+# \n+# Translators:\n+# Nathan Follens, 2018\n+msgid \"\"\n+msgstr \"\"\n+\"Project-Id-Version: searx\\n\"\n+\"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n+\"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n+\"PO-Revision-Date: 2018-06-24 07:59+0000\\n\"\n+\"Last-Translator: Nathan Follens\\n\"\n+\"Language-Team: Dutch (Belgium) (http://www.transifex.com/asciimoo/searx/language/nl_BE/)\\n\"\n+\"MIME-Version: 1.0\\n\"\n+\"Content-Type: text/plain; charset=UTF-8\\n\"\n+\"Content-Transfer-Encoding: 8bit\\n\"\n+\"Generated-By: Babel 2.3.4\\n\"\n+\"Language: nl_BE\\n\"\n+\"Plural-Forms: nplurals=2; plural=(n != 1);\\n\"\n+\n+#: searx/search.py:137 searx/search.py:182\n+msgid \"timeout\"\n+msgstr \"time-out\"\n+\n+#: searx/search.py:144\n+msgid \"request exception\"\n+msgstr \"aanvraaguitzondering\"\n+\n+#: searx/search.py:151\n+msgid \"unexpected crash\"\n+msgstr \"onverwachte crash\"\n+\n+#: searx/webapp.py:136\n+msgid \"files\"\n+msgstr \"bestanden\"\n+\n+#: searx/webapp.py:137\n+msgid \"general\"\n+msgstr \"algemeen\"\n+\n+#: searx/webapp.py:138\n+msgid \"music\"\n+msgstr \"muziek\"\n+\n+#: searx/webapp.py:139\n+msgid \"social media\"\n+msgstr \"sociale media\"\n+\n+#: searx/webapp.py:140\n+msgid \"images\"\n+msgstr \"afbeeldingen\"\n+\n+#: searx/webapp.py:141\n+msgid \"videos\"\n+msgstr \"video’s\"\n+\n+#: searx/webapp.py:142\n+msgid \"it\"\n+msgstr \"IT\"\n+\n+#: searx/webapp.py:143\n+msgid \"news\"\n+msgstr \"nieuws\"\n+\n+#: searx/webapp.py:144\n+msgid \"map\"\n+msgstr \"kaart\"\n+\n+#: searx/webapp.py:145\n+msgid \"science\"\n+msgstr \"wetenschap\"\n+\n+#: searx/webapp.py:399 searx/webapp.py:658\n+msgid \"Invalid settings, please edit your preferences\"\n+msgstr \"Ongeldige instellingen, werkt uw voorkeuren bij\"\n+\n+#: searx/webapp.py:415\n+msgid \"Invalid settings\"\n+msgstr \"Ongeldige instellingen\"\n+\n+#: searx/webapp.py:449 searx/webapp.py:493\n+msgid \"search error\"\n+msgstr \"zoekfout\"\n+\n+#: searx/webapp.py:530\n+msgid \"{minutes} minute(s) ago\"\n+msgstr \"{minutes} min geleden\"\n+\n+#: searx/webapp.py:532\n+msgid \"{hours} hour(s), {minutes} minute(s) ago\"\n+msgstr \"{hours} uur, {minutes} min geleden\"\n+\n+#: searx/answerers/random/answerer.py:53\n+msgid \"Random value generator\"\n+msgstr \"Willekeurigewaardegenerator\"\n+\n+#: searx/answerers/random/answerer.py:54\n+msgid \"Generate different random values\"\n+msgstr \"Genereert verschillende willekeurige waarden\"\n+\n+#: searx/answerers/statistics/answerer.py:53\n+msgid \"Statistics functions\"\n+msgstr \"Statistische functies\"\n+\n+#: searx/answerers/statistics/answerer.py:54\n+msgid \"Compute {functions} of the arguments\"\n+msgstr \"Berekent {functions} van de argumenten\"\n+\n+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n+msgid \"Engine time (sec)\"\n+msgstr \"Snelheid zoekmachien (sec)\"\n+\n+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n+msgid \"Page loads (sec)\"\n+msgstr \"Laden van pagina’s (sec)\"\n+\n+#: 
searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n+#: searx/templates/oscar/results.html:95\n+#: searx/templates/simple/results.html:20\n+msgid \"Number of results\"\n+msgstr \"Aantal zoekresultaten\"\n+\n+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213\n+msgid \"Scores\"\n+msgstr \"Scores\"\n+\n+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217\n+msgid \"Scores per result\"\n+msgstr \"Scores per zoekresultaat\"\n+\n+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n+msgid \"Errors\"\n+msgstr \"Fouten\"\n+\n+#: searx/engines/pdbe.py:87\n+msgid \"{title}&nbsp;(OBSOLETE)\"\n+msgstr \"{title}&nbsp;(VEROUDERD)\"\n+\n+#: searx/engines/pdbe.py:91\n+msgid \"This entry has been superseded by\"\n+msgstr \"Dit object is vervangen door\"\n+\n+#: searx/engines/pubmed.py:74\n+msgid \"No abstract is available for this publication.\"\n+msgstr \"Voor deze publicatie is geen abstract beschikbaar.\"\n+\n+#: searx/plugins/https_rewrite.py:32\n+msgid \"Rewrite HTTP links to HTTPS if possible\"\n+msgstr \"Herschrijft HTTP-koppelingen naar HTTPS, indien mogelijk\"\n+\n+#: searx/plugins/infinite_scroll.py:3\n+msgid \"Infinite scroll\"\n+msgstr \"Oneindig scrollen\"\n+\n+#: searx/plugins/infinite_scroll.py:4\n+msgid \"Automatically load next page when scrolling to bottom of current page\"\n+msgstr \"Volgende pagina automatisch laden bij bereiken van den onderkant van de huidige pagina\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:9\n+msgid \"Open Access DOI rewrite\"\n+msgstr \"Open Access DOI herschrijven\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:10\n+msgid \"\"\n+\"Avoid paywalls by redirecting to open-access versions of publications when \"\n+\"available\"\n+msgstr \"Omzeilt betaalmuren met een doorverwijzing naar vrij toegankelijke versies van publicaties indien beschikbaar\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:18\n+#: searx/templates/oscar/preferences.html:114\n+#: searx/templates/simple/preferences.html:149\n+msgid \"Open result links on new browser tabs\"\n+msgstr \"Koppelingen openen in nieuwe tabbladen\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:19\n+msgid \"\"\n+\"Results are opened in the same window by default. This plugin overwrites the\"\n+\" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n+msgstr \"Resultaten worden standaard in hetzelfde venster geopend. Deze plug-in overschrijft het standaardgedrag zodat koppelingen in nieuwe tabbladen/vensters worden geopend. (JavaScript vereist)\"\n+\n+#: searx/plugins/search_on_category_select.py:18\n+msgid \"Search on category select\"\n+msgstr \"Zoeken bij selecteren van categorie\"\n+\n+#: searx/plugins/search_on_category_select.py:19\n+msgid \"\"\n+\"Perform search immediately if a category selected. Disable to select \"\n+\"multiple categories. (JavaScript required)\"\n+msgstr \"Zoekopdracht onmiddellijk uitvoeren wanneer dat een categorie wordt geselecteerd. Zet dit uit voor meerdere categorieën te selecteren. 
(JavaScript vereist)\"\n+\n+#: searx/plugins/self_info.py:20\n+msgid \"\"\n+\"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n+\"contains \\\"user agent\\\".\"\n+msgstr \"Geeft uw IP-adres weer als de zoekopdracht ‘ip’ is, en uwe gebruikersagent als de zoekopdracht ‘user agent’ bevat.\"\n+\n+#: searx/plugins/tracker_url_remover.py:26\n+msgid \"Tracker URL remover\"\n+msgstr \"Tracker-URL-verwijderaar\"\n+\n+#: searx/plugins/tracker_url_remover.py:27\n+msgid \"Remove trackers arguments from the returned URL\"\n+msgstr \"Verwijdert trackerargumenten van de gekregen URL\"\n+\n+#: searx/plugins/vim_hotkeys.py:3\n+msgid \"Vim-like hotkeys\"\n+msgstr \"Sneltoetsen gelijk in Vim\"\n+\n+#: searx/plugins/vim_hotkeys.py:4\n+msgid \"\"\n+\"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n+\"\\\"h\\\" key on main or result page to get help.\"\n+msgstr \"Bladert door zoekresultaten met sneltoetsen gelijk die in Vim (JavaScript vereist). Drukt op ‘h’ op de hoofdpagina of de pagina met resultaten voor hulp.\"\n+\n+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n+#: searx/templates/simple/404.html:4\n+msgid \"Page not found\"\n+msgstr \"Pagina niet gevonden\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+#, python-format\n+msgid \"Go to %(search_page)s.\"\n+msgstr \"Ga naar %(search_page)s.\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+msgid \"search page\"\n+msgstr \"zoekpagina\"\n+\n+#: searx/templates/courgette/index.html:9\n+#: searx/templates/courgette/index.html:13\n+#: searx/templates/courgette/results.html:5\n+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12\n+#: searx/templates/oscar/navbar.html:7\n+#: searx/templates/oscar/preferences.html:3\n+#: searx/templates/pix-art/index.html:8\n+msgid \"preferences\"\n+msgstr \"voorkeuren\"\n+\n+#: searx/templates/courgette/index.html:11\n+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2\n+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7\n+msgid \"about\"\n+msgstr \"over\"\n+\n+#: searx/templates/courgette/preferences.html:5\n+#: searx/templates/legacy/preferences.html:5\n+#: searx/templates/oscar/preferences.html:8\n+#: searx/templates/pix-art/preferences.html:5\n+#: searx/templates/simple/preferences.html:26\n+msgid \"Preferences\"\n+msgstr \"Voorkeuren\"\n+\n+#: searx/templates/courgette/preferences.html:9\n+#: searx/templates/legacy/preferences.html:9\n+#: searx/templates/oscar/preferences.html:33\n+#: searx/templates/oscar/preferences.html:35\n+#: searx/templates/simple/preferences.html:34\n+msgid \"Default categories\"\n+msgstr \"Standaardcategorieën\"\n+\n+#: searx/templates/courgette/preferences.html:13\n+#: searx/templates/legacy/preferences.html:14\n+#: searx/templates/oscar/preferences.html:41\n+#: searx/templates/pix-art/preferences.html:9\n+#: searx/templates/simple/preferences.html:39\n+#: searx/templates/simple/preferences.html:163\n+msgid \"Search language\"\n+msgstr \"Zoektaal\"\n+\n+#: searx/templates/courgette/preferences.html:16\n+#: searx/templates/legacy/preferences.html:17\n+#: 
searx/templates/oscar/languages.html:6\n+#: searx/templates/pix-art/preferences.html:12\n+#: searx/templates/simple/languages.html:2\n+#: searx/templates/simple/preferences.html:42\n+msgid \"Default language\"\n+msgstr \"Standaardtaal\"\n+\n+#: searx/templates/courgette/preferences.html:24\n+#: searx/templates/legacy/preferences.html:25\n+#: searx/templates/oscar/preferences.html:47\n+#: searx/templates/pix-art/preferences.html:20\n+#: searx/templates/simple/preferences.html:120\n+msgid \"Interface language\"\n+msgstr \"Interfacetaal\"\n+\n+#: searx/templates/courgette/preferences.html:34\n+#: searx/templates/legacy/preferences.html:35\n+#: searx/templates/oscar/preferences.html:57\n+#: searx/templates/simple/preferences.html:51\n+msgid \"Autocomplete\"\n+msgstr \"Auto-aanvullen\"\n+\n+#: searx/templates/courgette/preferences.html:45\n+#: searx/templates/legacy/preferences.html:46\n+#: searx/templates/oscar/preferences.html:68\n+#: searx/templates/simple/preferences.html:166\n+msgid \"Image proxy\"\n+msgstr \"Afbeeldingsproxy\"\n+\n+#: searx/templates/courgette/preferences.html:48\n+#: searx/templates/legacy/preferences.html:49\n+#: searx/templates/oscar/preferences.html:72\n+#: searx/templates/simple/preferences.html:169\n+msgid \"Enabled\"\n+msgstr \"Ingeschakeld\"\n+\n+#: searx/templates/courgette/preferences.html:49\n+#: searx/templates/legacy/preferences.html:50\n+#: searx/templates/oscar/preferences.html:73\n+#: searx/templates/simple/preferences.html:170\n+msgid \"Disabled\"\n+msgstr \"Uitgeschakeld\"\n+\n+#: searx/templates/courgette/preferences.html:54\n+#: searx/templates/legacy/preferences.html:55\n+#: searx/templates/oscar/preferences.html:77\n+#: searx/templates/pix-art/preferences.html:30\n+#: searx/templates/simple/preferences.html:156\n+msgid \"Method\"\n+msgstr \"Methode\"\n+\n+#: searx/templates/courgette/preferences.html:63\n+#: searx/templates/legacy/preferences.html:64\n+#: searx/templates/oscar/preferences.html:86\n+#: searx/templates/oscar/preferences.html:165\n+#: searx/templates/oscar/preferences.html:173\n+#: searx/templates/simple/preferences.html:63\n+#: searx/templates/simple/preferences.html:90\n+msgid \"SafeSearch\"\n+msgstr \"SafeSearch\"\n+\n+#: searx/templates/courgette/preferences.html:66\n+#: searx/templates/legacy/preferences.html:67\n+#: searx/templates/oscar/preferences.html:90\n+#: searx/templates/simple/preferences.html:66\n+msgid \"Strict\"\n+msgstr \"Strikt\"\n+\n+#: searx/templates/courgette/preferences.html:67\n+#: searx/templates/legacy/preferences.html:68\n+#: searx/templates/oscar/preferences.html:91\n+#: searx/templates/simple/preferences.html:67\n+msgid \"Moderate\"\n+msgstr \"Gemiddeld\"\n+\n+#: searx/templates/courgette/preferences.html:68\n+#: searx/templates/legacy/preferences.html:69\n+#: searx/templates/oscar/preferences.html:92\n+#: searx/templates/simple/preferences.html:68\n+msgid \"None\"\n+msgstr \"Geen\"\n+\n+#: searx/templates/courgette/preferences.html:73\n+#: searx/templates/legacy/preferences.html:74\n+#: searx/templates/oscar/preferences.html:96\n+#: searx/templates/pix-art/preferences.html:39\n+#: searx/templates/simple/preferences.html:131\n+msgid \"Themes\"\n+msgstr \"Thema’s\"\n+\n+#: searx/templates/courgette/preferences.html:83\n+msgid \"Color\"\n+msgstr \"Kleur\"\n+\n+#: searx/templates/courgette/preferences.html:86\n+msgid \"Blue (default)\"\n+msgstr \"Blauw (standaard)\"\n+\n+#: searx/templates/courgette/preferences.html:87\n+msgid \"Violet\"\n+msgstr \"Paars\"\n+\n+#: 
searx/templates/courgette/preferences.html:88\n+msgid \"Green\"\n+msgstr \"Groen\"\n+\n+#: searx/templates/courgette/preferences.html:89\n+msgid \"Cyan\"\n+msgstr \"Appelblauwzeegroen\"\n+\n+#: searx/templates/courgette/preferences.html:90\n+msgid \"Orange\"\n+msgstr \"Oranje\"\n+\n+#: searx/templates/courgette/preferences.html:91\n+msgid \"Red\"\n+msgstr \"Rood\"\n+\n+#: searx/templates/courgette/preferences.html:96\n+#: searx/templates/legacy/preferences.html:93\n+#: searx/templates/pix-art/preferences.html:49\n+#: searx/templates/simple/preferences.html:77\n+msgid \"Currently used search engines\"\n+msgstr \"Momenteel gebruikte zoekmachienen\"\n+\n+#: searx/templates/courgette/preferences.html:100\n+#: searx/templates/legacy/preferences.html:97\n+#: searx/templates/oscar/preferences.html:162\n+#: searx/templates/oscar/preferences.html:176\n+#: searx/templates/pix-art/preferences.html:53\n+#: searx/templates/simple/preferences.html:87\n+msgid \"Engine name\"\n+msgstr \"Naam zoekmachien\"\n+\n+#: searx/templates/courgette/preferences.html:101\n+#: searx/templates/legacy/preferences.html:98\n+msgid \"Category\"\n+msgstr \"Categorie\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:113\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:110\n+#: searx/templates/oscar/preferences.html:161\n+#: searx/templates/oscar/preferences.html:177\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:64\n+#: searx/templates/simple/preferences.html:86\n+msgid \"Allow\"\n+msgstr \"Toestaan\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:114\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:111\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:65\n+msgid \"Block\"\n+msgstr \"Blokkeren\"\n+\n+#: searx/templates/courgette/preferences.html:122\n+#: searx/templates/legacy/preferences.html:119\n+#: searx/templates/oscar/preferences.html:297\n+#: searx/templates/pix-art/preferences.html:73\n+#: searx/templates/simple/preferences.html:180\n+msgid \"\"\n+\"These settings are stored in your cookies, this allows us not to store this \"\n+\"data about you.\"\n+msgstr \"Deze instellingen worden bewaard in uw cookies. 
Hierdoor hoeven wij niks over u te bewaren.\"\n+\n+#: searx/templates/courgette/preferences.html:124\n+#: searx/templates/legacy/preferences.html:121\n+#: searx/templates/oscar/preferences.html:299\n+#: searx/templates/pix-art/preferences.html:75\n+#: searx/templates/simple/preferences.html:182\n+msgid \"\"\n+\"These cookies serve your sole convenience, we don't use these cookies to \"\n+\"track you.\"\n+msgstr \"Deze cookies zijn alleen voor uw eigen gemak, we gebruiken deze cookies niet voor u te volgen.\"\n+\n+#: searx/templates/courgette/preferences.html:127\n+#: searx/templates/legacy/preferences.html:124\n+#: searx/templates/oscar/preferences.html:305\n+#: searx/templates/pix-art/preferences.html:78\n+#: searx/templates/simple/preferences.html:185\n+msgid \"save\"\n+msgstr \"opslaan\"\n+\n+#: searx/templates/courgette/preferences.html:128\n+#: searx/templates/legacy/preferences.html:125\n+#: searx/templates/oscar/preferences.html:307\n+#: searx/templates/simple/preferences.html:186\n+msgid \"Reset defaults\"\n+msgstr \"Standaardinstellingen herstellen\"\n+\n+#: searx/templates/courgette/preferences.html:129\n+#: searx/templates/legacy/preferences.html:126\n+#: searx/templates/oscar/preferences.html:306\n+#: searx/templates/pix-art/preferences.html:79\n+#: searx/templates/simple/preferences.html:187\n+msgid \"back\"\n+msgstr \"terug\"\n+\n+#: searx/templates/courgette/results.html:12\n+#: searx/templates/legacy/results.html:13\n+#: searx/templates/oscar/results.html:136\n+#: searx/templates/simple/results.html:58\n+msgid \"Search URL\"\n+msgstr \"Zoek-URL\"\n+\n+#: searx/templates/courgette/results.html:16\n+#: searx/templates/legacy/results.html:17\n+#: searx/templates/oscar/results.html:141\n+#: searx/templates/simple/results.html:62\n+msgid \"Download results\"\n+msgstr \"Zoekresultaten downloaden\"\n+\n+#: searx/templates/courgette/results.html:34\n+#: searx/templates/legacy/results.html:35\n+#: searx/templates/simple/results.html:10\n+msgid \"Answers\"\n+msgstr \"Antwoorden\"\n+\n+#: searx/templates/courgette/results.html:42\n+#: searx/templates/legacy/results.html:43\n+#: searx/templates/oscar/results.html:116\n+#: searx/templates/simple/results.html:42\n+msgid \"Suggestions\"\n+msgstr \"Suggesties\"\n+\n+#: searx/templates/courgette/results.html:70\n+#: searx/templates/legacy/results.html:81\n+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78\n+#: searx/templates/simple/results.html:130\n+msgid \"previous page\"\n+msgstr \"vorige pagina\"\n+\n+#: searx/templates/courgette/results.html:81\n+#: searx/templates/legacy/results.html:92\n+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84\n+#: searx/templates/simple/results.html:145\n+msgid \"next page\"\n+msgstr \"volgende pagina\"\n+\n+#: searx/templates/courgette/search.html:3\n+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6\n+#: searx/templates/oscar/search_full.html:9\n+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4\n+msgid \"Search for...\"\n+msgstr \"Zoeken naar...\"\n+\n+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4\n+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4\n+#: searx/templates/simple/stats.html:7\n+msgid \"Engine stats\"\n+msgstr \"Zoekmachienstatistieken\"\n+\n+#: searx/templates/courgette/result_templates/images.html:4\n+#: searx/templates/legacy/result_templates/images.html:4\n+#: 
searx/templates/pix-art/result_templates/images.html:4\n+msgid \"original context\"\n+msgstr \"oorspronkelijke context\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Seeder\"\n+msgstr \"Seeders\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Leecher\"\n+msgstr \"Leechers\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:9\n+#: searx/templates/legacy/result_templates/torrent.html:9\n+#: searx/templates/oscar/macros.html:23\n+#: searx/templates/simple/result_templates/torrent.html:6\n+msgid \"magnet link\"\n+msgstr \"magneetkoppeling\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:10\n+#: searx/templates/legacy/result_templates/torrent.html:10\n+#: searx/templates/oscar/macros.html:24\n+#: searx/templates/simple/result_templates/torrent.html:7\n+msgid \"torrent file\"\n+msgstr \"torrentbestand\"\n+\n+#: searx/templates/legacy/categories.html:8\n+#: searx/templates/simple/categories.html:6\n+msgid \"Click on the magnifier to perform search\"\n+msgstr \"Klikt op het vergrootglas voor te zoeken\"\n+\n+#: searx/templates/legacy/preferences.html:84\n+#: searx/templates/oscar/preferences.html:113\n+#: searx/templates/simple/preferences.html:142\n+msgid \"Results on new tabs\"\n+msgstr \"Resultaten op nieuwe tabbladen\"\n+\n+#: searx/templates/legacy/preferences.html:87\n+#: searx/templates/oscar/preferences.html:117\n+#: searx/templates/simple/preferences.html:145\n+msgid \"On\"\n+msgstr \"Aan\"\n+\n+#: searx/templates/legacy/preferences.html:88\n+#: searx/templates/oscar/preferences.html:118\n+#: searx/templates/simple/preferences.html:146\n+msgid \"Off\"\n+msgstr \"Uit\"\n+\n+#: searx/templates/legacy/result_templates/code.html:3\n+#: searx/templates/legacy/result_templates/default.html:3\n+#: searx/templates/legacy/result_templates/map.html:9\n+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48\n+#: searx/templates/simple/macros.html:43\n+msgid \"cached\"\n+msgstr \"gecachet\"\n+\n+#: searx/templates/oscar/advanced.html:4\n+msgid \"Advanced settings\"\n+msgstr \"Geavanceerde instellingen\"\n+\n+#: searx/templates/oscar/base.html:62\n+#: searx/templates/oscar/messages/first_time.html:4\n+#: searx/templates/oscar/messages/save_settings_successfull.html:5\n+#: searx/templates/oscar/messages/unknow_error.html:5\n+msgid \"Close\"\n+msgstr \"Sluiten\"\n+\n+#: searx/templates/oscar/base.html:64\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+#: searx/templates/simple/results.html:25\n+msgid \"Error!\"\n+msgstr \"Fout!\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"Powered by\"\n+msgstr \"Aangedreven door\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"a privacy-respecting, hackable metasearch engine\"\n+msgstr \"een privacyrespecterend, aanpasbaar metazoekmachien\"\n+\n+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n+#: searx/templates/simple/macros.html:43\n+msgid \"proxied\"\n+msgstr \"geproxyt\"\n+\n+#: searx/templates/oscar/macros.html:92\n+msgid 
\"supported\"\n+msgstr \"ondersteund\"\n+\n+#: searx/templates/oscar/macros.html:96\n+msgid \"not supported\"\n+msgstr \"niet ondersteund\"\n+\n+#: searx/templates/oscar/preferences.html:13\n+#: searx/templates/oscar/preferences.html:22\n+#: searx/templates/simple/preferences.html:32\n+msgid \"General\"\n+msgstr \"Algemeen\"\n+\n+#: searx/templates/oscar/preferences.html:14\n+#: searx/templates/oscar/preferences.html:146\n+#: searx/templates/simple/preferences.html:76\n+msgid \"Engines\"\n+msgstr \"Zoekmachienen\"\n+\n+#: searx/templates/oscar/preferences.html:15\n+#: searx/templates/oscar/preferences.html:219\n+msgid \"Plugins\"\n+msgstr \"Plug-ins\"\n+\n+#: searx/templates/oscar/preferences.html:16\n+#: searx/templates/oscar/preferences.html:245\n+msgid \"Answerers\"\n+msgstr \"Beantwoorders\"\n+\n+#: searx/templates/oscar/preferences.html:17\n+#: searx/templates/oscar/preferences.html:272\n+msgid \"Cookies\"\n+msgstr \"Cookies\"\n+\n+#: searx/templates/oscar/preferences.html:42\n+#: searx/templates/simple/preferences.html:48\n+msgid \"What language do you prefer for search?\"\n+msgstr \"Welke taal wilt ge gebruiken voor het zoeken?\"\n+\n+#: searx/templates/oscar/preferences.html:48\n+#: searx/templates/simple/preferences.html:128\n+msgid \"Change the language of the layout\"\n+msgstr \"Wijzigt de taal van den opmaak\"\n+\n+#: searx/templates/oscar/preferences.html:58\n+#: searx/templates/simple/preferences.html:60\n+msgid \"Find stuff as you type\"\n+msgstr \"Zoekt tijdens het typen\"\n+\n+#: searx/templates/oscar/preferences.html:69\n+#: searx/templates/simple/preferences.html:173\n+msgid \"Proxying image results through searx\"\n+msgstr \"Afbeeldingsresultaten via searx laden\"\n+\n+#: searx/templates/oscar/preferences.html:78\n+msgid \"\"\n+\"Change how forms are submited, learn more about request methods\"\n+msgstr \"Bepaalt hoe dat de formulieren worden ingestuurd, leest meer over opvraagmethodes\"\n+\n+#: searx/templates/oscar/preferences.html:87\n+#: searx/templates/simple/preferences.html:71\n+msgid \"Filter content\"\n+msgstr \"Filteren op inhoud\"\n+\n+#: searx/templates/oscar/preferences.html:97\n+#: searx/templates/simple/preferences.html:139\n+msgid \"Change searx layout\"\n+msgstr \"Opmaak van searx aanpassen\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Choose style for this theme\"\n+msgstr \"Kiest ne stijl voor dit thema\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Style\"\n+msgstr \"Stijl\"\n+\n+#: searx/templates/oscar/preferences.html:122\n+msgid \"Open Access DOI resolver\"\n+msgstr \"Open Access DOI herschrijven\"\n+\n+#: searx/templates/oscar/preferences.html:123\n+msgid \"\"\n+\"Redirect to open-access versions of publications when available (plugin \"\n+\"required)\"\n+msgstr \"Doorverwijzen naar vrij toegankelijke versies van publicaties, indien beschikbaar (plug-in vereist)\"\n+\n+#: searx/templates/oscar/preferences.html:163\n+#: searx/templates/oscar/preferences.html:175\n+#: searx/templates/simple/preferences.html:88\n+msgid \"Shortcut\"\n+msgstr \"Snelkoppeling\"\n+\n+#: searx/templates/oscar/preferences.html:164\n+#: searx/templates/oscar/preferences.html:174\n+msgid \"Selected language\"\n+msgstr \"Geselecteerde taal\"\n+\n+#: searx/templates/oscar/preferences.html:166\n+#: searx/templates/oscar/preferences.html:172\n+#: searx/templates/simple/preferences.html:91\n+msgid \"Time range\"\n+msgstr \"Tijdspanne\"\n+\n+#: 
searx/templates/oscar/preferences.html:167\n+#: searx/templates/oscar/preferences.html:171\n+#: searx/templates/simple/preferences.html:92\n+msgid \"Avg. time\"\n+msgstr \"Gem. duur\"\n+\n+#: searx/templates/oscar/preferences.html:168\n+#: searx/templates/oscar/preferences.html:170\n+#: searx/templates/simple/preferences.html:93\n+msgid \"Max time\"\n+msgstr \"Max. duur\"\n+\n+#: searx/templates/oscar/preferences.html:248\n+msgid \"This is the list of searx's instant answering modules.\"\n+msgstr \"Dit is het overzicht van de instantantwoordmodules van searx.\"\n+\n+#: searx/templates/oscar/preferences.html:252\n+msgid \"Name\"\n+msgstr \"Naam\"\n+\n+#: searx/templates/oscar/preferences.html:253\n+msgid \"Keywords\"\n+msgstr \"Kernwoorden\"\n+\n+#: searx/templates/oscar/preferences.html:254\n+msgid \"Description\"\n+msgstr \"Beschrijving\"\n+\n+#: searx/templates/oscar/preferences.html:255\n+msgid \"Examples\"\n+msgstr \"Voorbeelden\"\n+\n+#: searx/templates/oscar/preferences.html:275\n+msgid \"\"\n+\"This is the list of cookies and their values searx is storing on your \"\n+\"computer.\"\n+msgstr \"Dit is de lijst van cookies en hun waarden die searx op uwe computer opslaat.\"\n+\n+#: searx/templates/oscar/preferences.html:276\n+msgid \"With that list, you can assess searx transparency.\"\n+msgstr \"Met deze lijst kunt ge de openheid van searx beoordelen.\"\n+\n+#: searx/templates/oscar/preferences.html:281\n+msgid \"Cookie name\"\n+msgstr \"Cookienaam\"\n+\n+#: searx/templates/oscar/preferences.html:282\n+msgid \"Value\"\n+msgstr \"Waarde\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"Search URL of the currently saved preferences\"\n+msgstr \"Zoek-URL van de momenteel opgeslagen voorkeuren\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"\"\n+\"Note: specifying custom settings in the search URL can reduce privacy by \"\n+\"leaking data to the clicked result sites.\"\n+msgstr \"Let op: aangepaste instellingen opgeven in de zoek-URL kan nadelig zijn voor uw privacy, omdat het gegevens lekt aan de aangeklikte resultaatwebsites.\"\n+\n+#: searx/templates/oscar/results.html:17\n+msgid \"Search results\"\n+msgstr \"Zoekresultaten\"\n+\n+#: searx/templates/oscar/results.html:21\n+#: searx/templates/simple/results.html:84\n+msgid \"Try searching for:\"\n+msgstr \"Probeert te zoeken naar:\"\n+\n+#: searx/templates/oscar/results.html:100\n+#: searx/templates/simple/results.html:25\n+msgid \"Engines cannot retrieve results\"\n+msgstr \"Zoekmachienen konden geen resultaten ophalen\"\n+\n+#: searx/templates/oscar/results.html:131\n+msgid \"Links\"\n+msgstr \"Koppelingen\"\n+\n+#: searx/templates/oscar/search.html:8\n+#: searx/templates/oscar/search_full.html:11\n+#: searx/templates/simple/search.html:5\n+msgid \"Start search\"\n+msgstr \"Start zoeken\"\n+\n+#: searx/templates/oscar/stats.html:2\n+msgid \"stats\"\n+msgstr \"stats\"\n+\n+#: searx/templates/oscar/time-range.html:3\n+#: searx/templates/simple/time-range.html:3\n+msgid \"Anytime\"\n+msgstr \"Altijd\"\n+\n+#: searx/templates/oscar/time-range.html:6\n+#: searx/templates/simple/time-range.html:6\n+msgid \"Last day\"\n+msgstr \"Voorbijen dag\"\n+\n+#: searx/templates/oscar/time-range.html:9\n+#: searx/templates/simple/time-range.html:9\n+msgid \"Last week\"\n+msgstr \"Voorbije week\"\n+\n+#: searx/templates/oscar/time-range.html:12\n+#: searx/templates/simple/time-range.html:12\n+msgid \"Last month\"\n+msgstr \"Voorbije maand\"\n+\n+#: searx/templates/oscar/time-range.html:15\n+#: 
searx/templates/simple/time-range.html:15\n+msgid \"Last year\"\n+msgstr \"Voorbije jaar\"\n+\n+#: searx/templates/oscar/messages/first_time.html:6\n+#: searx/templates/oscar/messages/no_data_available.html:3\n+msgid \"Heads up!\"\n+msgstr \"Opgelet!\"\n+\n+#: searx/templates/oscar/messages/first_time.html:7\n+msgid \"It look like you are using searx first time.\"\n+msgstr \"Het lijkt erop dat ge searx voor den eerste keer gebruikt.\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:3\n+msgid \"Information!\"\n+msgstr \"Informatie!\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:4\n+msgid \"currently, there are no cookies defined.\"\n+msgstr \"der zijn momenteel geen cookies gedefinieerd.\"\n+\n+#: searx/templates/oscar/messages/no_data_available.html:4\n+msgid \"There is currently no data available. \"\n+msgstr \"Der zijn momenteel geen gegevens beschikbaar.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+msgid \"Engines cannot retrieve results.\"\n+msgstr \"Zoekmachienen konden geen resultaten ophalen.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:10\n+#: searx/templates/simple/messages/no_results.html:10\n+msgid \"Please, try again later or find another searx instance.\"\n+msgstr \"Probeert het later opnieuw, of gebruikt een andere instantie van searx.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:14\n+#: searx/templates/simple/messages/no_results.html:14\n+msgid \"Sorry!\"\n+msgstr \"Sorry!\"\n+\n+#: searx/templates/oscar/messages/no_results.html:15\n+#: searx/templates/simple/messages/no_results.html:15\n+msgid \"\"\n+\"we didn't find any results. Please use another query or search in more \"\n+\"categories.\"\n+msgstr \"We konden geen resultaten vinden. 
Probeert nen andere zoekopdracht, of zoekt in meer categorieën.\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:7\n+msgid \"Well done!\"\n+msgstr \"Goed gedaan!\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:8\n+msgid \"Settings saved successfully.\"\n+msgstr \"Instellingen opgeslagen.\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:7\n+msgid \"Oh snap!\"\n+msgstr \"Oeps!\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:8\n+msgid \"Something went wrong.\"\n+msgstr \"Der is iets misgegaan.\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"show media\"\n+msgstr \"media tonen\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"hide media\"\n+msgstr \"media verbergen\"\n+\n+#: searx/templates/oscar/result_templates/images.html:30\n+msgid \"Get image\"\n+msgstr \"Afbeelding tonen\"\n+\n+#: searx/templates/oscar/result_templates/images.html:33\n+msgid \"View source\"\n+msgstr \"Bron bekijken\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"show map\"\n+msgstr \"kaart tonen\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"hide map\"\n+msgstr \"kaart verbergen\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"show details\"\n+msgstr \"details tonen\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"hide details\"\n+msgstr \"details verbergen\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:7\n+#: searx/templates/simple/result_templates/torrent.html:11\n+msgid \"Filesize\"\n+msgstr \"Bestandsgrootte\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:9\n+#: searx/templates/simple/result_templates/torrent.html:12\n+msgid \"Bytes\"\n+msgstr \"Bytes\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:10\n+#: searx/templates/simple/result_templates/torrent.html:13\n+msgid \"kiB\"\n+msgstr \"kiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:11\n+#: searx/templates/simple/result_templates/torrent.html:14\n+msgid \"MiB\"\n+msgstr \"MiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:12\n+#: searx/templates/simple/result_templates/torrent.html:15\n+msgid \"GiB\"\n+msgstr \"GiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:13\n+#: searx/templates/simple/result_templates/torrent.html:16\n+msgid \"TiB\"\n+msgstr \"TiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:15\n+#: searx/templates/simple/result_templates/torrent.html:20\n+msgid \"Number of Files\"\n+msgstr \"Aantal bestanden\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"show video\"\n+msgstr \"video tonen\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"hide video\"\n+msgstr \"video verbergen\"\n+\n+#: searx/templates/pix-art/results.html:28\n+msgid \"Load more...\"\n+msgstr \"Meer laden…\"\n+\n+#: searx/templates/simple/base.html:31\n+msgid \"No item found\"\n+msgstr \"Geen resultaat gevonden\"\n+\n+#: searx/templates/simple/preferences.html:89\n+msgid 
\"Supports selected language\"\n+msgstr \"Ondersteunt geselecteerde taal\"\n+\n+#: searx/templates/simple/preferences.html:118\n+msgid \"User interface\"\n+msgstr \"Gebruikersinterface\"\n+\n+#: searx/templates/simple/preferences.html:154\n+msgid \"Privacy\"\n+msgstr \"Privacy\"\ndiff --git a/searx/translations/pl/LC_MESSAGES/messages.mo b/searx/translations/pl/LC_MESSAGES/messages.mo\nindex d5c5f55efc..e4a4510131 100644\nBinary files a/searx/translations/pl/LC_MESSAGES/messages.mo and b/searx/translations/pl/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/pt/LC_MESSAGES/messages.mo b/searx/translations/pt/LC_MESSAGES/messages.mo\nindex bbf2fba0fa..d88c449f1e 100644\nBinary files a/searx/translations/pt/LC_MESSAGES/messages.mo and b/searx/translations/pt/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/pt/LC_MESSAGES/messages.po b/searx/translations/pt/LC_MESSAGES/messages.po\nindex 81ac6f5b9e..d8446731a5 100644\n--- a/searx/translations/pt/LC_MESSAGES/messages.po\n+++ b/searx/translations/pt/LC_MESSAGES/messages.po\n@@ -4,13 +4,14 @@\n # \n # Translators:\n # Dickprince, 2017\n+# Chacal Exodius, 2018\n msgid \"\"\n msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2017-11-01 20:31+0000\\n\"\n-\"Last-Translator: Adam Tauber \\n\"\n+\"PO-Revision-Date: 2018-12-26 22:43+0000\\n\"\n+\"Last-Translator: Chacal Exodius\\n\"\n \"Language-Team: Portuguese (http://www.transifex.com/asciimoo/searx/language/pt/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -21,15 +22,15 @@ msgstr \"\"\n \n #: searx/search.py:137 searx/search.py:182\n msgid \"timeout\"\n-msgstr \"\"\n+msgstr \"tempo esgotado\"\n \n #: searx/search.py:144\n msgid \"request exception\"\n-msgstr \"\"\n+msgstr \"solicitar exceção\"\n \n #: searx/search.py:151\n msgid \"unexpected crash\"\n-msgstr \"\"\n+msgstr \"acidente inesperado\"\n \n #: searx/webapp.py:136\n msgid \"files\"\n@@ -77,7 +78,7 @@ msgstr \"Definições inválidas, por favor edite as suas preferências\"\n \n #: searx/webapp.py:415\n msgid \"Invalid settings\"\n-msgstr \"\"\n+msgstr \"Configurações inválidas\"\n \n #: searx/webapp.py:449 searx/webapp.py:493\n msgid \"search error\"\n@@ -143,7 +144,7 @@ msgstr \"Esta entrada foi substituída por\"\n \n #: searx/engines/pubmed.py:74\n msgid \"No abstract is available for this publication.\"\n-msgstr \"\"\n+msgstr \"Nenhum resumo está disponível para esta publicação.\"\n \n #: searx/plugins/https_rewrite.py:32\n msgid \"Rewrite HTTP links to HTTPS if possible\"\n@@ -159,7 +160,7 @@ msgstr \"Carregar automaticamente a próxima página assim que se desloque para o\n \n #: searx/plugins/oa_doi_rewrite.py:9\n msgid \"Open Access DOI rewrite\"\n-msgstr \"\"\n+msgstr \"Reescrita DOI de acesso aberto\"\n \n #: searx/plugins/oa_doi_rewrite.py:10\n msgid \"\"\n@@ -461,7 +462,7 @@ msgstr \"Estes cookies servem somente para sua conveniência, não os utilizamos\n #: searx/templates/pix-art/preferences.html:78\n #: searx/templates/simple/preferences.html:185\n msgid \"save\"\n-msgstr \"guardar\"\n+msgstr \"Guardar\"\n \n #: searx/templates/courgette/preferences.html:128\n #: searx/templates/legacy/preferences.html:125\n@@ -476,7 +477,7 @@ msgstr \"Repor predefinições\"\n #: searx/templates/pix-art/preferences.html:79\n #: searx/templates/simple/preferences.html:187\n msgid \"back\"\n-msgstr \"atrás\"\n+msgstr \"Atrás\"\n \n #: 
searx/templates/courgette/results.html:12\n #: searx/templates/legacy/results.html:13\n@@ -630,11 +631,11 @@ msgstr \"via proxy\"\n \n #: searx/templates/oscar/macros.html:92\n msgid \"supported\"\n-msgstr \"\"\n+msgstr \"suportado\"\n \n #: searx/templates/oscar/macros.html:96\n msgid \"not supported\"\n-msgstr \"\"\n+msgstr \"não suportado\"\n \n #: searx/templates/oscar/preferences.html:13\n #: searx/templates/oscar/preferences.html:22\n@@ -712,13 +713,13 @@ msgstr \"Estilo\"\n \n #: searx/templates/oscar/preferences.html:122\n msgid \"Open Access DOI resolver\"\n-msgstr \"\"\n+msgstr \"Resolvedor DOI de Acesso Aberto\"\n \n #: searx/templates/oscar/preferences.html:123\n msgid \"\"\n \"Redirect to open-access versions of publications when available (plugin \"\n \"required)\"\n-msgstr \"\"\n+msgstr \"Redirecionar para versões de acesso aberto de publicações quando disponíveis (requer plug-in)\"\n \n #: searx/templates/oscar/preferences.html:163\n #: searx/templates/oscar/preferences.html:175\n@@ -729,7 +730,7 @@ msgstr \"Atalho\"\n #: searx/templates/oscar/preferences.html:164\n #: searx/templates/oscar/preferences.html:174\n msgid \"Selected language\"\n-msgstr \"\"\n+msgstr \"Idioma selecionado\"\n \n #: searx/templates/oscar/preferences.html:166\n #: searx/templates/oscar/preferences.html:172\n@@ -789,13 +790,13 @@ msgstr \"Valor\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"Search URL of the currently saved preferences\"\n-msgstr \"\"\n+msgstr \"URL de pesquisa das preferências salvas atualmente\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"\"\n \"Note: specifying custom settings in the search URL can reduce privacy by \"\n \"leaking data to the clicked result sites.\"\n-msgstr \"\"\n+msgstr \"Nota: a especificação de configurações personalizadas no URL de pesquisa pode reduzir a privacidade ao vazar dados para os sites de resultados clicados.\"\n \n #: searx/templates/oscar/results.html:17\n msgid \"Search results\"\n@@ -804,12 +805,12 @@ msgstr \"Resultados de pesquisa\"\n #: searx/templates/oscar/results.html:21\n #: searx/templates/simple/results.html:84\n msgid \"Try searching for:\"\n-msgstr \"\"\n+msgstr \"Tente pesquisar por:\"\n \n #: searx/templates/oscar/results.html:100\n #: searx/templates/simple/results.html:25\n msgid \"Engines cannot retrieve results\"\n-msgstr \"\"\n+msgstr \"Mecanismos não podem recuperar resultados\"\n \n #: searx/templates/oscar/results.html:131\n msgid \"Links\"\n@@ -874,12 +875,12 @@ msgstr \"Não existem dados disponíveis.\"\n #: searx/templates/oscar/messages/no_results.html:4\n #: searx/templates/simple/messages/no_results.html:4\n msgid \"Engines cannot retrieve results.\"\n-msgstr \"\"\n+msgstr \"Mecanismos não podem recuperar resultados.\"\n \n #: searx/templates/oscar/messages/no_results.html:10\n #: searx/templates/simple/messages/no_results.html:10\n msgid \"Please, try again later or find another searx instance.\"\n-msgstr \"\"\n+msgstr \"Por favor, tente novamente mais tarde ou encontre outra ocorrência de searx.\"\n \n #: searx/templates/oscar/messages/no_results.html:14\n #: searx/templates/simple/messages/no_results.html:14\n@@ -998,7 +999,7 @@ msgstr \"Carregar mais...\"\n \n #: searx/templates/simple/base.html:31\n msgid \"No item found\"\n-msgstr \"\"\n+msgstr \"Nenhum item encontrado\"\n \n #: searx/templates/simple/preferences.html:89\n msgid \"Supports selected language\"\n@@ -1006,8 +1007,8 @@ msgstr \"Suporta idioma selecionado\"\n \n #: searx/templates/simple/preferences.html:118\n msgid 
\"User interface\"\n-msgstr \"\"\n+msgstr \"Interface de usuário\"\n \n #: searx/templates/simple/preferences.html:154\n msgid \"Privacy\"\n-msgstr \"\"\n+msgstr \"Privacidade\"\ndiff --git a/searx/translations/pt_BR/LC_MESSAGES/messages.mo b/searx/translations/pt_BR/LC_MESSAGES/messages.mo\nindex a169ac908f..03eb74f4ef 100644\nBinary files a/searx/translations/pt_BR/LC_MESSAGES/messages.mo and b/searx/translations/pt_BR/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/pt_BR/LC_MESSAGES/messages.po b/searx/translations/pt_BR/LC_MESSAGES/messages.po\nindex b5a786c70c..6a0a6b8374 100644\n--- a/searx/translations/pt_BR/LC_MESSAGES/messages.po\n+++ b/searx/translations/pt_BR/LC_MESSAGES/messages.po\n@@ -4,6 +4,7 @@\n # \n # Translators:\n # Adam Tauber , 2017\n+# Chacal Exodius, 2018\n # Gabriel Nunes , 2017\n # Guimarães Mello , 2017\n # Neton Brício , 2015\n@@ -14,8 +15,8 @@ msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2018-03-03 12:33+0000\\n\"\n-\"Last-Translator: shizuka\\n\"\n+\"PO-Revision-Date: 2018-08-06 05:21+0000\\n\"\n+\"Last-Translator: Chacal Exodius\\n\"\n \"Language-Team: Portuguese (Brazil) (http://www.transifex.com/asciimoo/searx/language/pt_BR/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -164,7 +165,7 @@ msgstr \"Automaticamente carregar a próxima página quando ir até o fim da pág\n \n #: searx/plugins/oa_doi_rewrite.py:9\n msgid \"Open Access DOI rewrite\"\n-msgstr \"\"\n+msgstr \"Reescrita DOI de acesso aberto\"\n \n #: searx/plugins/oa_doi_rewrite.py:10\n msgid \"\"\n@@ -717,7 +718,7 @@ msgstr \"Estilo\"\n \n #: searx/templates/oscar/preferences.html:122\n msgid \"Open Access DOI resolver\"\n-msgstr \"\"\n+msgstr \"Resolvedor DOI de Acesso Aberto\"\n \n #: searx/templates/oscar/preferences.html:123\n msgid \"\"\ndiff --git a/searx/translations/ro/LC_MESSAGES/messages.mo b/searx/translations/ro/LC_MESSAGES/messages.mo\nindex 5c6714e939..f39068576a 100644\nBinary files a/searx/translations/ro/LC_MESSAGES/messages.mo and b/searx/translations/ro/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/ro/LC_MESSAGES/messages.po b/searx/translations/ro/LC_MESSAGES/messages.po\nindex 21a2e7d1d5..7cfd1be37f 100644\n--- a/searx/translations/ro/LC_MESSAGES/messages.po\n+++ b/searx/translations/ro/LC_MESSAGES/messages.po\n@@ -4,13 +4,14 @@\n # \n # Translators:\n # adrian.fita , 2015\n+# Daniel Șerbănescu , 2018\n msgid \"\"\n msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2017-11-01 20:31+0000\\n\"\n-\"Last-Translator: Adam Tauber \\n\"\n+\"PO-Revision-Date: 2018-11-15 21:18+0000\\n\"\n+\"Last-Translator: Daniel Șerbănescu \\n\"\n \"Language-Team: Romanian (http://www.transifex.com/asciimoo/searx/language/ro/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -21,15 +22,15 @@ msgstr \"\"\n \n #: searx/search.py:137 searx/search.py:182\n msgid \"timeout\"\n-msgstr \"\"\n+msgstr \"timp alocat expirat\"\n \n #: searx/search.py:144\n msgid \"request exception\"\n-msgstr \"\"\n+msgstr \"excepție la cerere\"\n \n #: searx/search.py:151\n msgid \"unexpected crash\"\n-msgstr \"\"\n+msgstr \"terminare prematură neașteptată\"\n \n #: searx/webapp.py:136\n msgid \"files\"\n@@ -37,7 +38,7 @@ msgstr \"fișiere\"\n \n #: searx/webapp.py:137\n msgid 
\"general\"\n-msgstr \"general\"\n+msgstr \"generale\"\n \n #: searx/webapp.py:138\n msgid \"music\"\n@@ -57,7 +58,7 @@ msgstr \"videouri\"\n \n #: searx/webapp.py:142\n msgid \"it\"\n-msgstr \"el(ea)\"\n+msgstr \"informatică\"\n \n #: searx/webapp.py:143\n msgid \"news\"\n@@ -65,23 +66,23 @@ msgstr \"știri\"\n \n #: searx/webapp.py:144\n msgid \"map\"\n-msgstr \"hartă\"\n+msgstr \"hărți\"\n \n #: searx/webapp.py:145\n msgid \"science\"\n-msgstr \"\"\n+msgstr \"știință\"\n \n #: searx/webapp.py:399 searx/webapp.py:658\n msgid \"Invalid settings, please edit your preferences\"\n-msgstr \"\"\n+msgstr \"Configurări nevalide, editați preferințele\"\n \n #: searx/webapp.py:415\n msgid \"Invalid settings\"\n-msgstr \"\"\n+msgstr \"Configurări nevalide\"\n \n #: searx/webapp.py:449 searx/webapp.py:493\n msgid \"search error\"\n-msgstr \"\"\n+msgstr \"eroare de căutare\"\n \n #: searx/webapp.py:530\n msgid \"{minutes} minute(s) ago\"\n@@ -93,27 +94,27 @@ msgstr \"{hours} oră(e), {minutes} minut(e) în urmă\"\n \n #: searx/answerers/random/answerer.py:53\n msgid \"Random value generator\"\n-msgstr \"\"\n+msgstr \"Generator de valori aleatorii\"\n \n #: searx/answerers/random/answerer.py:54\n msgid \"Generate different random values\"\n-msgstr \"\"\n+msgstr \"Generează valori aleatoare diferite\"\n \n #: searx/answerers/statistics/answerer.py:53\n msgid \"Statistics functions\"\n-msgstr \"\"\n+msgstr \"Funcții statistice\"\n \n #: searx/answerers/statistics/answerer.py:54\n msgid \"Compute {functions} of the arguments\"\n-msgstr \"\"\n+msgstr \"Calculează {functions} din argumente\"\n \n #: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n msgid \"Engine time (sec)\"\n-msgstr \"\"\n+msgstr \"Timpul motorului (sec)\"\n \n #: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n msgid \"Page loads (sec)\"\n-msgstr \"Încărcarea paginilor (sec)\"\n+msgstr \"Încărcarea paginii (sec)\"\n \n #: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n #: searx/templates/oscar/results.html:95\n@@ -135,15 +136,15 @@ msgstr \"Erori\"\n \n #: searx/engines/pdbe.py:87\n msgid \"{title}&nbsp;(OBSOLETE)\"\n-msgstr \"\"\n+msgstr \"{title}&nbsp;(ÎNVECHIT)\"\n \n #: searx/engines/pdbe.py:91\n msgid \"This entry has been superseded by\"\n-msgstr \"\"\n+msgstr \"Această intrare a fost perimată de\"\n \n #: searx/engines/pubmed.py:74\n msgid \"No abstract is available for this publication.\"\n-msgstr \"\"\n+msgstr \"Niciun abstract disponibil pentru această publicație.\"\n \n #: searx/plugins/https_rewrite.py:32\n msgid \"Rewrite HTTP links to HTTPS if possible\"\n@@ -151,86 +152,86 @@ msgstr \"Rescrie legăturile HTTP cu HTTPS dacă e posibil\"\n \n #: searx/plugins/infinite_scroll.py:3\n msgid \"Infinite scroll\"\n-msgstr \"\"\n+msgstr \"Derulare infinită\"\n \n #: searx/plugins/infinite_scroll.py:4\n msgid \"Automatically load next page when scrolling to bottom of current page\"\n-msgstr \"\"\n+msgstr \"Încarcă automat pagina următoare când se derulează la baza paginii curente\"\n \n #: searx/plugins/oa_doi_rewrite.py:9\n msgid \"Open Access DOI rewrite\"\n-msgstr \"\"\n+msgstr \"Rescriere către acces deschis DOI\"\n \n #: searx/plugins/oa_doi_rewrite.py:10\n msgid \"\"\n \"Avoid paywalls by redirecting to open-access versions of publications when \"\n \"available\"\n-msgstr \"\"\n+msgstr \"Evită „zidurile de plată” redirecționând către versiuni cu acces deschis ale publicațiilor când sunt disponibile\"\n \n #: searx/plugins/open_results_on_new_tab.py:18\n #: 
searx/templates/oscar/preferences.html:114\n #: searx/templates/simple/preferences.html:149\n msgid \"Open result links on new browser tabs\"\n-msgstr \"\"\n+msgstr \"Deschide legăturile rezultate în taburi noi\"\n \n #: searx/plugins/open_results_on_new_tab.py:19\n msgid \"\"\n \"Results are opened in the same window by default. This plugin overwrites the\"\n \" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n-msgstr \"\"\n+msgstr \"Rezultatele sunt deschise în aceeași fereastră în mod implicit. Acest modul suprascrie acțiunea implicită de a deschide legături în ferestre/taburi noi. (Necesită JavaScript)\"\n \n #: searx/plugins/search_on_category_select.py:18\n msgid \"Search on category select\"\n-msgstr \"Căutare în categoria selectată\"\n+msgstr \"Caută la selectarea categoriei\"\n \n #: searx/plugins/search_on_category_select.py:19\n msgid \"\"\n \"Perform search immediately if a category selected. Disable to select \"\n \"multiple categories. (JavaScript required)\"\n-msgstr \"\"\n+msgstr \"Execută căutarea imediat dacă o categorie este selectată. Dezactivează pentru a selecta categorii multiple. (Necesită JavaScript)\"\n \n #: searx/plugins/self_info.py:20\n msgid \"\"\n \"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n \"contains \\\"user agent\\\".\"\n-msgstr \"\"\n+msgstr \"Afișează IP-ul dacă interogarea este „ip” și agentul de utilizator dacă interogarea conține „user agent”.\"\n \n #: searx/plugins/tracker_url_remover.py:26\n msgid \"Tracker URL remover\"\n-msgstr \"\"\n+msgstr \"Eliminator de URL pentru urmăritor\"\n \n #: searx/plugins/tracker_url_remover.py:27\n msgid \"Remove trackers arguments from the returned URL\"\n-msgstr \"\"\n+msgstr \"Elimină argumentele urmăritorului din URL-ul returnat\"\n \n #: searx/plugins/vim_hotkeys.py:3\n msgid \"Vim-like hotkeys\"\n-msgstr \"\"\n+msgstr \"Scurtături de tastatură în stilul Vim\"\n \n #: searx/plugins/vim_hotkeys.py:4\n msgid \"\"\n \"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n \"\\\"h\\\" key on main or result page to get help.\"\n-msgstr \"\"\n+msgstr \"Navighează rezultatele căutării cu scurtături de tastatură în stilul Vim (necesită JavaScript). 
Apăsați tasta „h” în pagina principală sau în pagina cu rezultate pentru a obține ajutor.\"\n \n #: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n #: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n #: searx/templates/simple/404.html:4\n msgid \"Page not found\"\n-msgstr \"\"\n+msgstr \"Pagină negăsită\"\n \n #: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n #: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n #: searx/templates/simple/404.html:6\n #, python-format\n msgid \"Go to %(search_page)s.\"\n-msgstr \"\"\n+msgstr \"Navighează la %(search_page)s.\"\n \n #: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n #: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n #: searx/templates/simple/404.html:6\n msgid \"search page\"\n-msgstr \"\"\n+msgstr \"pagină de căutare\"\n \n #: searx/templates/courgette/index.html:9\n #: searx/templates/courgette/index.html:13\n@@ -280,7 +281,7 @@ msgstr \"Limba de căutare\"\n #: searx/templates/simple/languages.html:2\n #: searx/templates/simple/preferences.html:42\n msgid \"Default language\"\n-msgstr \"\"\n+msgstr \"Limba implicită\"\n \n #: searx/templates/courgette/preferences.html:24\n #: searx/templates/legacy/preferences.html:25\n@@ -443,7 +444,7 @@ msgstr \"Blochează\"\n msgid \"\"\n \"These settings are stored in your cookies, this allows us not to store this \"\n \"data about you.\"\n-msgstr \"Aceste setări sunt stocate în cookie-urile d-voastră, aceasta ne permite să nu stocăm aceste date despre d-voastră.\"\n+msgstr \"Aceste configurări sunt stocate în cookie-uri, ceea ce ne permite să nu stocăm aceste date despre dumneavoastră.\"\n \n #: searx/templates/courgette/preferences.html:124\n #: searx/templates/legacy/preferences.html:121\n@@ -453,7 +454,7 @@ msgstr \"Aceste setări sunt stocate în cookie-urile d-voastră, aceasta ne perm\n msgid \"\"\n \"These cookies serve your sole convenience, we don't use these cookies to \"\n \"track you.\"\n-msgstr \"Aceste cookie-uri servesc doar pentru confortul d-voastră, noi nu stocăm aceste cookie-uri pentru a vă urmări.\"\n+msgstr \"Aceste cookie-uri servesc doar pentru conveniența dumneavoastră, noi nu stocăm aceste cookie-uri pentru a vă urmări.\"\n \n #: searx/templates/courgette/preferences.html:127\n #: searx/templates/legacy/preferences.html:124\n@@ -468,7 +469,7 @@ msgstr \"salvează\"\n #: searx/templates/oscar/preferences.html:307\n #: searx/templates/simple/preferences.html:186\n msgid \"Reset defaults\"\n-msgstr \"Resetează valorile implicite\"\n+msgstr \"Restabilește la valorile implicite\"\n \n #: searx/templates/courgette/preferences.html:129\n #: searx/templates/legacy/preferences.html:126\n@@ -543,14 +544,14 @@ msgstr \"contextul original\"\n #: searx/templates/oscar/result_templates/torrent.html:6\n #: searx/templates/simple/result_templates/torrent.html:9\n msgid \"Seeder\"\n-msgstr \"Seeder\"\n+msgstr \"Partener\"\n \n #: searx/templates/courgette/result_templates/torrent.html:7\n #: searx/templates/legacy/result_templates/torrent.html:11\n #: searx/templates/oscar/result_templates/torrent.html:6\n #: searx/templates/simple/result_templates/torrent.html:9\n msgid \"Leecher\"\n-msgstr \"Leecher\"\n+msgstr \"Sursă incompletă\"\n \n #: searx/templates/courgette/result_templates/torrent.html:9\n #: searx/templates/legacy/result_templates/torrent.html:9\n@@ -575,19 +576,19 @@ msgstr \"Apăsați pe lupă pentru a executa căutarea\"\n #: 
searx/templates/oscar/preferences.html:113\n #: searx/templates/simple/preferences.html:142\n msgid \"Results on new tabs\"\n-msgstr \"\"\n+msgstr \"Rezultate în taburi noi\"\n \n #: searx/templates/legacy/preferences.html:87\n #: searx/templates/oscar/preferences.html:117\n #: searx/templates/simple/preferences.html:145\n msgid \"On\"\n-msgstr \"\"\n+msgstr \"Pornit\"\n \n #: searx/templates/legacy/preferences.html:88\n #: searx/templates/oscar/preferences.html:118\n #: searx/templates/simple/preferences.html:146\n msgid \"Off\"\n-msgstr \"\"\n+msgstr \"Oprit\"\n \n #: searx/templates/legacy/result_templates/code.html:3\n #: searx/templates/legacy/result_templates/default.html:3\n@@ -599,7 +600,7 @@ msgstr \"stocat temporar\"\n \n #: searx/templates/oscar/advanced.html:4\n msgid \"Advanced settings\"\n-msgstr \"\"\n+msgstr \"Configurări avansate\"\n \n #: searx/templates/oscar/base.html:62\n #: searx/templates/oscar/messages/first_time.html:4\n@@ -613,7 +614,7 @@ msgstr \"Închide\"\n #: searx/templates/simple/messages/no_results.html:4\n #: searx/templates/simple/results.html:25\n msgid \"Error!\"\n-msgstr \"\"\n+msgstr \"Eroare!\"\n \n #: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n msgid \"Powered by\"\n@@ -626,21 +627,21 @@ msgstr \"un meta-motor de căutare care respectă confidențialitatea\"\n #: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n #: searx/templates/simple/macros.html:43\n msgid \"proxied\"\n-msgstr \"\"\n+msgstr \"delegat\"\n \n #: searx/templates/oscar/macros.html:92\n msgid \"supported\"\n-msgstr \"\"\n+msgstr \"suportat\"\n \n #: searx/templates/oscar/macros.html:96\n msgid \"not supported\"\n-msgstr \"\"\n+msgstr \"nesuportat\"\n \n #: searx/templates/oscar/preferences.html:13\n #: searx/templates/oscar/preferences.html:22\n #: searx/templates/simple/preferences.html:32\n msgid \"General\"\n-msgstr \"General\"\n+msgstr \"Generale\"\n \n #: searx/templates/oscar/preferences.html:14\n #: searx/templates/oscar/preferences.html:146\n@@ -656,12 +657,12 @@ msgstr \"Module\"\n #: searx/templates/oscar/preferences.html:16\n #: searx/templates/oscar/preferences.html:245\n msgid \"Answerers\"\n-msgstr \"\"\n+msgstr \"Răspunzători\"\n \n #: searx/templates/oscar/preferences.html:17\n #: searx/templates/oscar/preferences.html:272\n msgid \"Cookies\"\n-msgstr \"\"\n+msgstr \"Cookie-uri\"\n \n #: searx/templates/oscar/preferences.html:42\n #: searx/templates/simple/preferences.html:48\n@@ -703,99 +704,99 @@ msgstr \"Schimbă aspectul lui searx\"\n #: searx/templates/oscar/preferences.html:106\n #: searx/templates/oscar/preferences.html:111\n msgid \"Choose style for this theme\"\n-msgstr \"\"\n+msgstr \"Alegeți stilul pentru această temă\"\n \n #: searx/templates/oscar/preferences.html:106\n #: searx/templates/oscar/preferences.html:111\n msgid \"Style\"\n-msgstr \"\"\n+msgstr \"Stil\"\n \n #: searx/templates/oscar/preferences.html:122\n msgid \"Open Access DOI resolver\"\n-msgstr \"\"\n+msgstr \"Rezolvator de acces deschis DOI\"\n \n #: searx/templates/oscar/preferences.html:123\n msgid \"\"\n \"Redirect to open-access versions of publications when available (plugin \"\n \"required)\"\n-msgstr \"\"\n+msgstr \"Redirecționează către versiuni cu acces deschis ale publicațiilor când sunt disponibile (modul necesar)\"\n \n #: searx/templates/oscar/preferences.html:163\n #: searx/templates/oscar/preferences.html:175\n #: searx/templates/simple/preferences.html:88\n msgid \"Shortcut\"\n-msgstr \"\"\n+msgstr \"Scurtătură\"\n \n #: 
searx/templates/oscar/preferences.html:164\n #: searx/templates/oscar/preferences.html:174\n msgid \"Selected language\"\n-msgstr \"\"\n+msgstr \"Limba selectată\"\n \n #: searx/templates/oscar/preferences.html:166\n #: searx/templates/oscar/preferences.html:172\n #: searx/templates/simple/preferences.html:91\n msgid \"Time range\"\n-msgstr \"\"\n+msgstr \"Interval de timp\"\n \n #: searx/templates/oscar/preferences.html:167\n #: searx/templates/oscar/preferences.html:171\n #: searx/templates/simple/preferences.html:92\n msgid \"Avg. time\"\n-msgstr \"\"\n+msgstr \"Timp mediu\"\n \n #: searx/templates/oscar/preferences.html:168\n #: searx/templates/oscar/preferences.html:170\n #: searx/templates/simple/preferences.html:93\n msgid \"Max time\"\n-msgstr \"\"\n+msgstr \"Timp maxim\"\n \n #: searx/templates/oscar/preferences.html:248\n msgid \"This is the list of searx's instant answering modules.\"\n-msgstr \"\"\n+msgstr \"Aceasta este lista de module de răspundere instantă a lui searx.\"\n \n #: searx/templates/oscar/preferences.html:252\n msgid \"Name\"\n-msgstr \"\"\n+msgstr \"Nume\"\n \n #: searx/templates/oscar/preferences.html:253\n msgid \"Keywords\"\n-msgstr \"\"\n+msgstr \"Cuvinte cheie\"\n \n #: searx/templates/oscar/preferences.html:254\n msgid \"Description\"\n-msgstr \"\"\n+msgstr \"Descriere\"\n \n #: searx/templates/oscar/preferences.html:255\n msgid \"Examples\"\n-msgstr \"\"\n+msgstr \"Exemple\"\n \n #: searx/templates/oscar/preferences.html:275\n msgid \"\"\n \"This is the list of cookies and their values searx is storing on your \"\n \"computer.\"\n-msgstr \"\"\n+msgstr \"Aceasta este lista de cookie-uri și valorile lor pe care searx le stochează pe calculatorul dumneavoastră.\"\n \n #: searx/templates/oscar/preferences.html:276\n msgid \"With that list, you can assess searx transparency.\"\n-msgstr \"\"\n+msgstr \"Cu acea listă puteți evalua nivelul de transparență al lui searx.\"\n \n #: searx/templates/oscar/preferences.html:281\n msgid \"Cookie name\"\n-msgstr \"\"\n+msgstr \"Nume cookie\"\n \n #: searx/templates/oscar/preferences.html:282\n msgid \"Value\"\n-msgstr \"\"\n+msgstr \"Valoare\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"Search URL of the currently saved preferences\"\n-msgstr \"\"\n+msgstr \"URL-ul de căutare al preferințelor salvate curent\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"\"\n \"Note: specifying custom settings in the search URL can reduce privacy by \"\n \"leaking data to the clicked result sites.\"\n-msgstr \"\"\n+msgstr \"Notă: specificând configurări personalizate în URL-ul de căutare poate reduce nivelul de confidențialitate prin scurgerea datelor către siturile accesate la căutare.\"\n \n #: searx/templates/oscar/results.html:17\n msgid \"Search results\"\n@@ -804,12 +805,12 @@ msgstr \"Rezultatele căutării\"\n #: searx/templates/oscar/results.html:21\n #: searx/templates/simple/results.html:84\n msgid \"Try searching for:\"\n-msgstr \"\"\n+msgstr \"Încercați să căutați după:\"\n \n #: searx/templates/oscar/results.html:100\n #: searx/templates/simple/results.html:25\n msgid \"Engines cannot retrieve results\"\n-msgstr \"\"\n+msgstr \"Motoarele nu pot obține rezultate\"\n \n #: searx/templates/oscar/results.html:131\n msgid \"Links\"\n@@ -828,27 +829,27 @@ msgstr \"statistici\"\n #: searx/templates/oscar/time-range.html:3\n #: searx/templates/simple/time-range.html:3\n msgid \"Anytime\"\n-msgstr \"\"\n+msgstr \"Oricând\"\n \n #: searx/templates/oscar/time-range.html:6\n #: 
searx/templates/simple/time-range.html:6\n msgid \"Last day\"\n-msgstr \"\"\n+msgstr \"Ultima zi\"\n \n #: searx/templates/oscar/time-range.html:9\n #: searx/templates/simple/time-range.html:9\n msgid \"Last week\"\n-msgstr \"\"\n+msgstr \"Ultima săptămână\"\n \n #: searx/templates/oscar/time-range.html:12\n #: searx/templates/simple/time-range.html:12\n msgid \"Last month\"\n-msgstr \"\"\n+msgstr \"Ultima lună\"\n \n #: searx/templates/oscar/time-range.html:15\n #: searx/templates/simple/time-range.html:15\n msgid \"Last year\"\n-msgstr \"\"\n+msgstr \"Ultimul an\"\n \n #: searx/templates/oscar/messages/first_time.html:6\n #: searx/templates/oscar/messages/no_data_available.html:3\n@@ -861,11 +862,11 @@ msgstr \"Se pare că folosiți searx pentru prima dată.\"\n \n #: searx/templates/oscar/messages/no_cookies.html:3\n msgid \"Information!\"\n-msgstr \"\"\n+msgstr \"Informație!\"\n \n #: searx/templates/oscar/messages/no_cookies.html:4\n msgid \"currently, there are no cookies defined.\"\n-msgstr \"\"\n+msgstr \"momentan, nu există cookie-uri definite.\"\n \n #: searx/templates/oscar/messages/no_data_available.html:4\n msgid \"There is currently no data available. \"\n@@ -874,24 +875,24 @@ msgstr \"Deocamdată nu există date disponibile.\"\n #: searx/templates/oscar/messages/no_results.html:4\n #: searx/templates/simple/messages/no_results.html:4\n msgid \"Engines cannot retrieve results.\"\n-msgstr \"\"\n+msgstr \"Motoarele nu pot obține rezultate.\"\n \n #: searx/templates/oscar/messages/no_results.html:10\n #: searx/templates/simple/messages/no_results.html:10\n msgid \"Please, try again later or find another searx instance.\"\n-msgstr \"\"\n+msgstr \"Încercați din nou mai târziu sau folosiți o altă instanță searx.\"\n \n #: searx/templates/oscar/messages/no_results.html:14\n #: searx/templates/simple/messages/no_results.html:14\n msgid \"Sorry!\"\n-msgstr \"Îmi pare rău!\"\n+msgstr \"Ne pare rău!\"\n \n #: searx/templates/oscar/messages/no_results.html:15\n #: searx/templates/simple/messages/no_results.html:15\n msgid \"\"\n \"we didn't find any results. Please use another query or search in more \"\n \"categories.\"\n-msgstr \"n-am găsit nici un rezultat. Vă rog folosiți o altă interogare sau căutați în mai multe categorii.\"\n+msgstr \"n-am găsit nici un rezultat. 
Folosiți o altă interogare sau căutați în mai multe categorii.\"\n \n #: searx/templates/oscar/messages/save_settings_successfull.html:7\n msgid \"Well done!\"\n@@ -899,7 +900,7 @@ msgstr \"Bravo!\"\n \n #: searx/templates/oscar/messages/save_settings_successfull.html:8\n msgid \"Settings saved successfully.\"\n-msgstr \"Setările au fost salvate cu succes.\"\n+msgstr \"Configurările au fost salvate cu succes.\"\n \n #: searx/templates/oscar/messages/unknow_error.html:7\n msgid \"Oh snap!\"\n@@ -998,16 +999,16 @@ msgstr \"Încarcă mai multe...\"\n \n #: searx/templates/simple/base.html:31\n msgid \"No item found\"\n-msgstr \"\"\n+msgstr \"Niciun element găsit\"\n \n #: searx/templates/simple/preferences.html:89\n msgid \"Supports selected language\"\n-msgstr \"\"\n+msgstr \"Suportă limba selectată\"\n \n #: searx/templates/simple/preferences.html:118\n msgid \"User interface\"\n-msgstr \"\"\n+msgstr \"Interfața pentru utilizator\"\n \n #: searx/templates/simple/preferences.html:154\n msgid \"Privacy\"\n-msgstr \"\"\n+msgstr \"Confidențialitate\"\ndiff --git a/searx/translations/ru/LC_MESSAGES/messages.mo b/searx/translations/ru/LC_MESSAGES/messages.mo\nindex 9d5ea61b08..c6bcdd1b6d 100644\nBinary files a/searx/translations/ru/LC_MESSAGES/messages.mo and b/searx/translations/ru/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/ru/LC_MESSAGES/messages.po b/searx/translations/ru/LC_MESSAGES/messages.po\nindex f6bda5f1b6..befe7f963e 100644\n--- a/searx/translations/ru/LC_MESSAGES/messages.po\n+++ b/searx/translations/ru/LC_MESSAGES/messages.po\n@@ -3,7 +3,7 @@\n # This file is distributed under the same license as the PROJECT project.\n # \n # Translators:\n-# Andrey, 2017\n+# Andrey, 2017-2019\n # dimqua , 2015\n # dimqua , 2015\n # dimqua , 2017\n@@ -14,8 +14,8 @@ msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2018-02-26 16:39+0000\\n\"\n-\"Last-Translator: John DOe \\n\"\n+\"PO-Revision-Date: 2019-01-05 12:11+0000\\n\"\n+\"Last-Translator: Andrey\\n\"\n \"Language-Team: Russian (http://www.transifex.com/asciimoo/searx/language/ru/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -34,7 +34,7 @@ msgstr \"ошибка выполнения запроса\"\n \n #: searx/search.py:151\n msgid \"unexpected crash\"\n-msgstr \"неожиданное падение\"\n+msgstr \"неожиданный сбой\"\n \n #: searx/webapp.py:136\n msgid \"files\"\n@@ -148,7 +148,7 @@ msgstr \"Эта запись была заменена на\"\n \n #: searx/engines/pubmed.py:74\n msgid \"No abstract is available for this publication.\"\n-msgstr \"Для данной публикации недоступно\"\n+msgstr \"Нет аннотации для этой публикации.\"\n \n #: searx/plugins/https_rewrite.py:32\n msgid \"Rewrite HTTP links to HTTPS if possible\"\n@@ -198,15 +198,15 @@ msgstr \"Выполнять поиск немедленно, если выбра\n msgid \"\"\n \"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n \"contains \\\"user agent\\\".\"\n-msgstr \"Отображает ваш IP-адрес при запросе \\\"ip\\\" и отпечаток браузера при запросе \\\"user agent\\\".\"\n+msgstr \"Отображает ваш IP-адрес при запросе \\\"ip\\\" и пользовательский агент при запросе \\\"user agent\\\".\"\n \n #: searx/plugins/tracker_url_remover.py:26\n msgid \"Tracker URL remover\"\n-msgstr \"Удаление трекера URL-адресов\"\n+msgstr \"Удаление трекинга URL-адресов\"\n \n #: searx/plugins/tracker_url_remover.py:27\n msgid \"Remove trackers arguments from the returned URL\"\n-msgstr 
\"Удаляет аргументы отслеживания из возвращенного URL-адреса\"\n+msgstr \"Удаляет аргументы отслеживания из URL-адреса\"\n \n #: searx/plugins/vim_hotkeys.py:3\n msgid \"Vim-like hotkeys\"\n@@ -300,7 +300,7 @@ msgstr \"Язык интерфейса\"\n #: searx/templates/oscar/preferences.html:57\n #: searx/templates/simple/preferences.html:51\n msgid \"Autocomplete\"\n-msgstr \"Подгрузка результатов\"\n+msgstr \"Автозавершение\"\n \n #: searx/templates/courgette/preferences.html:45\n #: searx/templates/legacy/preferences.html:46\n@@ -622,7 +622,7 @@ msgstr \"Ошибка!\"\n \n #: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n msgid \"Powered by\"\n-msgstr \"Используется\"\n+msgstr \"Основано на\"\n \n #: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n msgid \"a privacy-respecting, hackable metasearch engine\"\n@@ -631,7 +631,7 @@ msgstr \"уважающая вашу приватность, открытая м\n #: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n #: searx/templates/simple/macros.html:43\n msgid \"proxied\"\n-msgstr \"proxy\"\n+msgstr \"proxied\"\n \n #: searx/templates/oscar/macros.html:92\n msgid \"supported\"\n@@ -671,7 +671,7 @@ msgstr \"Cookie\"\n #: searx/templates/oscar/preferences.html:42\n #: searx/templates/simple/preferences.html:48\n msgid \"What language do you prefer for search?\"\n-msgstr \"На каком языке вы предпочитаете искать?\"\n+msgstr \"Какой язык предпочтителен для поиска?\"\n \n #: searx/templates/oscar/preferences.html:48\n #: searx/templates/simple/preferences.html:128\n@@ -686,7 +686,7 @@ msgstr \"Поисковые предложения по мере ввода\"\n #: searx/templates/oscar/preferences.html:69\n #: searx/templates/simple/preferences.html:173\n msgid \"Proxying image results through searx\"\n-msgstr \"Загружать найденные изображения через searx\"\n+msgstr \"Проксировать найденные изображения с помощью searx\"\n \n #: searx/templates/oscar/preferences.html:78\n msgid \"\"\n@@ -698,7 +698,7 @@ msgstr \"Изменяет способ отправки запросов. , 2017\n+# asladic , 2017-2018\n msgid \"\"\n msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2017-11-01 20:31+0000\\n\"\n-\"Last-Translator: Adam Tauber \\n\"\n+\"PO-Revision-Date: 2018-05-01 08:59+0000\\n\"\n+\"Last-Translator: asladic \\n\"\n \"Language-Team: Slovenian (http://www.transifex.com/asciimoo/searx/language/sl/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -21,15 +21,15 @@ msgstr \"\"\n \n #: searx/search.py:137 searx/search.py:182\n msgid \"timeout\"\n-msgstr \"\"\n+msgstr \"presežena časovna omejitev\"\n \n #: searx/search.py:144\n msgid \"request exception\"\n-msgstr \"\"\n+msgstr \"napaka poizvedbe\"\n \n #: searx/search.py:151\n msgid \"unexpected crash\"\n-msgstr \"\"\n+msgstr \"nepričakovana napaka\"\n \n #: searx/webapp.py:136\n msgid \"files\"\n@@ -77,7 +77,7 @@ msgstr \"Neveljavne nastavitve. 
Prosimo, preverite vašo konfiguracijo\"\n \n #: searx/webapp.py:415\n msgid \"Invalid settings\"\n-msgstr \"\"\n+msgstr \"Neveljavne nastavitve\"\n \n #: searx/webapp.py:449 searx/webapp.py:493\n msgid \"search error\"\n@@ -630,11 +630,11 @@ msgstr \"preko posredniškega strežnika\"\n \n #: searx/templates/oscar/macros.html:92\n msgid \"supported\"\n-msgstr \"\"\n+msgstr \"podprto\"\n \n #: searx/templates/oscar/macros.html:96\n msgid \"not supported\"\n-msgstr \"\"\n+msgstr \"ni podprto\"\n \n #: searx/templates/oscar/preferences.html:13\n #: searx/templates/oscar/preferences.html:22\n@@ -718,7 +718,7 @@ msgstr \"\"\n msgid \"\"\n \"Redirect to open-access versions of publications when available (plugin \"\n \"required)\"\n-msgstr \"\"\n+msgstr \"Preusmeri na prosto dostopne različice publikacij, ko so na voljo (zahtevan vtičnik)\"\n \n #: searx/templates/oscar/preferences.html:163\n #: searx/templates/oscar/preferences.html:175\n@@ -729,7 +729,7 @@ msgstr \"Bližnjica\"\n #: searx/templates/oscar/preferences.html:164\n #: searx/templates/oscar/preferences.html:174\n msgid \"Selected language\"\n-msgstr \"\"\n+msgstr \"Izbrani jezik\"\n \n #: searx/templates/oscar/preferences.html:166\n #: searx/templates/oscar/preferences.html:172\n@@ -789,13 +789,13 @@ msgstr \"Vrednost\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"Search URL of the currently saved preferences\"\n-msgstr \"\"\n+msgstr \"Iskalni URL trenutno shranjenih nastavitev\"\n \n #: searx/templates/oscar/preferences.html:301\n msgid \"\"\n \"Note: specifying custom settings in the search URL can reduce privacy by \"\n \"leaking data to the clicked result sites.\"\n-msgstr \"\"\n+msgstr \"Opomba: navajanje lastnih nastavitev v iskalnem URL lahko vodi do zmanjšane zasebnosti preko podajanja podatkov izbranim rezultatom.\"\n \n #: searx/templates/oscar/results.html:17\n msgid \"Search results\"\n@@ -804,12 +804,12 @@ msgstr \"Zadetki iskanja\"\n #: searx/templates/oscar/results.html:21\n #: searx/templates/simple/results.html:84\n msgid \"Try searching for:\"\n-msgstr \"\"\n+msgstr \"Poskusite iskati:\"\n \n #: searx/templates/oscar/results.html:100\n #: searx/templates/simple/results.html:25\n msgid \"Engines cannot retrieve results\"\n-msgstr \"\"\n+msgstr \"Iskalniki ne morejo pridobiti rezultatov\"\n \n #: searx/templates/oscar/results.html:131\n msgid \"Links\"\n@@ -874,12 +874,12 @@ msgstr \"Trenutno ni podatkov na voljo.\"\n #: searx/templates/oscar/messages/no_results.html:4\n #: searx/templates/simple/messages/no_results.html:4\n msgid \"Engines cannot retrieve results.\"\n-msgstr \"\"\n+msgstr \"Iskalniki ne morejo pridobiti rezultatov.\"\n \n #: searx/templates/oscar/messages/no_results.html:10\n #: searx/templates/simple/messages/no_results.html:10\n msgid \"Please, try again later or find another searx instance.\"\n-msgstr \"\"\n+msgstr \"Prosimo, poskusite kasneje tu ali na drugi instanci searx.\"\n \n #: searx/templates/oscar/messages/no_results.html:14\n #: searx/templates/simple/messages/no_results.html:14\n@@ -998,7 +998,7 @@ msgstr \"Naloži več...\"\n \n #: searx/templates/simple/base.html:31\n msgid \"No item found\"\n-msgstr \"\"\n+msgstr \"Ni zadetkov\"\n \n #: searx/templates/simple/preferences.html:89\n msgid \"Supports selected language\"\n@@ -1006,8 +1006,8 @@ msgstr \"Podpira izbrani jezik\"\n \n #: searx/templates/simple/preferences.html:118\n msgid \"User interface\"\n-msgstr \"\"\n+msgstr \"Uporabniški vmesnik\"\n \n #: searx/templates/simple/preferences.html:154\n msgid 
\"Privacy\"\n-msgstr \"\"\n+msgstr \"Zasebnost\"\ndiff --git a/searx/translations/sr/LC_MESSAGES/messages.mo b/searx/translations/sr/LC_MESSAGES/messages.mo\nindex b38a68d1d3..65efaaa1a9 100644\nBinary files a/searx/translations/sr/LC_MESSAGES/messages.mo and b/searx/translations/sr/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/sv/LC_MESSAGES/messages.mo b/searx/translations/sv/LC_MESSAGES/messages.mo\nindex b3687f94a4..73d36de8cd 100644\nBinary files a/searx/translations/sv/LC_MESSAGES/messages.mo and b/searx/translations/sv/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/sv/LC_MESSAGES/messages.po b/searx/translations/sv/LC_MESSAGES/messages.po\nindex 3c98e06fc6..e8b01c8324 100644\n--- a/searx/translations/sv/LC_MESSAGES/messages.po\n+++ b/searx/translations/sv/LC_MESSAGES/messages.po\n@@ -4,13 +4,14 @@\n # \n # Translators:\n # Jonatan Nyberg, 2016-2017\n+# Jonatan Nyberg, 2018\n # Jonatan Nyberg, 2017-2018\n msgid \"\"\n msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2018-01-13 11:39+0000\\n\"\n+\"PO-Revision-Date: 2018-07-24 18:39+0000\\n\"\n \"Last-Translator: Jonatan Nyberg\\n\"\n \"Language-Team: Swedish (http://www.transifex.com/asciimoo/searx/language/sv/)\\n\"\n \"MIME-Version: 1.0\\n\"\n@@ -160,7 +161,7 @@ msgstr \"Automatiskt ladda nästa sida när du bläddrar till botten av aktuell s\n \n #: searx/plugins/oa_doi_rewrite.py:9\n msgid \"Open Access DOI rewrite\"\n-msgstr \"\"\n+msgstr \"Open Access DOI-omskrivning\"\n \n #: searx/plugins/oa_doi_rewrite.py:10\n msgid \"\"\n@@ -182,7 +183,7 @@ msgstr \"Resultat öppnas i samma fönster som standard. Denna insticksmodul skri\n \n #: searx/plugins/search_on_category_select.py:18\n msgid \"Search on category select\"\n-msgstr \"Sök på kategori välj\"\n+msgstr \"Sök vid val av kategori\"\n \n #: searx/plugins/search_on_category_select.py:19\n msgid \"\"\n@@ -198,7 +199,7 @@ msgstr \"Visar din IP om förfrågan är \\\"ip\\\" och din användaragent om förfr\n \n #: searx/plugins/tracker_url_remover.py:26\n msgid \"Tracker URL remover\"\n-msgstr \"Trackerwebbadress borttagare\"\n+msgstr \"Bevakningswebbadress borttagare\"\n \n #: searx/plugins/tracker_url_remover.py:27\n msgid \"Remove trackers arguments from the returned URL\"\n@@ -263,7 +264,7 @@ msgstr \"Inställningar\"\n #: searx/templates/oscar/preferences.html:35\n #: searx/templates/simple/preferences.html:34\n msgid \"Default categories\"\n-msgstr \"Standard kategorier\"\n+msgstr \"Standardkategorier\"\n \n #: searx/templates/courgette/preferences.html:13\n #: searx/templates/legacy/preferences.html:14\n@@ -713,7 +714,7 @@ msgstr \"Stil\"\n \n #: searx/templates/oscar/preferences.html:122\n msgid \"Open Access DOI resolver\"\n-msgstr \"\"\n+msgstr \"Open Access DOI-lösare\"\n \n #: searx/templates/oscar/preferences.html:123\n msgid \"\"\ndiff --git a/searx/translations/te/LC_MESSAGES/messages.mo b/searx/translations/te/LC_MESSAGES/messages.mo\nnew file mode 100644\nindex 0000000000..57488bf0bd\nBinary files /dev/null and b/searx/translations/te/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/te/LC_MESSAGES/messages.po b/searx/translations/te/LC_MESSAGES/messages.po\nnew file mode 100644\nindex 0000000000..8da1506ccf\n--- /dev/null\n+++ b/searx/translations/te/LC_MESSAGES/messages.po\n@@ -0,0 +1,1013 @@\n+# Translations template for PROJECT.\n+# Copyright (C) 2017 ORGANIZATION\n+# This file is distributed under the same 
license as the PROJECT project.\n+# \n+# Translators:\n+# Joseph Nuthalapati , 2018\n+msgid \"\"\n+msgstr \"\"\n+\"Project-Id-Version: searx\\n\"\n+\"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n+\"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n+\"PO-Revision-Date: 2018-03-08 16:40+0000\\n\"\n+\"Last-Translator: Joseph Nuthalapati \\n\"\n+\"Language-Team: Telugu (http://www.transifex.com/asciimoo/searx/language/te/)\\n\"\n+\"MIME-Version: 1.0\\n\"\n+\"Content-Type: text/plain; charset=UTF-8\\n\"\n+\"Content-Transfer-Encoding: 8bit\\n\"\n+\"Generated-By: Babel 2.3.4\\n\"\n+\"Language: te\\n\"\n+\"Plural-Forms: nplurals=2; plural=(n != 1);\\n\"\n+\n+#: searx/search.py:137 searx/search.py:182\n+msgid \"timeout\"\n+msgstr \"కాలపరిమితి దాటిపోయింది\"\n+\n+#: searx/search.py:144\n+msgid \"request exception\"\n+msgstr \"\"\n+\n+#: searx/search.py:151\n+msgid \"unexpected crash\"\n+msgstr \"\"\n+\n+#: searx/webapp.py:136\n+msgid \"files\"\n+msgstr \"ఫైళ్ళు\"\n+\n+#: searx/webapp.py:137\n+msgid \"general\"\n+msgstr \"సాధారణ\"\n+\n+#: searx/webapp.py:138\n+msgid \"music\"\n+msgstr \"సంగీతం\"\n+\n+#: searx/webapp.py:139\n+msgid \"social media\"\n+msgstr \"సోషల్ మీడియా\"\n+\n+#: searx/webapp.py:140\n+msgid \"images\"\n+msgstr \"చిత్రాలు\"\n+\n+#: searx/webapp.py:141\n+msgid \"videos\"\n+msgstr \"వీడియోలు\"\n+\n+#: searx/webapp.py:142\n+msgid \"it\"\n+msgstr \"ఐటి\"\n+\n+#: searx/webapp.py:143\n+msgid \"news\"\n+msgstr \"వార్తలు\"\n+\n+#: searx/webapp.py:144\n+msgid \"map\"\n+msgstr \"పటము\"\n+\n+#: searx/webapp.py:145\n+msgid \"science\"\n+msgstr \"విజ్ఞానశాస్త్రం\"\n+\n+#: searx/webapp.py:399 searx/webapp.py:658\n+msgid \"Invalid settings, please edit your preferences\"\n+msgstr \"\"\n+\n+#: searx/webapp.py:415\n+msgid \"Invalid settings\"\n+msgstr \"చెల్లని అమరికలు\"\n+\n+#: searx/webapp.py:449 searx/webapp.py:493\n+msgid \"search error\"\n+msgstr \"శోధనలో దోషము\"\n+\n+#: searx/webapp.py:530\n+msgid \"{minutes} minute(s) ago\"\n+msgstr \"{minutes} నిమిషము(ల) క్రిందట\"\n+\n+#: searx/webapp.py:532\n+msgid \"{hours} hour(s), {minutes} minute(s) ago\"\n+msgstr \"\"\n+\n+#: searx/answerers/random/answerer.py:53\n+msgid \"Random value generator\"\n+msgstr \"\"\n+\n+#: searx/answerers/random/answerer.py:54\n+msgid \"Generate different random values\"\n+msgstr \"\"\n+\n+#: searx/answerers/statistics/answerer.py:53\n+msgid \"Statistics functions\"\n+msgstr \"సాంఖ్యకశాస్త్ర ప్రమేయాలు\"\n+\n+#: searx/answerers/statistics/answerer.py:54\n+msgid \"Compute {functions} of the arguments\"\n+msgstr \"\"\n+\n+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n+msgid \"Engine time (sec)\"\n+msgstr \"\"\n+\n+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n+msgid \"Page loads (sec)\"\n+msgstr \"\"\n+\n+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n+#: searx/templates/oscar/results.html:95\n+#: searx/templates/simple/results.html:20\n+msgid \"Number of results\"\n+msgstr \"ఫలితముల సంఖ్య\"\n+\n+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213\n+msgid \"Scores\"\n+msgstr \"\"\n+\n+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217\n+msgid \"Scores per result\"\n+msgstr \"\"\n+\n+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n+msgid \"Errors\"\n+msgstr \"దోషములు\"\n+\n+#: searx/engines/pdbe.py:87\n+msgid \"{title}&nbsp;(OBSOLETE)\"\n+msgstr \"\"\n+\n+#: searx/engines/pdbe.py:91\n+msgid \"This entry has been superseded by\"\n+msgstr \"\"\n+\n+#: searx/engines/pubmed.py:74\n+msgid \"No 
abstract is available for this publication.\"\n+msgstr \"\"\n+\n+#: searx/plugins/https_rewrite.py:32\n+msgid \"Rewrite HTTP links to HTTPS if possible\"\n+msgstr \"\"\n+\n+#: searx/plugins/infinite_scroll.py:3\n+msgid \"Infinite scroll\"\n+msgstr \"\"\n+\n+#: searx/plugins/infinite_scroll.py:4\n+msgid \"Automatically load next page when scrolling to bottom of current page\"\n+msgstr \"\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:9\n+msgid \"Open Access DOI rewrite\"\n+msgstr \"\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:10\n+msgid \"\"\n+\"Avoid paywalls by redirecting to open-access versions of publications when \"\n+\"available\"\n+msgstr \"\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:18\n+#: searx/templates/oscar/preferences.html:114\n+#: searx/templates/simple/preferences.html:149\n+msgid \"Open result links on new browser tabs\"\n+msgstr \"\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:19\n+msgid \"\"\n+\"Results are opened in the same window by default. This plugin overwrites the\"\n+\" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n+msgstr \"\"\n+\n+#: searx/plugins/search_on_category_select.py:18\n+msgid \"Search on category select\"\n+msgstr \"\"\n+\n+#: searx/plugins/search_on_category_select.py:19\n+msgid \"\"\n+\"Perform search immediately if a category selected. Disable to select \"\n+\"multiple categories. (JavaScript required)\"\n+msgstr \"\"\n+\n+#: searx/plugins/self_info.py:20\n+msgid \"\"\n+\"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n+\"contains \\\"user agent\\\".\"\n+msgstr \"\"\n+\n+#: searx/plugins/tracker_url_remover.py:26\n+msgid \"Tracker URL remover\"\n+msgstr \"\"\n+\n+#: searx/plugins/tracker_url_remover.py:27\n+msgid \"Remove trackers arguments from the returned URL\"\n+msgstr \"\"\n+\n+#: searx/plugins/vim_hotkeys.py:3\n+msgid \"Vim-like hotkeys\"\n+msgstr \"\"\n+\n+#: searx/plugins/vim_hotkeys.py:4\n+msgid \"\"\n+\"Navigate search results with Vim-like hotkeys (JavaScript required). 
Press \"\n+\"\\\"h\\\" key on main or result page to get help.\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n+#: searx/templates/simple/404.html:4\n+msgid \"Page not found\"\n+msgstr \"పుట దొరకలేదు\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+#, python-format\n+msgid \"Go to %(search_page)s.\"\n+msgstr \"%(search_page)sకు వెళ్ళు\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+msgid \"search page\"\n+msgstr \"శోధన పుట\"\n+\n+#: searx/templates/courgette/index.html:9\n+#: searx/templates/courgette/index.html:13\n+#: searx/templates/courgette/results.html:5\n+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12\n+#: searx/templates/oscar/navbar.html:7\n+#: searx/templates/oscar/preferences.html:3\n+#: searx/templates/pix-art/index.html:8\n+msgid \"preferences\"\n+msgstr \"అభిరుచులు\"\n+\n+#: searx/templates/courgette/index.html:11\n+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2\n+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7\n+msgid \"about\"\n+msgstr \"గురించి\"\n+\n+#: searx/templates/courgette/preferences.html:5\n+#: searx/templates/legacy/preferences.html:5\n+#: searx/templates/oscar/preferences.html:8\n+#: searx/templates/pix-art/preferences.html:5\n+#: searx/templates/simple/preferences.html:26\n+msgid \"Preferences\"\n+msgstr \"అభిరుచులు\"\n+\n+#: searx/templates/courgette/preferences.html:9\n+#: searx/templates/legacy/preferences.html:9\n+#: searx/templates/oscar/preferences.html:33\n+#: searx/templates/oscar/preferences.html:35\n+#: searx/templates/simple/preferences.html:34\n+msgid \"Default categories\"\n+msgstr \"నిష్క్రియ వర్గాలు\"\n+\n+#: searx/templates/courgette/preferences.html:13\n+#: searx/templates/legacy/preferences.html:14\n+#: searx/templates/oscar/preferences.html:41\n+#: searx/templates/pix-art/preferences.html:9\n+#: searx/templates/simple/preferences.html:39\n+#: searx/templates/simple/preferences.html:163\n+msgid \"Search language\"\n+msgstr \"శోధన భాష\"\n+\n+#: searx/templates/courgette/preferences.html:16\n+#: searx/templates/legacy/preferences.html:17\n+#: searx/templates/oscar/languages.html:6\n+#: searx/templates/pix-art/preferences.html:12\n+#: searx/templates/simple/languages.html:2\n+#: searx/templates/simple/preferences.html:42\n+msgid \"Default language\"\n+msgstr \"నిష్క్రియ భాష\"\n+\n+#: searx/templates/courgette/preferences.html:24\n+#: searx/templates/legacy/preferences.html:25\n+#: searx/templates/oscar/preferences.html:47\n+#: searx/templates/pix-art/preferences.html:20\n+#: searx/templates/simple/preferences.html:120\n+msgid \"Interface language\"\n+msgstr \"వినిమయసీమ భాష\"\n+\n+#: searx/templates/courgette/preferences.html:34\n+#: searx/templates/legacy/preferences.html:35\n+#: searx/templates/oscar/preferences.html:57\n+#: searx/templates/simple/preferences.html:51\n+msgid \"Autocomplete\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:45\n+#: searx/templates/legacy/preferences.html:46\n+#: searx/templates/oscar/preferences.html:68\n+#: searx/templates/simple/preferences.html:166\n+msgid \"Image proxy\"\n+msgstr \"\"\n+\n+#: 
searx/templates/courgette/preferences.html:48\n+#: searx/templates/legacy/preferences.html:49\n+#: searx/templates/oscar/preferences.html:72\n+#: searx/templates/simple/preferences.html:169\n+msgid \"Enabled\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:49\n+#: searx/templates/legacy/preferences.html:50\n+#: searx/templates/oscar/preferences.html:73\n+#: searx/templates/simple/preferences.html:170\n+msgid \"Disabled\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:54\n+#: searx/templates/legacy/preferences.html:55\n+#: searx/templates/oscar/preferences.html:77\n+#: searx/templates/pix-art/preferences.html:30\n+#: searx/templates/simple/preferences.html:156\n+msgid \"Method\"\n+msgstr \"విధానం\"\n+\n+#: searx/templates/courgette/preferences.html:63\n+#: searx/templates/legacy/preferences.html:64\n+#: searx/templates/oscar/preferences.html:86\n+#: searx/templates/oscar/preferences.html:165\n+#: searx/templates/oscar/preferences.html:173\n+#: searx/templates/simple/preferences.html:63\n+#: searx/templates/simple/preferences.html:90\n+msgid \"SafeSearch\"\n+msgstr \"సురక్షితశోధన\"\n+\n+#: searx/templates/courgette/preferences.html:66\n+#: searx/templates/legacy/preferences.html:67\n+#: searx/templates/oscar/preferences.html:90\n+#: searx/templates/simple/preferences.html:66\n+msgid \"Strict\"\n+msgstr \"కఠినమైన\"\n+\n+#: searx/templates/courgette/preferences.html:67\n+#: searx/templates/legacy/preferences.html:68\n+#: searx/templates/oscar/preferences.html:91\n+#: searx/templates/simple/preferences.html:67\n+msgid \"Moderate\"\n+msgstr \"మితమైన\"\n+\n+#: searx/templates/courgette/preferences.html:68\n+#: searx/templates/legacy/preferences.html:69\n+#: searx/templates/oscar/preferences.html:92\n+#: searx/templates/simple/preferences.html:68\n+msgid \"None\"\n+msgstr \"ఏమీ లేదు\"\n+\n+#: searx/templates/courgette/preferences.html:73\n+#: searx/templates/legacy/preferences.html:74\n+#: searx/templates/oscar/preferences.html:96\n+#: searx/templates/pix-art/preferences.html:39\n+#: searx/templates/simple/preferences.html:131\n+msgid \"Themes\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:83\n+msgid \"Color\"\n+msgstr \"రంగు\"\n+\n+#: searx/templates/courgette/preferences.html:86\n+msgid \"Blue (default)\"\n+msgstr \"నీలం (నిష్క్రియం)\"\n+\n+#: searx/templates/courgette/preferences.html:87\n+msgid \"Violet\"\n+msgstr \"ఊదారంగు\"\n+\n+#: searx/templates/courgette/preferences.html:88\n+msgid \"Green\"\n+msgstr \"ఆకుపచ్చ\"\n+\n+#: searx/templates/courgette/preferences.html:89\n+msgid \"Cyan\"\n+msgstr \" ముదురు నీలం\"\n+\n+#: searx/templates/courgette/preferences.html:90\n+msgid \"Orange\"\n+msgstr \"నారింజ\"\n+\n+#: searx/templates/courgette/preferences.html:91\n+msgid \"Red\"\n+msgstr \"ఎరుపు\"\n+\n+#: searx/templates/courgette/preferences.html:96\n+#: searx/templates/legacy/preferences.html:93\n+#: searx/templates/pix-art/preferences.html:49\n+#: searx/templates/simple/preferences.html:77\n+msgid \"Currently used search engines\"\n+msgstr \"ప్రస్తుతం ఉపయోగించబడుతున్న శోధన యంత్రాలు\"\n+\n+#: searx/templates/courgette/preferences.html:100\n+#: searx/templates/legacy/preferences.html:97\n+#: searx/templates/oscar/preferences.html:162\n+#: searx/templates/oscar/preferences.html:176\n+#: searx/templates/pix-art/preferences.html:53\n+#: searx/templates/simple/preferences.html:87\n+msgid \"Engine name\"\n+msgstr \"యంత్రం పేరు\"\n+\n+#: searx/templates/courgette/preferences.html:101\n+#: searx/templates/legacy/preferences.html:98\n+msgid 
\"Category\"\n+msgstr \"వర్గము\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:113\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:110\n+#: searx/templates/oscar/preferences.html:161\n+#: searx/templates/oscar/preferences.html:177\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:64\n+#: searx/templates/simple/preferences.html:86\n+msgid \"Allow\"\n+msgstr \"అనుమతించు\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:114\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:111\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:65\n+msgid \"Block\"\n+msgstr \"అడ్డగించు\"\n+\n+#: searx/templates/courgette/preferences.html:122\n+#: searx/templates/legacy/preferences.html:119\n+#: searx/templates/oscar/preferences.html:297\n+#: searx/templates/pix-art/preferences.html:73\n+#: searx/templates/simple/preferences.html:180\n+msgid \"\"\n+\"These settings are stored in your cookies, this allows us not to store this \"\n+\"data about you.\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:124\n+#: searx/templates/legacy/preferences.html:121\n+#: searx/templates/oscar/preferences.html:299\n+#: searx/templates/pix-art/preferences.html:75\n+#: searx/templates/simple/preferences.html:182\n+msgid \"\"\n+\"These cookies serve your sole convenience, we don't use these cookies to \"\n+\"track you.\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/preferences.html:127\n+#: searx/templates/legacy/preferences.html:124\n+#: searx/templates/oscar/preferences.html:305\n+#: searx/templates/pix-art/preferences.html:78\n+#: searx/templates/simple/preferences.html:185\n+msgid \"save\"\n+msgstr \"దాచు\"\n+\n+#: searx/templates/courgette/preferences.html:128\n+#: searx/templates/legacy/preferences.html:125\n+#: searx/templates/oscar/preferences.html:307\n+#: searx/templates/simple/preferences.html:186\n+msgid \"Reset defaults\"\n+msgstr \"నిష్క్రియాలకు అమర్చు\"\n+\n+#: searx/templates/courgette/preferences.html:129\n+#: searx/templates/legacy/preferences.html:126\n+#: searx/templates/oscar/preferences.html:306\n+#: searx/templates/pix-art/preferences.html:79\n+#: searx/templates/simple/preferences.html:187\n+msgid \"back\"\n+msgstr \"వెనక్కి\"\n+\n+#: searx/templates/courgette/results.html:12\n+#: searx/templates/legacy/results.html:13\n+#: searx/templates/oscar/results.html:136\n+#: searx/templates/simple/results.html:58\n+msgid \"Search URL\"\n+msgstr \"శోధన URL\"\n+\n+#: searx/templates/courgette/results.html:16\n+#: searx/templates/legacy/results.html:17\n+#: searx/templates/oscar/results.html:141\n+#: searx/templates/simple/results.html:62\n+msgid \"Download results\"\n+msgstr \"ఫలితాలను దింపుకోండి\"\n+\n+#: searx/templates/courgette/results.html:34\n+#: searx/templates/legacy/results.html:35\n+#: searx/templates/simple/results.html:10\n+msgid \"Answers\"\n+msgstr \"జవాబులు\"\n+\n+#: searx/templates/courgette/results.html:42\n+#: searx/templates/legacy/results.html:43\n+#: searx/templates/oscar/results.html:116\n+#: searx/templates/simple/results.html:42\n+msgid \"Suggestions\"\n+msgstr \"సూచనలు\"\n+\n+#: searx/templates/courgette/results.html:70\n+#: searx/templates/legacy/results.html:81\n+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78\n+#: 
searx/templates/simple/results.html:130\n+msgid \"previous page\"\n+msgstr \"పూర్వపు పుట\"\n+\n+#: searx/templates/courgette/results.html:81\n+#: searx/templates/legacy/results.html:92\n+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84\n+#: searx/templates/simple/results.html:145\n+msgid \"next page\"\n+msgstr \"తర్వాతి పుట\"\n+\n+#: searx/templates/courgette/search.html:3\n+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6\n+#: searx/templates/oscar/search_full.html:9\n+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4\n+msgid \"Search for...\"\n+msgstr \"శోధించు...\"\n+\n+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4\n+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4\n+#: searx/templates/simple/stats.html:7\n+msgid \"Engine stats\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/result_templates/images.html:4\n+#: searx/templates/legacy/result_templates/images.html:4\n+#: searx/templates/pix-art/result_templates/images.html:4\n+msgid \"original context\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Seeder\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Leecher\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:9\n+#: searx/templates/legacy/result_templates/torrent.html:9\n+#: searx/templates/oscar/macros.html:23\n+#: searx/templates/simple/result_templates/torrent.html:6\n+msgid \"magnet link\"\n+msgstr \"\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:10\n+#: searx/templates/legacy/result_templates/torrent.html:10\n+#: searx/templates/oscar/macros.html:24\n+#: searx/templates/simple/result_templates/torrent.html:7\n+msgid \"torrent file\"\n+msgstr \"\"\n+\n+#: searx/templates/legacy/categories.html:8\n+#: searx/templates/simple/categories.html:6\n+msgid \"Click on the magnifier to perform search\"\n+msgstr \"\"\n+\n+#: searx/templates/legacy/preferences.html:84\n+#: searx/templates/oscar/preferences.html:113\n+#: searx/templates/simple/preferences.html:142\n+msgid \"Results on new tabs\"\n+msgstr \"\"\n+\n+#: searx/templates/legacy/preferences.html:87\n+#: searx/templates/oscar/preferences.html:117\n+#: searx/templates/simple/preferences.html:145\n+msgid \"On\"\n+msgstr \"\"\n+\n+#: searx/templates/legacy/preferences.html:88\n+#: searx/templates/oscar/preferences.html:118\n+#: searx/templates/simple/preferences.html:146\n+msgid \"Off\"\n+msgstr \"\"\n+\n+#: searx/templates/legacy/result_templates/code.html:3\n+#: searx/templates/legacy/result_templates/default.html:3\n+#: searx/templates/legacy/result_templates/map.html:9\n+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48\n+#: searx/templates/simple/macros.html:43\n+msgid \"cached\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/advanced.html:4\n+msgid \"Advanced settings\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/base.html:62\n+#: searx/templates/oscar/messages/first_time.html:4\n+#: searx/templates/oscar/messages/save_settings_successfull.html:5\n+#: 
searx/templates/oscar/messages/unknow_error.html:5\n+msgid \"Close\"\n+msgstr \"మూసివేయు\"\n+\n+#: searx/templates/oscar/base.html:64\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+#: searx/templates/simple/results.html:25\n+msgid \"Error!\"\n+msgstr \"దోషం!\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"Powered by\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"a privacy-respecting, hackable metasearch engine\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n+#: searx/templates/simple/macros.html:43\n+msgid \"proxied\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/macros.html:92\n+msgid \"supported\"\n+msgstr \"ఆదరించబడిన\"\n+\n+#: searx/templates/oscar/macros.html:96\n+msgid \"not supported\"\n+msgstr \"ఆదరణ లేని\"\n+\n+#: searx/templates/oscar/preferences.html:13\n+#: searx/templates/oscar/preferences.html:22\n+#: searx/templates/simple/preferences.html:32\n+msgid \"General\"\n+msgstr \"సాధారణ\"\n+\n+#: searx/templates/oscar/preferences.html:14\n+#: searx/templates/oscar/preferences.html:146\n+#: searx/templates/simple/preferences.html:76\n+msgid \"Engines\"\n+msgstr \"యంత్రాలు\"\n+\n+#: searx/templates/oscar/preferences.html:15\n+#: searx/templates/oscar/preferences.html:219\n+msgid \"Plugins\"\n+msgstr \"ప్లగిన్లు\"\n+\n+#: searx/templates/oscar/preferences.html:16\n+#: searx/templates/oscar/preferences.html:245\n+msgid \"Answerers\"\n+msgstr \"జవాబులు\"\n+\n+#: searx/templates/oscar/preferences.html:17\n+#: searx/templates/oscar/preferences.html:272\n+msgid \"Cookies\"\n+msgstr \"కుకీలు\"\n+\n+#: searx/templates/oscar/preferences.html:42\n+#: searx/templates/simple/preferences.html:48\n+msgid \"What language do you prefer for search?\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:48\n+#: searx/templates/simple/preferences.html:128\n+msgid \"Change the language of the layout\"\n+msgstr \"వినిమయసీమ యొక్క భాషను మార్చు\"\n+\n+#: searx/templates/oscar/preferences.html:58\n+#: searx/templates/simple/preferences.html:60\n+msgid \"Find stuff as you type\"\n+msgstr \"టైపు చేస్తూ శోధించు\"\n+\n+#: searx/templates/oscar/preferences.html:69\n+#: searx/templates/simple/preferences.html:173\n+msgid \"Proxying image results through searx\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:78\n+msgid \"\"\n+\"Change how forms are submited, learn more about request methods\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:87\n+#: searx/templates/simple/preferences.html:71\n+msgid \"Filter content\"\n+msgstr \"విషయాలను వడకట్టు\"\n+\n+#: searx/templates/oscar/preferences.html:97\n+#: searx/templates/simple/preferences.html:139\n+msgid \"Change searx layout\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Choose style for this theme\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Style\"\n+msgstr \"శైలి\"\n+\n+#: searx/templates/oscar/preferences.html:122\n+msgid \"Open Access DOI resolver\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:123\n+msgid \"\"\n+\"Redirect to open-access versions of publications when available (plugin \"\n+\"required)\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:163\n+#: searx/templates/oscar/preferences.html:175\n+#: 
+msgid "Shortcut"
+msgstr "సత్వరమార్గం"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "ఎంచుకున్న భాష"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "కాల శ్రేణి"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr "సగటు సమయం"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "గరిష్ఠ సమయం"
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "పేరు"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "వర్ణన"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "ఉదాహరణలు"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "కుకీ పేరు"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "విలువ"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr ""
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr ""
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "దీనికొరకు శోధించండి:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "యంత్రాలు ఫలితాలను రాబట్టలేకపోతున్నాయి"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "లంకెలు"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "శోధన ప్రారంభించు"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "స్థితి వివరణ లెక్కలు"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "ఎప్పుడైనా"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "క్రిందటి రోజు"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "క్రిందటి వారం"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "క్రిందటి నెల"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "క్రిందటి సంవత్సరం"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "జాగ్రత్త!"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr ""
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "సమాచారం!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr ""
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr ""
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "యంత్రాలు ఫలితాలను రాబట్టలేకపోయాయి."
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr ""
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "క్షమించండి!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr ""
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "భళా!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "ఆమరికలు విజయవంతంగా పొందుపరచబడ్డాయి."
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "అయ్యో!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "ఏదో తప్పు జరిగింది."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "ఫైళ్ళ సంఖ్య"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "వీడియో చూపించు"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "వీడియోను దాచిపెట్టు"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr ""
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "ఏమీ దొరకలేదు"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr ""
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr ""
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "ఆంతరంగికత"
diff --git a/searx/translations/tr/LC_MESSAGES/messages.mo b/searx/translations/tr/LC_MESSAGES/messages.mo
index 4125a24adb..ec47020aa0 100644
Binary files a/searx/translations/tr/LC_MESSAGES/messages.mo and b/searx/translations/tr/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/tr/LC_MESSAGES/messages.po b/searx/translations/tr/LC_MESSAGES/messages.po
index 65974de7a8..a15c85609f 100644
--- a/searx/translations/tr/LC_MESSAGES/messages.po
+++ b/searx/translations/tr/LC_MESSAGES/messages.po
@@ -3,6 +3,7 @@
 # This file is distributed under the same license as the PROJECT project.
 # 
 # Translators:
+# Arda Kılıçdağı , 2018
 # Caner Başaran , 2014-2016
 # FIRST AUTHOR , 2014
 msgid ""
@@ -10,8 +11,8 @@ msgstr ""
 "Project-Id-Version: searx\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
 "POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-11-01 20:31+0000\n"
-"Last-Translator: Adam Tauber \n"
+"PO-Revision-Date: 2018-12-06 17:22+0000\n"
+"Last-Translator: Arda Kılıçdağı \n"
 "Language-Team: Turkish (http://www.transifex.com/asciimoo/searx/language/tr/)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
@@ -22,15 +23,15 @@ msgstr ""
 
 #: searx/search.py:137 searx/search.py:182
 msgid "timeout"
-msgstr ""
+msgstr "zaman aşımı"
 
 #: searx/search.py:144
 msgid "request exception"
-msgstr ""
+msgstr "istekte bir hata oluştu"
 
 #: searx/search.py:151
 msgid "unexpected crash"
-msgstr ""
+msgstr "beklenmeyen hata"
 
 #: searx/webapp.py:136
 msgid "files"
@@ -74,15 +75,15 @@ msgstr "bilim"
 
 #: searx/webapp.py:399 searx/webapp.py:658
 msgid "Invalid settings, please edit your preferences"
-msgstr ""
+msgstr "Hatalı ayar girildi, lütfen ayarlarınızı kontrol edin"
 
 #: searx/webapp.py:415
 msgid "Invalid settings"
-msgstr ""
+msgstr "Hatalı ayar"
 
 #: searx/webapp.py:449 searx/webapp.py:493
 msgid "search error"
\"search error\"\n-msgstr \"\"\n+msgstr \"arama hatası\"\n \n #: searx/webapp.py:530\n msgid \"{minutes} minute(s) ago\"\n@@ -94,27 +95,27 @@ msgstr \"{hours} saat(), {minutes} dakika() önce\"\n \n #: searx/answerers/random/answerer.py:53\n msgid \"Random value generator\"\n-msgstr \"\"\n+msgstr \"Rastgele değer üretici\"\n \n #: searx/answerers/random/answerer.py:54\n msgid \"Generate different random values\"\n-msgstr \"\"\n+msgstr \"Farklı rastgele metinler üret\"\n \n #: searx/answerers/statistics/answerer.py:53\n msgid \"Statistics functions\"\n-msgstr \"\"\n+msgstr \"İstatistik fonksiyonları\"\n \n #: searx/answerers/statistics/answerer.py:54\n msgid \"Compute {functions} of the arguments\"\n-msgstr \"\"\n+msgstr \"Argümanların {functions} değerlerini hesapla\"\n \n #: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n msgid \"Engine time (sec)\"\n-msgstr \"\"\n+msgstr \"Motor cevap süresi (sn)\"\n \n #: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n msgid \"Page loads (sec)\"\n-msgstr \"Yüklenen sayfa (sn)\"\n+msgstr \"Sayfa yüklenmesi (sn)\"\n \n #: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n #: searx/templates/oscar/results.html:95\n@@ -124,11 +125,11 @@ msgstr \"Sonuç sayısı\"\n \n #: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213\n msgid \"Scores\"\n-msgstr \"\"\n+msgstr \"Skor\"\n \n #: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217\n msgid \"Scores per result\"\n-msgstr \"\"\n+msgstr \"Sonuç başına skor\"\n \n #: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n msgid \"Errors\"\n@@ -136,7 +137,7 @@ msgstr \"Hatalar\"\n \n #: searx/engines/pdbe.py:87\n msgid \"{title}&nbsp;(OBSOLETE)\"\n-msgstr \"\"\n+msgstr \"{title}&nbsp;(GEÇERSİZ)\"\n \n #: searx/engines/pdbe.py:91\n msgid \"This entry has been superseded by\"\ndiff --git a/searx/translations/uk/LC_MESSAGES/messages.mo b/searx/translations/uk/LC_MESSAGES/messages.mo\nnew file mode 100644\nindex 0000000000..6610dfaa7f\nBinary files /dev/null and b/searx/translations/uk/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/uk/LC_MESSAGES/messages.po b/searx/translations/uk/LC_MESSAGES/messages.po\nnew file mode 100644\nindex 0000000000..7c6ac5aacf\n--- /dev/null\n+++ b/searx/translations/uk/LC_MESSAGES/messages.po\n@@ -0,0 +1,1015 @@\n+# Translations template for PROJECT.\n+# Copyright (C) 2017 ORGANIZATION\n+# This file is distributed under the same license as the PROJECT project.\n+# \n+# Translators:\n+# pvhn4 , 2017\n+# pvhn4 , 2017\n+# zubr139, 2016-2017\n+msgid \"\"\n+msgstr \"\"\n+\"Project-Id-Version: searx\\n\"\n+\"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n+\"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n+\"PO-Revision-Date: 2017-11-01 20:31+0000\\n\"\n+\"Last-Translator: Adam Tauber \\n\"\n+\"Language-Team: Ukrainian (http://www.transifex.com/asciimoo/searx/language/uk/)\\n\"\n+\"MIME-Version: 1.0\\n\"\n+\"Content-Type: text/plain; charset=UTF-8\\n\"\n+\"Content-Transfer-Encoding: 8bit\\n\"\n+\"Generated-By: Babel 2.3.4\\n\"\n+\"Language: uk\\n\"\n+\"Plural-Forms: nplurals=4; plural=(n % 1 == 0 && n % 10 == 1 && n % 100 != 11 ? 0 : n % 1 == 0 && n % 10 >= 2 && n % 10 <= 4 && (n % 100 < 12 || n % 100 > 14) ? 1 : n % 1 == 0 && (n % 10 ==0 || (n % 10 >=5 && n % 10 <=9) || (n % 100 >=11 && n % 100 <=14 )) ? 
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr ""
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr ""
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr ""
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "файли"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "загальні"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "музика"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "соцмережі"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "зображення"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "відео"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "IT"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "новини"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "карти"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "наука"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "Невірні налаштування, будь ласка, зробіть зміни в налаштуваннях"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr ""
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "помилка пошуку"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "{minutes} хвилин тому"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "{hours} годин, {minutes} хвилин тому"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "Генератор випадкових значень"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "Створити різні випадкові значення"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "Функції статистики"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "Розрахувати {functions} аргументів"
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr "Час пошуку (сек)"
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr "Час завантаження (сек)"
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "Число результатів"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "Влучань"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr "Влучань за результат"
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "Помилок"
+
+#: searx/engines/pdbe.py:87
+msgid "{title}&nbsp;(OBSOLETE)"
+msgstr "{title}&nbsp;(OBSOLETE)"
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr "Цей запис був змінений"
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr ""
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr "За можливістю замінити в посиланнях HTTP на HTTPS"
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr "Нескінченна прокрутка"
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr "Автоматично завантажувати наступну сторінку при прокрутці поточної до кінця"
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr ""
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr "Уникайте платіжних каналів шляхом переадресації на версії публікацій з відкритим доступом, коли це можливо"
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "Відкривати посилання результатів в нових вкладках"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "Типово результати відкриваються в тому ж вікні. Цей плагін змінює поведінку, щоб посилання відкривались типово в нових вкладках/вікнах. (Необхідний JavaScript)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr "Пошук по обраній категорії"
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr "Виконувати пошук зразу при обранні категорії. Вимкнути вибір декількох категорій. (Необхідний JavaScript)"
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr "Відображає IP-адресу при запиті \"ip\" та ваш user-agent при запиті \"user agent\"."
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr "Видалення URL-адреси трекера"
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr "Вилучіть аргументи трекера з поверненої URL-адреси"
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr "Гарячі клавіші Vim"
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr "Переміщення результатів пошуку за допомогою віртуальних клавіш (потрібно JavaScript). Натисніть клавішу \"h\" на головній сторінці або на сторінці результатів, щоб отримати допомогу."
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "Сторінка не знайдена"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "Перейти до %(search_page)s."
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "сторінки пошуку"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "опції"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "про сайт"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "Опції"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "Типові категорії"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "Мова пошуку"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "Стандартна мова"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "Мова інтерфейсу"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "Автозаповнення"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr "Проксі для зображень"
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "Ввімкнено"
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "Вимкнено"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "Метод"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "БезпечнийПошук"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "Жорсткий"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "Помірний"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "Вимкнений"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "Теми"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "Колір"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "Синій (типово)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "Фіолетовий"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "Зелений"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "Блакитний"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "Помаранчевий"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "Червоний"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "Пошукові системи, які використовуються"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "Назва пошукової системи"
name\"\n+msgstr \"Назва пошукової системи\"\n+\n+#: searx/templates/courgette/preferences.html:101\n+#: searx/templates/legacy/preferences.html:98\n+msgid \"Category\"\n+msgstr \"Категорія\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:113\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:110\n+#: searx/templates/oscar/preferences.html:161\n+#: searx/templates/oscar/preferences.html:177\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:64\n+#: searx/templates/simple/preferences.html:86\n+msgid \"Allow\"\n+msgstr \"Дозволити\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:114\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:111\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:65\n+msgid \"Block\"\n+msgstr \"Заблокувати\"\n+\n+#: searx/templates/courgette/preferences.html:122\n+#: searx/templates/legacy/preferences.html:119\n+#: searx/templates/oscar/preferences.html:297\n+#: searx/templates/pix-art/preferences.html:73\n+#: searx/templates/simple/preferences.html:180\n+msgid \"\"\n+\"These settings are stored in your cookies, this allows us not to store this \"\n+\"data about you.\"\n+msgstr \"Налаштування зберігаються в ваших cookie-файлах, що дає нам змогу не зберігати ці відомості про вас.\"\n+\n+#: searx/templates/courgette/preferences.html:124\n+#: searx/templates/legacy/preferences.html:121\n+#: searx/templates/oscar/preferences.html:299\n+#: searx/templates/pix-art/preferences.html:75\n+#: searx/templates/simple/preferences.html:182\n+msgid \"\"\n+\"These cookies serve your sole convenience, we don't use these cookies to \"\n+\"track you.\"\n+msgstr \"Ці cookie-файли необхідні винятково для вашої зручності, ми не використовуємо ці cookie-файли, щоб відслідковувати вас.\"\n+\n+#: searx/templates/courgette/preferences.html:127\n+#: searx/templates/legacy/preferences.html:124\n+#: searx/templates/oscar/preferences.html:305\n+#: searx/templates/pix-art/preferences.html:78\n+#: searx/templates/simple/preferences.html:185\n+msgid \"save\"\n+msgstr \"зберегти\"\n+\n+#: searx/templates/courgette/preferences.html:128\n+#: searx/templates/legacy/preferences.html:125\n+#: searx/templates/oscar/preferences.html:307\n+#: searx/templates/simple/preferences.html:186\n+msgid \"Reset defaults\"\n+msgstr \"Відновити стандартні налаштування\"\n+\n+#: searx/templates/courgette/preferences.html:129\n+#: searx/templates/legacy/preferences.html:126\n+#: searx/templates/oscar/preferences.html:306\n+#: searx/templates/pix-art/preferences.html:79\n+#: searx/templates/simple/preferences.html:187\n+msgid \"back\"\n+msgstr \"назад\"\n+\n+#: searx/templates/courgette/results.html:12\n+#: searx/templates/legacy/results.html:13\n+#: searx/templates/oscar/results.html:136\n+#: searx/templates/simple/results.html:58\n+msgid \"Search URL\"\n+msgstr \"Посилання на пошук\"\n+\n+#: searx/templates/courgette/results.html:16\n+#: searx/templates/legacy/results.html:17\n+#: searx/templates/oscar/results.html:141\n+#: searx/templates/simple/results.html:62\n+msgid \"Download results\"\n+msgstr \"Завантажити результати\"\n+\n+#: searx/templates/courgette/results.html:34\n+#: searx/templates/legacy/results.html:35\n+#: searx/templates/simple/results.html:10\n+msgid \"Answers\"\n+msgstr \"Відповіді\"\n+\n+#: 
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "Пропозиції"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "попередня сторінка"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "наступна сторінка"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "Шукати..."
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr "Статистика пошукової системи"
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "в контексті"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "Сідер"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "Лічер"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "магнет-посилання"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "торрент-файл"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "Натисніть лупу, щоб виконати пошук"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "Результати в нових вкладках"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "Ввімк."
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "Вимк."
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr "архівовано"
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "Додаткові налаштування"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "Закрити"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "Помилка!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "Використовується"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr "вільна система метапошуку, яка поважає вашу приватність"
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr "проксовано"
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr ""
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "Загальні"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "Пошукові системи"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "Плагіни"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "Відповідачі"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "Cookie-файли"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "Якій мові ви віддаєте перевагу для пошуку?"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "Змінити мову сайту"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "Шукати підчас набору"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr "Проксувати знайдені зображення за допомогою searx"
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, learn more about request methods"
+msgstr "Змінити спосіб відправки запитів, детальніше про методи запитів"
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "Фільтр контенту"
контенту\"\n+\n+#: searx/templates/oscar/preferences.html:97\n+#: searx/templates/simple/preferences.html:139\n+msgid \"Change searx layout\"\n+msgstr \"Змінити вигляд сайту\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Choose style for this theme\"\n+msgstr \"Обрати стиль для цієї теми\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Style\"\n+msgstr \"Стиль\"\n+\n+#: searx/templates/oscar/preferences.html:122\n+msgid \"Open Access DOI resolver\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:123\n+msgid \"\"\n+\"Redirect to open-access versions of publications when available (plugin \"\n+\"required)\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:163\n+#: searx/templates/oscar/preferences.html:175\n+#: searx/templates/simple/preferences.html:88\n+msgid \"Shortcut\"\n+msgstr \"Гарячі клавіші\"\n+\n+#: searx/templates/oscar/preferences.html:164\n+#: searx/templates/oscar/preferences.html:174\n+msgid \"Selected language\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:166\n+#: searx/templates/oscar/preferences.html:172\n+#: searx/templates/simple/preferences.html:91\n+msgid \"Time range\"\n+msgstr \"Часовий діапазон\"\n+\n+#: searx/templates/oscar/preferences.html:167\n+#: searx/templates/oscar/preferences.html:171\n+#: searx/templates/simple/preferences.html:92\n+msgid \"Avg. time\"\n+msgstr \"Середній час\"\n+\n+#: searx/templates/oscar/preferences.html:168\n+#: searx/templates/oscar/preferences.html:170\n+#: searx/templates/simple/preferences.html:93\n+msgid \"Max time\"\n+msgstr \"Максимальний час\"\n+\n+#: searx/templates/oscar/preferences.html:248\n+msgid \"This is the list of searx's instant answering modules.\"\n+msgstr \"Список модулів миттєвих відповідей searx.\"\n+\n+#: searx/templates/oscar/preferences.html:252\n+msgid \"Name\"\n+msgstr \"Назва\"\n+\n+#: searx/templates/oscar/preferences.html:253\n+msgid \"Keywords\"\n+msgstr \"Ключові слова\"\n+\n+#: searx/templates/oscar/preferences.html:254\n+msgid \"Description\"\n+msgstr \"Опис\"\n+\n+#: searx/templates/oscar/preferences.html:255\n+msgid \"Examples\"\n+msgstr \"Приклади\"\n+\n+#: searx/templates/oscar/preferences.html:275\n+msgid \"\"\n+\"This is the list of cookies and their values searx is storing on your \"\n+\"computer.\"\n+msgstr \"Це список cookie-файлів та їх значень, які searx зберігає на вашому комп'ютері.\"\n+\n+#: searx/templates/oscar/preferences.html:276\n+msgid \"With that list, you can assess searx transparency.\"\n+msgstr \"По цьому списку ви можете оцінити відкритість searx.\"\n+\n+#: searx/templates/oscar/preferences.html:281\n+msgid \"Cookie name\"\n+msgstr \"Ім'я cookie\"\n+\n+#: searx/templates/oscar/preferences.html:282\n+msgid \"Value\"\n+msgstr \"Значення\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"Search URL of the currently saved preferences\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"\"\n+\"Note: specifying custom settings in the search URL can reduce privacy by \"\n+\"leaking data to the clicked result sites.\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/results.html:17\n+msgid \"Search results\"\n+msgstr \"Результати пошуку\"\n+\n+#: searx/templates/oscar/results.html:21\n+#: searx/templates/simple/results.html:84\n+msgid \"Try searching for:\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/results.html:100\n+#: searx/templates/simple/results.html:25\n+msgid \"Engines cannot 
retrieve results\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/results.html:131\n+msgid \"Links\"\n+msgstr \"Посилання\"\n+\n+#: searx/templates/oscar/search.html:8\n+#: searx/templates/oscar/search_full.html:11\n+#: searx/templates/simple/search.html:5\n+msgid \"Start search\"\n+msgstr \"Розпочати пошук\"\n+\n+#: searx/templates/oscar/stats.html:2\n+msgid \"stats\"\n+msgstr \"статистика\"\n+\n+#: searx/templates/oscar/time-range.html:3\n+#: searx/templates/simple/time-range.html:3\n+msgid \"Anytime\"\n+msgstr \"За весь час\"\n+\n+#: searx/templates/oscar/time-range.html:6\n+#: searx/templates/simple/time-range.html:6\n+msgid \"Last day\"\n+msgstr \"За останній день\"\n+\n+#: searx/templates/oscar/time-range.html:9\n+#: searx/templates/simple/time-range.html:9\n+msgid \"Last week\"\n+msgstr \"За останній тиждень\"\n+\n+#: searx/templates/oscar/time-range.html:12\n+#: searx/templates/simple/time-range.html:12\n+msgid \"Last month\"\n+msgstr \"За останній місяць\"\n+\n+#: searx/templates/oscar/time-range.html:15\n+#: searx/templates/simple/time-range.html:15\n+msgid \"Last year\"\n+msgstr \"За останній рік\"\n+\n+#: searx/templates/oscar/messages/first_time.html:6\n+#: searx/templates/oscar/messages/no_data_available.html:3\n+msgid \"Heads up!\"\n+msgstr \"Отакої!\"\n+\n+#: searx/templates/oscar/messages/first_time.html:7\n+msgid \"It look like you are using searx first time.\"\n+msgstr \"Схоже, що ви використовуєте searx вперше.\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:3\n+msgid \"Information!\"\n+msgstr \"Інформація!\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:4\n+msgid \"currently, there are no cookies defined.\"\n+msgstr \"в даний час cookie-файли не встановлені.\"\n+\n+#: searx/templates/oscar/messages/no_data_available.html:4\n+msgid \"There is currently no data available. \"\n+msgstr \"В даний час немає доступних даних.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+msgid \"Engines cannot retrieve results.\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/messages/no_results.html:10\n+#: searx/templates/simple/messages/no_results.html:10\n+msgid \"Please, try again later or find another searx instance.\"\n+msgstr \"\"\n+\n+#: searx/templates/oscar/messages/no_results.html:14\n+#: searx/templates/simple/messages/no_results.html:14\n+msgid \"Sorry!\"\n+msgstr \"Вибачте!\"\n+\n+#: searx/templates/oscar/messages/no_results.html:15\n+#: searx/templates/simple/messages/no_results.html:15\n+msgid \"\"\n+\"we didn't find any results. Please use another query or search in more \"\n+\"categories.\"\n+msgstr \"ми не знайшли жодних результатів. 
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "Чудово!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "Налаштування успішно збережені."
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "От халепа!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "Щось пішло не так."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "показати медіа"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "приховати медіа"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "Завантажити зображення"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "Переглянути джерело"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "показати карту"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "приховати карту"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "показати деталі"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "приховати деталі"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "Розмір файла"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "Байтів"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "КіБ"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "МіБ"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "ГіБ"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "ТіБ"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "Кількість Файлів"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "показати відео"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "приховати відео"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "Завантажити більше..."
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr ""
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "Підтримка обраної мови"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr ""
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr ""
diff --git a/searx/translations/vi/LC_MESSAGES/messages.mo b/searx/translations/vi/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..07dc309ea2
Binary files /dev/null and b/searx/translations/vi/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/vi/LC_MESSAGES/messages.po b/searx/translations/vi/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..d8a1a0c943
--- /dev/null
+++ b/searx/translations/vi/LC_MESSAGES/messages.po
@@ -0,0 +1,1013 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+# 
+# Translators:
+# dd721411 , 2018
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2018-02-28 17:27+0000\n"
+"Last-Translator: dd721411 \n"
+"Language-Team: Vietnamese (http://www.transifex.com/asciimoo/searx/language/vi/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: vi\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr "hết thời hạn"
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr "ngoại lệ yêu cầu"
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr "lỗi bất ngờ"
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "các tập tin"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "tổng quát"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "âm nhạc"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "mạng xã hội"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "hình ảnh"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "phim"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "CNTT"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "tin tức"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "bản đồ"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "khoa học"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "Cài đặt không hợp lệ, xin xem lại tuỳ chỉnh"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "Cài đặt không hợp lệ"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "lỗi tìm kiếm"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "{minutes} phút() trước"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "{hours} giờ(), {minutes} phút() trước"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "Trình tạo giá trị ngẫu nhiên"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "Tạo các giá trị ngẫu nhiên khác nhau"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "Các hàm thống kê"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "Tính toán {functions} của các đối số"
\"Tính toán {functions} của các đối số\"\n+\n+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n+msgid \"Engine time (sec)\"\n+msgstr \"Thời gian trình tìm kiếm (giây)\"\n+\n+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n+msgid \"Page loads (sec)\"\n+msgstr \"Tải trang (giây)\"\n+\n+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n+#: searx/templates/oscar/results.html:95\n+#: searx/templates/simple/results.html:20\n+msgid \"Number of results\"\n+msgstr \"Số lượng kết quả\"\n+\n+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213\n+msgid \"Scores\"\n+msgstr \"Điểm số\"\n+\n+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217\n+msgid \"Scores per result\"\n+msgstr \"Điểm số cho từng kết quả\"\n+\n+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n+msgid \"Errors\"\n+msgstr \"Các lỗi\"\n+\n+#: searx/engines/pdbe.py:87\n+msgid \"{title}&nbsp;(OBSOLETE)\"\n+msgstr \"{title}&nbsp;(LỖI THỜI)\"\n+\n+#: searx/engines/pdbe.py:91\n+msgid \"This entry has been superseded by\"\n+msgstr \"Mục này đã được thay thế bởi\"\n+\n+#: searx/engines/pubmed.py:74\n+msgid \"No abstract is available for this publication.\"\n+msgstr \"Không có bản tóm tắt nào cho ấn phẩm này.\"\n+\n+#: searx/plugins/https_rewrite.py:32\n+msgid \"Rewrite HTTP links to HTTPS if possible\"\n+msgstr \"Viết lại các liên kết HTTP thành HTTPS khi có thể\"\n+\n+#: searx/plugins/infinite_scroll.py:3\n+msgid \"Infinite scroll\"\n+msgstr \"Cuộn liên tục\"\n+\n+#: searx/plugins/infinite_scroll.py:4\n+msgid \"Automatically load next page when scrolling to bottom of current page\"\n+msgstr \"Tự động tải trang kế tiếp khi cuộn đến cuối trang hiện tại\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:9\n+msgid \"Open Access DOI rewrite\"\n+msgstr \"Viết lại DOI Truy Cập Miễn Phí\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:10\n+msgid \"\"\n+\"Avoid paywalls by redirecting to open-access versions of publications when \"\n+\"available\"\n+msgstr \"Tránh việc trả phí bằng cách chuyển hướng đến các phiên bản truy cập miễn phí của ấn phẩm khi có thể\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:18\n+#: searx/templates/oscar/preferences.html:114\n+#: searx/templates/simple/preferences.html:149\n+msgid \"Open result links on new browser tabs\"\n+msgstr \"Mở kết quả trên những thẻ trình duyệt mới\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:19\n+msgid \"\"\n+\"Results are opened in the same window by default. This plugin overwrites the\"\n+\" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n+msgstr \"Theo mặc định thì các kết quả được mở trên cùng một cửa sổ. Phần mở rộng này sẽ ghi đè lên hành vi mặc định đó để mở các liên kết trên các thẻ/cửa sổ mới. (yêu cầu JavaScript)\"\n+\n+#: searx/plugins/search_on_category_select.py:18\n+msgid \"Search on category select\"\n+msgstr \"Tìm kiếm khi chọn danh mục đơn\"\n+\n+#: searx/plugins/search_on_category_select.py:19\n+msgid \"\"\n+\"Perform search immediately if a category selected. Disable to select \"\n+\"multiple categories. (JavaScript required)\"\n+msgstr \"Thực thi tìm kiếm ngay khi chọn một danh mục. Tắt đi để chọn nhiều danh mục. 
(yêu cầu JavaScript)\"\n+\n+#: searx/plugins/self_info.py:20\n+msgid \"\"\n+\"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n+\"contains \\\"user agent\\\".\"\n+msgstr \"Hiện IP của bạn khi gõ \\\"ip\\\" và hiện user agent khi gõ \\\"user agent\\\".\"\n+\n+#: searx/plugins/tracker_url_remover.py:26\n+msgid \"Tracker URL remover\"\n+msgstr \"Trình loại bỏ URL theo dõi\"\n+\n+#: searx/plugins/tracker_url_remover.py:27\n+msgid \"Remove trackers arguments from the returned URL\"\n+msgstr \"Loại bỏ các đối số theo dõi từ URL trả về\"\n+\n+#: searx/plugins/vim_hotkeys.py:3\n+msgid \"Vim-like hotkeys\"\n+msgstr \"Các phím tắt Vim-like\"\n+\n+#: searx/plugins/vim_hotkeys.py:4\n+msgid \"\"\n+\"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n+\"\\\"h\\\" key on main or result page to get help.\"\n+msgstr \"Điều hướng các kết quả tìm kiếm với các phím tắt Vim-like (yêu cầu JavaScript). Nhấn phím \\\"h\\\" trên trang chính hoặc trang kết quả để xem trợ giúp.\"\n+\n+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n+#: searx/templates/simple/404.html:4\n+msgid \"Page not found\"\n+msgstr \"Không tìm thấy trang\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+#, python-format\n+msgid \"Go to %(search_page)s.\"\n+msgstr \"Đi đến %(search_page)s.\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+msgid \"search page\"\n+msgstr \"tìm kiếm trang\"\n+\n+#: searx/templates/courgette/index.html:9\n+#: searx/templates/courgette/index.html:13\n+#: searx/templates/courgette/results.html:5\n+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12\n+#: searx/templates/oscar/navbar.html:7\n+#: searx/templates/oscar/preferences.html:3\n+#: searx/templates/pix-art/index.html:8\n+msgid \"preferences\"\n+msgstr \"tuỳ chỉnh\"\n+\n+#: searx/templates/courgette/index.html:11\n+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2\n+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7\n+msgid \"about\"\n+msgstr \"thông tin về\"\n+\n+#: searx/templates/courgette/preferences.html:5\n+#: searx/templates/legacy/preferences.html:5\n+#: searx/templates/oscar/preferences.html:8\n+#: searx/templates/pix-art/preferences.html:5\n+#: searx/templates/simple/preferences.html:26\n+msgid \"Preferences\"\n+msgstr \"Tuỳ chỉnh\"\n+\n+#: searx/templates/courgette/preferences.html:9\n+#: searx/templates/legacy/preferences.html:9\n+#: searx/templates/oscar/preferences.html:33\n+#: searx/templates/oscar/preferences.html:35\n+#: searx/templates/simple/preferences.html:34\n+msgid \"Default categories\"\n+msgstr \"Các danh mục mặc định\"\n+\n+#: searx/templates/courgette/preferences.html:13\n+#: searx/templates/legacy/preferences.html:14\n+#: searx/templates/oscar/preferences.html:41\n+#: searx/templates/pix-art/preferences.html:9\n+#: searx/templates/simple/preferences.html:39\n+#: searx/templates/simple/preferences.html:163\n+msgid \"Search language\"\n+msgstr \"Ngôn ngữ tìm kiếm\"\n+\n+#: searx/templates/courgette/preferences.html:16\n+#: searx/templates/legacy/preferences.html:17\n+#: searx/templates/oscar/languages.html:6\n+#: 
searx/templates/pix-art/preferences.html:12\n+#: searx/templates/simple/languages.html:2\n+#: searx/templates/simple/preferences.html:42\n+msgid \"Default language\"\n+msgstr \"Ngôn ngữ mặc định\"\n+\n+#: searx/templates/courgette/preferences.html:24\n+#: searx/templates/legacy/preferences.html:25\n+#: searx/templates/oscar/preferences.html:47\n+#: searx/templates/pix-art/preferences.html:20\n+#: searx/templates/simple/preferences.html:120\n+msgid \"Interface language\"\n+msgstr \"Ngôn ngữ giao diện\"\n+\n+#: searx/templates/courgette/preferences.html:34\n+#: searx/templates/legacy/preferences.html:35\n+#: searx/templates/oscar/preferences.html:57\n+#: searx/templates/simple/preferences.html:51\n+msgid \"Autocomplete\"\n+msgstr \"Gợi ý tự động\"\n+\n+#: searx/templates/courgette/preferences.html:45\n+#: searx/templates/legacy/preferences.html:46\n+#: searx/templates/oscar/preferences.html:68\n+#: searx/templates/simple/preferences.html:166\n+msgid \"Image proxy\"\n+msgstr \"Proxy hình ảnh\"\n+\n+#: searx/templates/courgette/preferences.html:48\n+#: searx/templates/legacy/preferences.html:49\n+#: searx/templates/oscar/preferences.html:72\n+#: searx/templates/simple/preferences.html:169\n+msgid \"Enabled\"\n+msgstr \"Đã bật\"\n+\n+#: searx/templates/courgette/preferences.html:49\n+#: searx/templates/legacy/preferences.html:50\n+#: searx/templates/oscar/preferences.html:73\n+#: searx/templates/simple/preferences.html:170\n+msgid \"Disabled\"\n+msgstr \"Đã tắt\"\n+\n+#: searx/templates/courgette/preferences.html:54\n+#: searx/templates/legacy/preferences.html:55\n+#: searx/templates/oscar/preferences.html:77\n+#: searx/templates/pix-art/preferences.html:30\n+#: searx/templates/simple/preferences.html:156\n+msgid \"Method\"\n+msgstr \"Phương pháp\"\n+\n+#: searx/templates/courgette/preferences.html:63\n+#: searx/templates/legacy/preferences.html:64\n+#: searx/templates/oscar/preferences.html:86\n+#: searx/templates/oscar/preferences.html:165\n+#: searx/templates/oscar/preferences.html:173\n+#: searx/templates/simple/preferences.html:63\n+#: searx/templates/simple/preferences.html:90\n+msgid \"SafeSearch\"\n+msgstr \"Tìm Kiếm An Toàn\"\n+\n+#: searx/templates/courgette/preferences.html:66\n+#: searx/templates/legacy/preferences.html:67\n+#: searx/templates/oscar/preferences.html:90\n+#: searx/templates/simple/preferences.html:66\n+msgid \"Strict\"\n+msgstr \"Nghiêm ngặt\"\n+\n+#: searx/templates/courgette/preferences.html:67\n+#: searx/templates/legacy/preferences.html:68\n+#: searx/templates/oscar/preferences.html:91\n+#: searx/templates/simple/preferences.html:67\n+msgid \"Moderate\"\n+msgstr \"Vừa phải\"\n+\n+#: searx/templates/courgette/preferences.html:68\n+#: searx/templates/legacy/preferences.html:69\n+#: searx/templates/oscar/preferences.html:92\n+#: searx/templates/simple/preferences.html:68\n+msgid \"None\"\n+msgstr \"Không\"\n+\n+#: searx/templates/courgette/preferences.html:73\n+#: searx/templates/legacy/preferences.html:74\n+#: searx/templates/oscar/preferences.html:96\n+#: searx/templates/pix-art/preferences.html:39\n+#: searx/templates/simple/preferences.html:131\n+msgid \"Themes\"\n+msgstr \"Nền\"\n+\n+#: searx/templates/courgette/preferences.html:83\n+msgid \"Color\"\n+msgstr \"Màu sắc\"\n+\n+#: searx/templates/courgette/preferences.html:86\n+msgid \"Blue (default)\"\n+msgstr \"Xanh lam (mặc định)\"\n+\n+#: searx/templates/courgette/preferences.html:87\n+msgid \"Violet\"\n+msgstr \"Tím\"\n+\n+#: searx/templates/courgette/preferences.html:88\n+msgid \"Green\"\n+msgstr \"Xanh 
lục\"\n+\n+#: searx/templates/courgette/preferences.html:89\n+msgid \"Cyan\"\n+msgstr \"Lục lam\"\n+\n+#: searx/templates/courgette/preferences.html:90\n+msgid \"Orange\"\n+msgstr \"Cam\"\n+\n+#: searx/templates/courgette/preferences.html:91\n+msgid \"Red\"\n+msgstr \"Đỏ\"\n+\n+#: searx/templates/courgette/preferences.html:96\n+#: searx/templates/legacy/preferences.html:93\n+#: searx/templates/pix-art/preferences.html:49\n+#: searx/templates/simple/preferences.html:77\n+msgid \"Currently used search engines\"\n+msgstr \"Các trình tìm kiếm đang được dùng\"\n+\n+#: searx/templates/courgette/preferences.html:100\n+#: searx/templates/legacy/preferences.html:97\n+#: searx/templates/oscar/preferences.html:162\n+#: searx/templates/oscar/preferences.html:176\n+#: searx/templates/pix-art/preferences.html:53\n+#: searx/templates/simple/preferences.html:87\n+msgid \"Engine name\"\n+msgstr \"Tên trình tìm kiếm\"\n+\n+#: searx/templates/courgette/preferences.html:101\n+#: searx/templates/legacy/preferences.html:98\n+msgid \"Category\"\n+msgstr \"Danh mục\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:113\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:110\n+#: searx/templates/oscar/preferences.html:161\n+#: searx/templates/oscar/preferences.html:177\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:64\n+#: searx/templates/simple/preferences.html:86\n+msgid \"Allow\"\n+msgstr \"Cho phép\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:114\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:111\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:65\n+msgid \"Block\"\n+msgstr \"Chặn\"\n+\n+#: searx/templates/courgette/preferences.html:122\n+#: searx/templates/legacy/preferences.html:119\n+#: searx/templates/oscar/preferences.html:297\n+#: searx/templates/pix-art/preferences.html:73\n+#: searx/templates/simple/preferences.html:180\n+msgid \"\"\n+\"These settings are stored in your cookies, this allows us not to store this \"\n+\"data about you.\"\n+msgstr \"Những cài đặt này được lưu trữ trong các cookie, điều này cho phép chúng tôi không phải lưu các dữ liệu về bạn.\"\n+\n+#: searx/templates/courgette/preferences.html:124\n+#: searx/templates/legacy/preferences.html:121\n+#: searx/templates/oscar/preferences.html:299\n+#: searx/templates/pix-art/preferences.html:75\n+#: searx/templates/simple/preferences.html:182\n+msgid \"\"\n+\"These cookies serve your sole convenience, we don't use these cookies to \"\n+\"track you.\"\n+msgstr \"Những cookie này chỉ phục vụ cho chính bạn, chúng tôi không sử dụng chúng để theo dõi bạn.\"\n+\n+#: searx/templates/courgette/preferences.html:127\n+#: searx/templates/legacy/preferences.html:124\n+#: searx/templates/oscar/preferences.html:305\n+#: searx/templates/pix-art/preferences.html:78\n+#: searx/templates/simple/preferences.html:185\n+msgid \"save\"\n+msgstr \"lưu\"\n+\n+#: searx/templates/courgette/preferences.html:128\n+#: searx/templates/legacy/preferences.html:125\n+#: searx/templates/oscar/preferences.html:307\n+#: searx/templates/simple/preferences.html:186\n+msgid \"Reset defaults\"\n+msgstr \"Đưa về mặc định\"\n+\n+#: searx/templates/courgette/preferences.html:129\n+#: searx/templates/legacy/preferences.html:126\n+#: searx/templates/oscar/preferences.html:306\n+#: 
searx/templates/pix-art/preferences.html:79\n+#: searx/templates/simple/preferences.html:187\n+msgid \"back\"\n+msgstr \"trở về\"\n+\n+#: searx/templates/courgette/results.html:12\n+#: searx/templates/legacy/results.html:13\n+#: searx/templates/oscar/results.html:136\n+#: searx/templates/simple/results.html:58\n+msgid \"Search URL\"\n+msgstr \"URL Tìm kiếm\"\n+\n+#: searx/templates/courgette/results.html:16\n+#: searx/templates/legacy/results.html:17\n+#: searx/templates/oscar/results.html:141\n+#: searx/templates/simple/results.html:62\n+msgid \"Download results\"\n+msgstr \"Tải về các kết quả\"\n+\n+#: searx/templates/courgette/results.html:34\n+#: searx/templates/legacy/results.html:35\n+#: searx/templates/simple/results.html:10\n+msgid \"Answers\"\n+msgstr \"Các đáp án\"\n+\n+#: searx/templates/courgette/results.html:42\n+#: searx/templates/legacy/results.html:43\n+#: searx/templates/oscar/results.html:116\n+#: searx/templates/simple/results.html:42\n+msgid \"Suggestions\"\n+msgstr \"Các gợi ý\"\n+\n+#: searx/templates/courgette/results.html:70\n+#: searx/templates/legacy/results.html:81\n+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78\n+#: searx/templates/simple/results.html:130\n+msgid \"previous page\"\n+msgstr \"trang liền trước\"\n+\n+#: searx/templates/courgette/results.html:81\n+#: searx/templates/legacy/results.html:92\n+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84\n+#: searx/templates/simple/results.html:145\n+msgid \"next page\"\n+msgstr \"trang tiếp theo\"\n+\n+#: searx/templates/courgette/search.html:3\n+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6\n+#: searx/templates/oscar/search_full.html:9\n+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4\n+msgid \"Search for...\"\n+msgstr \"Tìm kiếm về...\"\n+\n+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4\n+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4\n+#: searx/templates/simple/stats.html:7\n+msgid \"Engine stats\"\n+msgstr \"Các thông số về trình tìm kiếm\"\n+\n+#: searx/templates/courgette/result_templates/images.html:4\n+#: searx/templates/legacy/result_templates/images.html:4\n+#: searx/templates/pix-art/result_templates/images.html:4\n+msgid \"original context\"\n+msgstr \"ngữ cảnh gốc\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Seeder\"\n+msgstr \"Seeder\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Leecher\"\n+msgstr \"Leecher\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:9\n+#: searx/templates/legacy/result_templates/torrent.html:9\n+#: searx/templates/oscar/macros.html:23\n+#: searx/templates/simple/result_templates/torrent.html:6\n+msgid \"magnet link\"\n+msgstr \"liên kết magnet\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:10\n+#: searx/templates/legacy/result_templates/torrent.html:10\n+#: searx/templates/oscar/macros.html:24\n+#: searx/templates/simple/result_templates/torrent.html:7\n+msgid \"torrent file\"\n+msgstr \"tập tin torrent\"\n+\n+#: 
searx/templates/legacy/categories.html:8\n+#: searx/templates/simple/categories.html:6\n+msgid \"Click on the magnifier to perform search\"\n+msgstr \"Nhấp vào hình kính lúp để tiến hành tìm kiếm\"\n+\n+#: searx/templates/legacy/preferences.html:84\n+#: searx/templates/oscar/preferences.html:113\n+#: searx/templates/simple/preferences.html:142\n+msgid \"Results on new tabs\"\n+msgstr \"Hiện kết quả trên các thẻ mới\"\n+\n+#: searx/templates/legacy/preferences.html:87\n+#: searx/templates/oscar/preferences.html:117\n+#: searx/templates/simple/preferences.html:145\n+msgid \"On\"\n+msgstr \"Bật\"\n+\n+#: searx/templates/legacy/preferences.html:88\n+#: searx/templates/oscar/preferences.html:118\n+#: searx/templates/simple/preferences.html:146\n+msgid \"Off\"\n+msgstr \"Tắt\"\n+\n+#: searx/templates/legacy/result_templates/code.html:3\n+#: searx/templates/legacy/result_templates/default.html:3\n+#: searx/templates/legacy/result_templates/map.html:9\n+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48\n+#: searx/templates/simple/macros.html:43\n+msgid \"cached\"\n+msgstr \"đã lưu cache\"\n+\n+#: searx/templates/oscar/advanced.html:4\n+msgid \"Advanced settings\"\n+msgstr \"Cài đặt nâng cao\"\n+\n+#: searx/templates/oscar/base.html:62\n+#: searx/templates/oscar/messages/first_time.html:4\n+#: searx/templates/oscar/messages/save_settings_successfull.html:5\n+#: searx/templates/oscar/messages/unknow_error.html:5\n+msgid \"Close\"\n+msgstr \"Đóng\"\n+\n+#: searx/templates/oscar/base.html:64\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+#: searx/templates/simple/results.html:25\n+msgid \"Error!\"\n+msgstr \"Lỗi!\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"Powered by\"\n+msgstr \"Được cung cấp bởi\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"a privacy-respecting, hackable metasearch engine\"\n+msgstr \"một trình tìm kiếm đa nguồn, dễ tuỳ biến và tôn trọng quyền riêng tư\"\n+\n+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n+#: searx/templates/simple/macros.html:43\n+msgid \"proxied\"\n+msgstr \"đã proxy\"\n+\n+#: searx/templates/oscar/macros.html:92\n+msgid \"supported\"\n+msgstr \"có hỗ trợ\"\n+\n+#: searx/templates/oscar/macros.html:96\n+msgid \"not supported\"\n+msgstr \"không hỗ trợ\"\n+\n+#: searx/templates/oscar/preferences.html:13\n+#: searx/templates/oscar/preferences.html:22\n+#: searx/templates/simple/preferences.html:32\n+msgid \"General\"\n+msgstr \"Tổng quát\"\n+\n+#: searx/templates/oscar/preferences.html:14\n+#: searx/templates/oscar/preferences.html:146\n+#: searx/templates/simple/preferences.html:76\n+msgid \"Engines\"\n+msgstr \"Các trình tìm kiếm\"\n+\n+#: searx/templates/oscar/preferences.html:15\n+#: searx/templates/oscar/preferences.html:219\n+msgid \"Plugins\"\n+msgstr \"Các phần mở rộng\"\n+\n+#: searx/templates/oscar/preferences.html:16\n+#: searx/templates/oscar/preferences.html:245\n+msgid \"Answerers\"\n+msgstr \"Trình trả lời nhanh\"\n+\n+#: searx/templates/oscar/preferences.html:17\n+#: searx/templates/oscar/preferences.html:272\n+msgid \"Cookies\"\n+msgstr \"Các cookie\"\n+\n+#: searx/templates/oscar/preferences.html:42\n+#: searx/templates/simple/preferences.html:48\n+msgid \"What language do you prefer for search?\"\n+msgstr \"Bạn muốn tìm kiếm bằng ngôn ngữ nào?\"\n+\n+#: searx/templates/oscar/preferences.html:48\n+#: 
searx/templates/simple/preferences.html:128\n+msgid \"Change the language of the layout\"\n+msgstr \"Thay đổi ngôn ngữ giao diện\"\n+\n+#: searx/templates/oscar/preferences.html:58\n+#: searx/templates/simple/preferences.html:60\n+msgid \"Find stuff as you type\"\n+msgstr \"Tìm kiếm ngay khi gõ\"\n+\n+#: searx/templates/oscar/preferences.html:69\n+#: searx/templates/simple/preferences.html:173\n+msgid \"Proxying image results through searx\"\n+msgstr \"Proxy kết quả hình ảnh qua searx\"\n+\n+#: searx/templates/oscar/preferences.html:78\n+msgid \"\"\n+\"Change how forms are submited, learn more about request methods\"\n+msgstr \"Thay đổi cách thức các cụm từ tìm kiếm được gửi đi, tìm hiểu thêm về các phương thức tìm kiếm\"\n+\n+#: searx/templates/oscar/preferences.html:87\n+#: searx/templates/simple/preferences.html:71\n+msgid \"Filter content\"\n+msgstr \"Lọc các nội dung\"\n+\n+#: searx/templates/oscar/preferences.html:97\n+#: searx/templates/simple/preferences.html:139\n+msgid \"Change searx layout\"\n+msgstr \"Thay đổi giao diện searx\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Choose style for this theme\"\n+msgstr \"Chọn phong cách cho nền này\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Style\"\n+msgstr \"Phong cách\"\n+\n+#: searx/templates/oscar/preferences.html:122\n+msgid \"Open Access DOI resolver\"\n+msgstr \"Trình xử lý DOI Truy Cập Miễn Phí\"\n+\n+#: searx/templates/oscar/preferences.html:123\n+msgid \"\"\n+\"Redirect to open-access versions of publications when available (plugin \"\n+\"required)\"\n+msgstr \"Chuyển hướng đến các phiên bản truy cập miễn phí của ấn phẩm khi có thể (yêu cầu phần mở rộng)\"\n+\n+#: searx/templates/oscar/preferences.html:163\n+#: searx/templates/oscar/preferences.html:175\n+#: searx/templates/simple/preferences.html:88\n+msgid \"Shortcut\"\n+msgstr \"Lối tắt\"\n+\n+#: searx/templates/oscar/preferences.html:164\n+#: searx/templates/oscar/preferences.html:174\n+msgid \"Selected language\"\n+msgstr \"Ngôn ngữ được chọn\"\n+\n+#: searx/templates/oscar/preferences.html:166\n+#: searx/templates/oscar/preferences.html:172\n+#: searx/templates/simple/preferences.html:91\n+msgid \"Time range\"\n+msgstr \"Khoảng thời gian\"\n+\n+#: searx/templates/oscar/preferences.html:167\n+#: searx/templates/oscar/preferences.html:171\n+#: searx/templates/simple/preferences.html:92\n+msgid \"Avg. 
time\"\n+msgstr \"Thời gian trung bình\"\n+\n+#: searx/templates/oscar/preferences.html:168\n+#: searx/templates/oscar/preferences.html:170\n+#: searx/templates/simple/preferences.html:93\n+msgid \"Max time\"\n+msgstr \"Thời gian tối đa\"\n+\n+#: searx/templates/oscar/preferences.html:248\n+msgid \"This is the list of searx's instant answering modules.\"\n+msgstr \"Đây là danh sách các module trả lời nhanh của searx\"\n+\n+#: searx/templates/oscar/preferences.html:252\n+msgid \"Name\"\n+msgstr \"Tên\"\n+\n+#: searx/templates/oscar/preferences.html:253\n+msgid \"Keywords\"\n+msgstr \"Các từ khoá\"\n+\n+#: searx/templates/oscar/preferences.html:254\n+msgid \"Description\"\n+msgstr \"Mô tả\"\n+\n+#: searx/templates/oscar/preferences.html:255\n+msgid \"Examples\"\n+msgstr \"Các ví dụ\"\n+\n+#: searx/templates/oscar/preferences.html:275\n+msgid \"\"\n+\"This is the list of cookies and their values searx is storing on your \"\n+\"computer.\"\n+msgstr \"Đây là danh sách các cookie và giá trị của chúng mà searx đang lưu trữ trên máy tính của bạn.\"\n+\n+#: searx/templates/oscar/preferences.html:276\n+msgid \"With that list, you can assess searx transparency.\"\n+msgstr \"Với danh sách này, bạn có thể đánh giá tính minh bạch của searx.\"\n+\n+#: searx/templates/oscar/preferences.html:281\n+msgid \"Cookie name\"\n+msgstr \"Tên cookie\"\n+\n+#: searx/templates/oscar/preferences.html:282\n+msgid \"Value\"\n+msgstr \"Giá trị\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"Search URL of the currently saved preferences\"\n+msgstr \"URL tìm kiếm của tuỳ chỉnh được lưu hiện tại\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"\"\n+\"Note: specifying custom settings in the search URL can reduce privacy by \"\n+\"leaking data to the clicked result sites.\"\n+msgstr \"Ghi chú: việc định rõ cài đặt cá nhân trong URL tìm kiếm có thể làm suy giảm mức độ riêng tư vì nó chuyển dữ liệu đến các trang kết quả được nhấp chọn.\"\n+\n+#: searx/templates/oscar/results.html:17\n+msgid \"Search results\"\n+msgstr \"Kết quả tìm kiếm\"\n+\n+#: searx/templates/oscar/results.html:21\n+#: searx/templates/simple/results.html:84\n+msgid \"Try searching for:\"\n+msgstr \"Thử tìm kiếm:\"\n+\n+#: searx/templates/oscar/results.html:100\n+#: searx/templates/simple/results.html:25\n+msgid \"Engines cannot retrieve results\"\n+msgstr \"Các trình tìm kiếm không nhận được kết quả\"\n+\n+#: searx/templates/oscar/results.html:131\n+msgid \"Links\"\n+msgstr \"Các liên kết\"\n+\n+#: searx/templates/oscar/search.html:8\n+#: searx/templates/oscar/search_full.html:11\n+#: searx/templates/simple/search.html:5\n+msgid \"Start search\"\n+msgstr \"Bắt đầu tìm kiếm\"\n+\n+#: searx/templates/oscar/stats.html:2\n+msgid \"stats\"\n+msgstr \"các thông số\"\n+\n+#: searx/templates/oscar/time-range.html:3\n+#: searx/templates/simple/time-range.html:3\n+msgid \"Anytime\"\n+msgstr \"Bất kỳ lúc nào\"\n+\n+#: searx/templates/oscar/time-range.html:6\n+#: searx/templates/simple/time-range.html:6\n+msgid \"Last day\"\n+msgstr \"Hôm trước\"\n+\n+#: searx/templates/oscar/time-range.html:9\n+#: searx/templates/simple/time-range.html:9\n+msgid \"Last week\"\n+msgstr \"Tuần trước\"\n+\n+#: searx/templates/oscar/time-range.html:12\n+#: searx/templates/simple/time-range.html:12\n+msgid \"Last month\"\n+msgstr \"Tháng trước\"\n+\n+#: searx/templates/oscar/time-range.html:15\n+#: searx/templates/simple/time-range.html:15\n+msgid \"Last year\"\n+msgstr \"Năm ngoái\"\n+\n+#: searx/templates/oscar/messages/first_time.html:6\n+#: 
searx/templates/oscar/messages/no_data_available.html:3\n+msgid \"Heads up!\"\n+msgstr \"Cẩn thận!\"\n+\n+#: searx/templates/oscar/messages/first_time.html:7\n+msgid \"It look like you are using searx first time.\"\n+msgstr \"Có vẻ như bạn mới sử dụng searx lần đầu.\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:3\n+msgid \"Information!\"\n+msgstr \"Thông tin!\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:4\n+msgid \"currently, there are no cookies defined.\"\n+msgstr \"hiện tại không có cookie nào.\"\n+\n+#: searx/templates/oscar/messages/no_data_available.html:4\n+msgid \"There is currently no data available. \"\n+msgstr \"Hiện không có dữ liệu nào.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+msgid \"Engines cannot retrieve results.\"\n+msgstr \"Các trình tìm kiếm không nhận được kết quả.\"\n+\n+#: searx/templates/oscar/messages/no_results.html:10\n+#: searx/templates/simple/messages/no_results.html:10\n+msgid \"Please, try again later or find another searx instance.\"\n+msgstr \"Xin thử lại lần nữa hoặc tìm một server searx khác\"\n+\n+#: searx/templates/oscar/messages/no_results.html:14\n+#: searx/templates/simple/messages/no_results.html:14\n+msgid \"Sorry!\"\n+msgstr \"Xin lỗi!\"\n+\n+#: searx/templates/oscar/messages/no_results.html:15\n+#: searx/templates/simple/messages/no_results.html:15\n+msgid \"\"\n+\"we didn't find any results. Please use another query or search in more \"\n+\"categories.\"\n+msgstr \"chúng tôi không tìm thấy kết quả nào. Xin gõ cụm từ khác hoặc tìm kiếm trong nhiều danh mục hơn.\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:7\n+msgid \"Well done!\"\n+msgstr \"Tốt lắm!\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:8\n+msgid \"Settings saved successfully.\"\n+msgstr \"Lưu cài đặt thành công.\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:7\n+msgid \"Oh snap!\"\n+msgstr \"Quái quỷ thật!\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:8\n+msgid \"Something went wrong.\"\n+msgstr \"Đã có sự cố.\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"show media\"\n+msgstr \"hiện nội dung\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"hide media\"\n+msgstr \"ẩn nội dung\"\n+\n+#: searx/templates/oscar/result_templates/images.html:30\n+msgid \"Get image\"\n+msgstr \"Xem hình ảnh\"\n+\n+#: searx/templates/oscar/result_templates/images.html:33\n+msgid \"View source\"\n+msgstr \"Xem nguồn\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"show map\"\n+msgstr \"hiện bản đồ\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"hide map\"\n+msgstr \"ẩn bản đồ\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"show details\"\n+msgstr \"hiện chi tiết\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"hide details\"\n+msgstr \"ẩn chi tiết\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:7\n+#: searx/templates/simple/result_templates/torrent.html:11\n+msgid \"Filesize\"\n+msgstr \"Kích thước tập tin\"\n+\n+#: 
searx/templates/oscar/result_templates/torrent.html:9\n+#: searx/templates/simple/result_templates/torrent.html:12\n+msgid \"Bytes\"\n+msgstr \"Byte\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:10\n+#: searx/templates/simple/result_templates/torrent.html:13\n+msgid \"kiB\"\n+msgstr \"kiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:11\n+#: searx/templates/simple/result_templates/torrent.html:14\n+msgid \"MiB\"\n+msgstr \"MiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:12\n+#: searx/templates/simple/result_templates/torrent.html:15\n+msgid \"GiB\"\n+msgstr \"GiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:13\n+#: searx/templates/simple/result_templates/torrent.html:16\n+msgid \"TiB\"\n+msgstr \"TiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:15\n+#: searx/templates/simple/result_templates/torrent.html:20\n+msgid \"Number of Files\"\n+msgstr \"Số lượng tập tin\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"show video\"\n+msgstr \"hiện phim\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"hide video\"\n+msgstr \"ẩn phim\"\n+\n+#: searx/templates/pix-art/results.html:28\n+msgid \"Load more...\"\n+msgstr \"Tải thêm...\"\n+\n+#: searx/templates/simple/base.html:31\n+msgid \"No item found\"\n+msgstr \"Không tìm thấy gì\"\n+\n+#: searx/templates/simple/preferences.html:89\n+msgid \"Supports selected language\"\n+msgstr \"Có hỗ trợ ngôn ngữ được chọn\"\n+\n+#: searx/templates/simple/preferences.html:118\n+msgid \"User interface\"\n+msgstr \"Giao diện người dùng\"\n+\n+#: searx/templates/simple/preferences.html:154\n+msgid \"Privacy\"\n+msgstr \"Quyền riêng tư\"\ndiff --git a/searx/translations/zh_CN/LC_MESSAGES/messages.mo b/searx/translations/zh_CN/LC_MESSAGES/messages.mo\nindex b094298fdb..c2006aca13 100644\nBinary files a/searx/translations/zh_CN/LC_MESSAGES/messages.mo and b/searx/translations/zh_CN/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/zh_CN/LC_MESSAGES/messages.po b/searx/translations/zh_CN/LC_MESSAGES/messages.po\nindex f7a7068eb6..78acb2d40b 100644\n--- a/searx/translations/zh_CN/LC_MESSAGES/messages.po\n+++ b/searx/translations/zh_CN/LC_MESSAGES/messages.po\n@@ -3,15 +3,18 @@\n # This file is distributed under the same license as the PROJECT project.\n # \n # Translators:\n+# Crystal RainSlide, 2018\n+# Mingye Wang , 2018\n+# Sion Kazama, 2018\n # wenke, 2015\n-# wenke, 2015-2017\n+# wenke, 2015-2018\n msgid \"\"\n msgstr \"\"\n \"Project-Id-Version: searx\\n\"\n \"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n \"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n-\"PO-Revision-Date: 2017-11-01 20:31+0000\\n\"\n-\"Last-Translator: Adam Tauber \\n\"\n+\"PO-Revision-Date: 2018-12-10 08:32+0000\\n\"\n+\"Last-Translator: Crystal RainSlide\\n\"\n \"Language-Team: Chinese (China) (http://www.transifex.com/asciimoo/searx/language/zh_CN/)\\n\"\n \"MIME-Version: 1.0\\n\"\n \"Content-Type: text/plain; charset=UTF-8\\n\"\n@@ -58,7 +61,7 @@ msgstr \"视频\"\n \n #: searx/webapp.py:142\n msgid \"it\"\n-msgstr \"it\"\n+msgstr \"IT\"\n \n #: searx/webapp.py:143\n msgid \"news\"\n@@ -74,7 +77,7 @@ msgstr \"学术\"\n \n #: searx/webapp.py:399 searx/webapp.py:658\n msgid \"Invalid settings, please edit your preferences\"\n-msgstr \"无效的设置,请编辑你的首选项\"\n+msgstr \"设置无效,请编辑您的首选项\"\n \n #: searx/webapp.py:415\n msgid \"Invalid settings\"\n@@ -86,11 +89,11 
@@ msgstr \"搜索错误\"\n \n #: searx/webapp.py:530\n msgid \"{minutes} minute(s) ago\"\n-msgstr \"{minutes}分钟之前\"\n+msgstr \"{minutes} 分钟前\"\n \n #: searx/webapp.py:532\n msgid \"{hours} hour(s), {minutes} minute(s) ago\"\n-msgstr \"{hours}小时{minutes}分钟之前\"\n+msgstr \"{hours} 小时 {minutes} 分钟前\"\n \n #: searx/answerers/random/answerer.py:53\n msgid \"Random value generator\"\n@@ -106,7 +109,7 @@ msgstr \"统计功能\"\n \n #: searx/answerers/statistics/answerer.py:54\n msgid \"Compute {functions} of the arguments\"\n-msgstr \"计算 {functions}参数\"\n+msgstr \"计算 {functions} 参数\"\n \n #: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n msgid \"Engine time (sec)\"\n@@ -114,7 +117,7 @@ msgstr \"搜索引擎时间(秒)\"\n \n #: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n msgid \"Page loads (sec)\"\n-msgstr \"页面加载 (秒)\"\n+msgstr \"页面加载(秒)\"\n \n #: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n #: searx/templates/oscar/results.html:95\n@@ -128,7 +131,7 @@ msgstr \"得分\"\n \n #: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217\n msgid \"Scores per result\"\n-msgstr \"每个结果等分\"\n+msgstr \"每个结果的分数\"\n \n #: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n msgid \"Errors\"\n@@ -136,19 +139,19 @@ msgstr \"错误\"\n \n #: searx/engines/pdbe.py:87\n msgid \"{title}&nbsp;(OBSOLETE)\"\n-msgstr \"{title}&nbsp;(OBSOLETE)\"\n+msgstr \"{title}&nbsp;(过时)\"\n \n #: searx/engines/pdbe.py:91\n msgid \"This entry has been superseded by\"\n-msgstr \"此条目已经被取代\"\n+msgstr \"此条目已被取代\"\n \n #: searx/engines/pubmed.py:74\n msgid \"No abstract is available for this publication.\"\n-msgstr \"\"\n+msgstr \"本出版物没有摘要。\"\n \n #: searx/plugins/https_rewrite.py:32\n msgid \"Rewrite HTTP links to HTTPS if possible\"\n-msgstr \"尽可能重定向HTTP链接到HTTPS\"\n+msgstr \"将支持 HTTPS 的 HTTP 链接改为 HTTPS 链接\"\n \n #: searx/plugins/infinite_scroll.py:3\n msgid \"Infinite scroll\"\n@@ -160,13 +163,13 @@ msgstr \"滚动到当前页面底部时自动加载下一页\"\n \n #: searx/plugins/oa_doi_rewrite.py:9\n msgid \"Open Access DOI rewrite\"\n-msgstr \"\"\n+msgstr \"开放访问 DOI 重定向\"\n \n #: searx/plugins/oa_doi_rewrite.py:10\n msgid \"\"\n \"Avoid paywalls by redirecting to open-access versions of publications when \"\n \"available\"\n-msgstr \"重定向到可浏览的文章来避免付费墙(如果可用)\"\n+msgstr \"尽量重定向到开放访问的文章以避免付费墙(如果可用)\"\n \n #: searx/plugins/open_results_on_new_tab.py:18\n #: searx/templates/oscar/preferences.html:114\n@@ -178,7 +181,7 @@ msgstr \"在新标签页打开搜索链接\"\n msgid \"\"\n \"Results are opened in the same window by default. This plugin overwrites the\"\n \" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n-msgstr \"搜索结果默认在原窗口打开。这个插件使其在新标签页/窗口打开。(需要启用JavaScript )\"\n+msgstr \"搜索结果默认在原窗口打开。此插件能使其在新标签页或新窗口打开。(需启用 JavaScript)\"\n \n #: searx/plugins/search_on_category_select.py:18\n msgid \"Search on category select\"\n@@ -188,13 +191,13 @@ msgstr \"搜索类别选择\"\n msgid \"\"\n \"Perform search immediately if a category selected. Disable to select \"\n \"multiple categories. 
(JavaScript required)\"\n-msgstr \"选中一个类别立即搜索。禁用可以选择多个类别搜索。(需要启用JavaScript )\"\n+msgstr \"选中一个类别后立即执行搜索。禁用后,可以选择多个类别一起搜索。(需启用 JavaScript)\"\n \n #: searx/plugins/self_info.py:20\n msgid \"\"\n \"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n \"contains \\\"user agent\\\".\"\n-msgstr \"搜索“ip”显示你的ip以及搜索内容含有“user agent”显示你的user agent。\"\n+msgstr \"当您搜索「ip」时,这将会显示您的 IP 地址;同理,在搜索「user agent」时,将会显示您的 User Agent。\"\n \n #: searx/plugins/tracker_url_remover.py:26\n msgid \"Tracker URL remover\"\n@@ -206,13 +209,13 @@ msgstr \"从返回的链接中移除跟踪参数\"\n \n #: searx/plugins/vim_hotkeys.py:3\n msgid \"Vim-like hotkeys\"\n-msgstr \"类vim快捷键\"\n+msgstr \"Vim 式快捷键\"\n \n #: searx/plugins/vim_hotkeys.py:4\n msgid \"\"\n \"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n \"\\\"h\\\" key on main or result page to get help.\"\n-msgstr \"使用类vim快捷键浏览搜索结果(JavaScript启用)。按“h”键获取帮助。\"\n+msgstr \"使用 Vim 式快捷键浏览搜索结果(需要 JavaScript)。在主页或结果页按“h”键获取帮助。\"\n \n #: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n #: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n@@ -225,7 +228,7 @@ msgstr \"未找到网页\"\n #: searx/templates/simple/404.html:6\n #, python-format\n msgid \"Go to %(search_page)s.\"\n-msgstr \"返回%(search_page)s。\"\n+msgstr \"前往 %(search_page)s。\"\n \n #: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n #: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n@@ -342,7 +345,7 @@ msgstr \"安全搜索\"\n #: searx/templates/oscar/preferences.html:90\n #: searx/templates/simple/preferences.html:66\n msgid \"Strict\"\n-msgstr \"严格模式\"\n+msgstr \"严格\"\n \n #: searx/templates/courgette/preferences.html:67\n #: searx/templates/legacy/preferences.html:68\n@@ -356,7 +359,7 @@ msgstr \"中等\"\n #: searx/templates/oscar/preferences.html:92\n #: searx/templates/simple/preferences.html:68\n msgid \"None\"\n-msgstr \"不过滤\"\n+msgstr \"无\"\n \n #: searx/templates/courgette/preferences.html:73\n #: searx/templates/legacy/preferences.html:74\n@@ -444,7 +447,7 @@ msgstr \"阻止\"\n msgid \"\"\n \"These settings are stored in your cookies, this allows us not to store this \"\n \"data about you.\"\n-msgstr \"这些设置保存在你的cookie,我们将不能保存你的数据。\"\n+msgstr \"这些设置被存储在您的 cookie 中,这种保存设置的方式允许我们不存储您的设置数据。\"\n \n #: searx/templates/courgette/preferences.html:124\n #: searx/templates/legacy/preferences.html:121\n@@ -454,7 +457,7 @@ msgstr \"这些设置保存在你的cookie,我们将不能保存你的数据。\n msgid \"\"\n \"These cookies serve your sole convenience, we don't use these cookies to \"\n \"track you.\"\n-msgstr \"这些cookie是为了让你更加方便,我们不会使用cookie追踪你。\"\n+msgstr \"这些 Cookie 信息可辅助您便捷地使用服务,我们不会利用这些信息来追踪您。\"\n \n #: searx/templates/courgette/preferences.html:127\n #: searx/templates/legacy/preferences.html:124\n@@ -544,14 +547,14 @@ msgstr \"原始上下文\"\n #: searx/templates/oscar/result_templates/torrent.html:6\n #: searx/templates/simple/result_templates/torrent.html:9\n msgid \"Seeder\"\n-msgstr \"Seeder\"\n+msgstr \"做种\"\n \n #: searx/templates/courgette/result_templates/torrent.html:7\n #: searx/templates/legacy/result_templates/torrent.html:11\n #: searx/templates/oscar/result_templates/torrent.html:6\n #: searx/templates/simple/result_templates/torrent.html:9\n msgid \"Leecher\"\n-msgstr \"Leecher\"\n+msgstr \"接收\"\n \n #: searx/templates/courgette/result_templates/torrent.html:9\n #: searx/templates/legacy/result_templates/torrent.html:9\n@@ -570,7 +573,7 @@ msgstr \"种子文件\"\n #: searx/templates/legacy/categories.html:8\n #: 
searx/templates/simple/categories.html:6\n msgid \"Click on the magnifier to perform search\"\n-msgstr \"点击放大镜开始搜索\"\n+msgstr \"点击放大镜按钮开始搜索\"\n \n #: searx/templates/legacy/preferences.html:84\n #: searx/templates/oscar/preferences.html:113\n@@ -622,7 +625,7 @@ msgstr \"技术支持来自\"\n \n #: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n msgid \"a privacy-respecting, hackable metasearch engine\"\n-msgstr \"一个尊重隐私,可再开发的元搜索引擎\"\n+msgstr \"一个尊重隐私、可再开发的元搜索引擎\"\n \n #: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n #: searx/templates/simple/macros.html:43\n@@ -662,12 +665,12 @@ msgstr \"智能答复\"\n #: searx/templates/oscar/preferences.html:17\n #: searx/templates/oscar/preferences.html:272\n msgid \"Cookies\"\n-msgstr \"Cookie\"\n+msgstr \"Cookies\"\n \n #: searx/templates/oscar/preferences.html:42\n #: searx/templates/simple/preferences.html:48\n msgid \"What language do you prefer for search?\"\n-msgstr \"你搜索时更喜欢什么语言?\"\n+msgstr \"您更喜欢使用什么语言进行搜索?\"\n \n #: searx/templates/oscar/preferences.html:48\n #: searx/templates/simple/preferences.html:128\n@@ -682,14 +685,14 @@ msgstr \"自动补全搜索字词\"\n #: searx/templates/oscar/preferences.html:69\n #: searx/templates/simple/preferences.html:173\n msgid \"Proxying image results through searx\"\n-msgstr \"通过searx代理图片\"\n+msgstr \"通过 searx 代理图片结果\"\n \n #: searx/templates/oscar/preferences.html:78\n msgid \"\"\n \"Change how forms are submited, learn more about request methods\"\n-msgstr \"更改请求方法,深入了解请求方法\"\n+msgstr \"更改提交表单时使用的请求方法,深入了解请求方法\"\n \n #: searx/templates/oscar/preferences.html:87\n #: searx/templates/simple/preferences.html:71\n@@ -699,12 +702,12 @@ msgstr \"过滤内容\"\n #: searx/templates/oscar/preferences.html:97\n #: searx/templates/simple/preferences.html:139\n msgid \"Change searx layout\"\n-msgstr \"改变searx布局\"\n+msgstr \"改变 searx 布局\"\n \n #: searx/templates/oscar/preferences.html:106\n #: searx/templates/oscar/preferences.html:111\n msgid \"Choose style for this theme\"\n-msgstr \"选择这个主题的样式\"\n+msgstr \"选择此主题的样式\"\n \n #: searx/templates/oscar/preferences.html:106\n #: searx/templates/oscar/preferences.html:111\n@@ -713,13 +716,13 @@ msgstr \"样式\"\n \n #: searx/templates/oscar/preferences.html:122\n msgid \"Open Access DOI resolver\"\n-msgstr \"\"\n+msgstr \"开放访问 DOI 解析器\"\n \n #: searx/templates/oscar/preferences.html:123\n msgid \"\"\n \"Redirect to open-access versions of publications when available (plugin \"\n \"required)\"\n-msgstr \"\"\n+msgstr \"尽可能重定向到出版物的开放访问版本(需要插件)\"\n \n #: searx/templates/oscar/preferences.html:163\n #: searx/templates/oscar/preferences.html:175\n@@ -752,7 +755,7 @@ msgstr \"最大时间\"\n \n #: searx/templates/oscar/preferences.html:248\n msgid \"This is the list of searx's instant answering modules.\"\n-msgstr \"这是searx的即时回答模块。\"\n+msgstr \"这是 searx 的即时回答模块列表。\"\n \n #: searx/templates/oscar/preferences.html:252\n msgid \"Name\"\n@@ -768,21 +771,21 @@ msgstr \"描述\"\n \n #: searx/templates/oscar/preferences.html:255\n msgid \"Examples\"\n-msgstr \"例子\"\n+msgstr \"示例\"\n \n #: searx/templates/oscar/preferences.html:275\n msgid \"\"\n \"This is the list of cookies and their values searx is storing on your \"\n \"computer.\"\n-msgstr \"这里展示了searx保存在你的电脑上的cookie。\"\n+msgstr \"此列表展示了 searx 在您设备上存储的 cookie 信息。\"\n \n #: searx/templates/oscar/preferences.html:276\n msgid \"With that list, you can assess searx transparency.\"\n-msgstr \"有了这个列表,你可以评估searx透明度。\"\n+msgstr \"您可以基于此表格来评估 searx 的透明度。\"\n \n #: searx/templates/oscar/preferences.html:281\n msgid \"Cookie 
name\"\n-msgstr \"cookie名称\"\n+msgstr \"Cookie 名称\"\n \n #: searx/templates/oscar/preferences.html:282\n msgid \"Value\"\n@@ -796,7 +799,7 @@ msgstr \"当前保存选项的搜索链接\"\n msgid \"\"\n \"Note: specifying custom settings in the search URL can reduce privacy by \"\n \"leaking data to the clicked result sites.\"\n-msgstr \"注意:在搜索链接中保存自定义设置会把数据泄露给点击的网站泄漏隐私。\"\n+msgstr \"注意:在搜索链接中保存自定义设置会把数据泄露给点击的结果网站,从而泄漏隐私。\"\n \n #: searx/templates/oscar/results.html:17\n msgid \"Search results\"\n@@ -810,7 +813,7 @@ msgstr \"尝试搜索:\"\n #: searx/templates/oscar/results.html:100\n #: searx/templates/simple/results.html:25\n msgid \"Engines cannot retrieve results\"\n-msgstr \"引擎无法检索结果\"\n+msgstr \"引擎无法获取结果\"\n \n #: searx/templates/oscar/results.html:131\n msgid \"Links\"\n@@ -858,29 +861,29 @@ msgstr \"小心!\"\n \n #: searx/templates/oscar/messages/first_time.html:7\n msgid \"It look like you are using searx first time.\"\n-msgstr \"看起来你是第一次使用searx。\"\n+msgstr \"看来这是您第一次使用 searx。\"\n \n #: searx/templates/oscar/messages/no_cookies.html:3\n msgid \"Information!\"\n-msgstr \"信息!\"\n+msgstr \"注意!\"\n \n #: searx/templates/oscar/messages/no_cookies.html:4\n msgid \"currently, there are no cookies defined.\"\n-msgstr \"目前还未保存cookie。\"\n+msgstr \"目前,还没有任何信息被存储在 Cookie 中。\"\n \n #: searx/templates/oscar/messages/no_data_available.html:4\n msgid \"There is currently no data available. \"\n-msgstr \"目前没有数据可用。\"\n+msgstr \"目前没有可用的数据。\"\n \n #: searx/templates/oscar/messages/no_results.html:4\n #: searx/templates/simple/messages/no_results.html:4\n msgid \"Engines cannot retrieve results.\"\n-msgstr \"引擎无法检索结果。\"\n+msgstr \"引擎无法获取结果。\"\n \n #: searx/templates/oscar/messages/no_results.html:10\n #: searx/templates/simple/messages/no_results.html:10\n msgid \"Please, try again later or find another searx instance.\"\n-msgstr \"请稍后再试或使用其他的searx实例。\"\n+msgstr \"请稍后再试,或寻找其它的 searx 实例替代。\"\n \n #: searx/templates/oscar/messages/no_results.html:14\n #: searx/templates/simple/messages/no_results.html:14\n@@ -892,7 +895,7 @@ msgstr \"抱歉!\"\n msgid \"\"\n \"we didn't find any results. 
Please use another query or search in more \"\n \"categories.\"\n-msgstr \"我们没有找到任何结果。请换用其他词语或在更多类别中搜索。\"\n+msgstr \"我们没有找到任何结果。请使用其它关键词或在更多类别中搜索。\"\n \n #: searx/templates/oscar/messages/save_settings_successfull.html:7\n msgid \"Well done!\"\n@@ -961,22 +964,22 @@ msgstr \"B\"\n #: searx/templates/oscar/result_templates/torrent.html:10\n #: searx/templates/simple/result_templates/torrent.html:13\n msgid \"kiB\"\n-msgstr \"KB\"\n+msgstr \"kiB\"\n \n #: searx/templates/oscar/result_templates/torrent.html:11\n #: searx/templates/simple/result_templates/torrent.html:14\n msgid \"MiB\"\n-msgstr \"MB\"\n+msgstr \"MiB\"\n \n #: searx/templates/oscar/result_templates/torrent.html:12\n #: searx/templates/simple/result_templates/torrent.html:15\n msgid \"GiB\"\n-msgstr \"GB\"\n+msgstr \"GiB\"\n \n #: searx/templates/oscar/result_templates/torrent.html:13\n #: searx/templates/simple/result_templates/torrent.html:16\n msgid \"TiB\"\n-msgstr \"TB\"\n+msgstr \"TiB\"\n \n #: searx/templates/oscar/result_templates/torrent.html:15\n #: searx/templates/simple/result_templates/torrent.html:20\n@@ -995,7 +998,7 @@ msgstr \"隐藏视频\"\n \n #: searx/templates/pix-art/results.html:28\n msgid \"Load more...\"\n-msgstr \"载入更多...\"\n+msgstr \"载入更多……\"\n \n #: searx/templates/simple/base.html:31\n msgid \"No item found\"\n@@ -1003,7 +1006,7 @@ msgstr \"未找到条目\"\n \n #: searx/templates/simple/preferences.html:89\n msgid \"Supports selected language\"\n-msgstr \"是否支持选定的语言\"\n+msgstr \"支持选定的语言\"\n \n #: searx/templates/simple/preferences.html:118\n msgid \"User interface\"\ndiff --git a/searx/translations/zh_Hant_TW/LC_MESSAGES/messages.mo b/searx/translations/zh_Hant_TW/LC_MESSAGES/messages.mo\nindex b7e099977f..0b2a3c921c 100644\nBinary files a/searx/translations/zh_Hant_TW/LC_MESSAGES/messages.mo and b/searx/translations/zh_Hant_TW/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/zh_TW/LC_MESSAGES/messages.mo b/searx/translations/zh_TW/LC_MESSAGES/messages.mo\nnew file mode 100644\nindex 0000000000..b6d43e2a89\nBinary files /dev/null and b/searx/translations/zh_TW/LC_MESSAGES/messages.mo differ\ndiff --git a/searx/translations/zh_TW/LC_MESSAGES/messages.po b/searx/translations/zh_TW/LC_MESSAGES/messages.po\nnew file mode 100644\nindex 0000000000..0a4796f97e\n--- /dev/null\n+++ b/searx/translations/zh_TW/LC_MESSAGES/messages.po\n@@ -0,0 +1,1015 @@\n+# Translations template for PROJECT.\n+# Copyright (C) 2017 ORGANIZATION\n+# This file is distributed under the same license as the PROJECT project.\n+# \n+# Translators:\n+# FIRST AUTHOR , 2016\n+# Jeff Huang , 2017\n+# Mingye Wang , 2018\n+msgid \"\"\n+msgstr \"\"\n+\"Project-Id-Version: searx\\n\"\n+\"Report-Msgid-Bugs-To: EMAIL@ADDRESS\\n\"\n+\"POT-Creation-Date: 2017-11-01 21:31+0100\\n\"\n+\"PO-Revision-Date: 2018-09-16 00:29+0000\\n\"\n+\"Last-Translator: Mingye Wang \\n\"\n+\"Language-Team: Chinese (Taiwan) (http://www.transifex.com/asciimoo/searx/language/zh_TW/)\\n\"\n+\"MIME-Version: 1.0\\n\"\n+\"Content-Type: text/plain; charset=UTF-8\\n\"\n+\"Content-Transfer-Encoding: 8bit\\n\"\n+\"Generated-By: Babel 2.3.4\\n\"\n+\"Language: zh_TW\\n\"\n+\"Plural-Forms: nplurals=1; plural=0;\\n\"\n+\n+#: searx/search.py:137 searx/search.py:182\n+msgid \"timeout\"\n+msgstr \"逾時\"\n+\n+#: searx/search.py:144\n+msgid \"request exception\"\n+msgstr \"請求例外\"\n+\n+#: searx/search.py:151\n+msgid \"unexpected crash\"\n+msgstr \"未預期的當機\"\n+\n+#: searx/webapp.py:136\n+msgid \"files\"\n+msgstr \"檔案\"\n+\n+#: searx/webapp.py:137\n+msgid \"general\"\n+msgstr \"一般\"\n+\n+#: 
searx/webapp.py:138\n+msgid \"music\"\n+msgstr \"音樂\"\n+\n+#: searx/webapp.py:139\n+msgid \"social media\"\n+msgstr \"社群媒體\"\n+\n+#: searx/webapp.py:140\n+msgid \"images\"\n+msgstr \"圖片\"\n+\n+#: searx/webapp.py:141\n+msgid \"videos\"\n+msgstr \"影片\"\n+\n+#: searx/webapp.py:142\n+msgid \"it\"\n+msgstr \"IT\"\n+\n+#: searx/webapp.py:143\n+msgid \"news\"\n+msgstr \"新聞\"\n+\n+#: searx/webapp.py:144\n+msgid \"map\"\n+msgstr \"地圖\"\n+\n+#: searx/webapp.py:145\n+msgid \"science\"\n+msgstr \"科學\"\n+\n+#: searx/webapp.py:399 searx/webapp.py:658\n+msgid \"Invalid settings, please edit your preferences\"\n+msgstr \"無效的設定,請編輯您的偏好設定\"\n+\n+#: searx/webapp.py:415\n+msgid \"Invalid settings\"\n+msgstr \"無效的設定\"\n+\n+#: searx/webapp.py:449 searx/webapp.py:493\n+msgid \"search error\"\n+msgstr \"搜尋錯誤\"\n+\n+#: searx/webapp.py:530\n+msgid \"{minutes} minute(s) ago\"\n+msgstr \"{minutes} 分鐘前\"\n+\n+#: searx/webapp.py:532\n+msgid \"{hours} hour(s), {minutes} minute(s) ago\"\n+msgstr \"{hours} 小時 {minutes} 分鐘前\"\n+\n+#: searx/answerers/random/answerer.py:53\n+msgid \"Random value generator\"\n+msgstr \"隨機數值產生器\"\n+\n+#: searx/answerers/random/answerer.py:54\n+msgid \"Generate different random values\"\n+msgstr \"生成不同的隨機數值\"\n+\n+#: searx/answerers/statistics/answerer.py:53\n+msgid \"Statistics functions\"\n+msgstr \"統計功能\"\n+\n+#: searx/answerers/statistics/answerer.py:54\n+msgid \"Compute {functions} of the arguments\"\n+msgstr \"計算 {functions} 參數\"\n+\n+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201\n+msgid \"Engine time (sec)\"\n+msgstr \"引擎時間(秒)\"\n+\n+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205\n+msgid \"Page loads (sec)\"\n+msgstr \"頁面載入(秒)\"\n+\n+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209\n+#: searx/templates/oscar/results.html:95\n+#: searx/templates/simple/results.html:20\n+msgid \"Number of results\"\n+msgstr \"結果筆數\"\n+\n+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213\n+msgid \"Scores\"\n+msgstr \"分數\"\n+\n+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217\n+msgid \"Scores per result\"\n+msgstr \"每個結果的分數\"\n+\n+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221\n+msgid \"Errors\"\n+msgstr \"錯誤\"\n+\n+#: searx/engines/pdbe.py:87\n+msgid \"{title}&nbsp;(OBSOLETE)\"\n+msgstr \"{title}&nbsp;(已過時)\"\n+\n+#: searx/engines/pdbe.py:91\n+msgid \"This entry has been superseded by\"\n+msgstr \"此條目已被取代\"\n+\n+#: searx/engines/pubmed.py:74\n+msgid \"No abstract is available for this publication.\"\n+msgstr \"此出版品無可用摘要。\"\n+\n+#: searx/plugins/https_rewrite.py:32\n+msgid \"Rewrite HTTP links to HTTPS if possible\"\n+msgstr \"可以的話將 HTTP 連結重寫為 HTTPS\"\n+\n+#: searx/plugins/infinite_scroll.py:3\n+msgid \"Infinite scroll\"\n+msgstr \"無限捲動\"\n+\n+#: searx/plugins/infinite_scroll.py:4\n+msgid \"Automatically load next page when scrolling to bottom of current page\"\n+msgstr \"當捲動至目前頁面的底部時自動載入下一頁\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:9\n+msgid \"Open Access DOI rewrite\"\n+msgstr \"開放存取 DOI 重寫\"\n+\n+#: searx/plugins/oa_doi_rewrite.py:10\n+msgid \"\"\n+\"Avoid paywalls by redirecting to open-access versions of publications when \"\n+\"available\"\n+msgstr \"盡可能重新導向至出版品的開放存取版本,來避免付費牆\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:18\n+#: searx/templates/oscar/preferences.html:114\n+#: searx/templates/simple/preferences.html:149\n+msgid \"Open result links on new browser tabs\"\n+msgstr \"在新瀏覽器分頁中開啟結果連結\"\n+\n+#: searx/plugins/open_results_on_new_tab.py:19\n+msgid 
\"\"\n+\"Results are opened in the same window by default. This plugin overwrites the\"\n+\" default behaviour to open links on new tabs/windows. (JavaScript required)\"\n+msgstr \"結果預設會在同一個視窗開啟。這個外掛程式會覆寫預設行為,會在新分頁/視窗中開啟連結。(需要 JavaScript)\"\n+\n+#: searx/plugins/search_on_category_select.py:18\n+msgid \"Search on category select\"\n+msgstr \"類別選取搜尋\"\n+\n+#: searx/plugins/search_on_category_select.py:19\n+msgid \"\"\n+\"Perform search immediately if a category selected. Disable to select \"\n+\"multiple categories. (JavaScript required)\"\n+msgstr \"若分類被選取時立刻執行搜尋。停用以選取多個分類。(需要 JavaScript)\"\n+\n+#: searx/plugins/self_info.py:20\n+msgid \"\"\n+\"Displays your IP if the query is \\\"ip\\\" and your user agent if the query \"\n+\"contains \\\"user agent\\\".\"\n+msgstr \"若搜尋字串爲「ip」則顯示您的 IP,而若是「user agent」則顯示您的使用者代理字串。\"\n+\n+#: searx/plugins/tracker_url_remover.py:26\n+msgid \"Tracker URL remover\"\n+msgstr \"追蹤器 URL 移除器\"\n+\n+#: searx/plugins/tracker_url_remover.py:27\n+msgid \"Remove trackers arguments from the returned URL\"\n+msgstr \"從傳回的 URL 中移除追蹤器參數\"\n+\n+#: searx/plugins/vim_hotkeys.py:3\n+msgid \"Vim-like hotkeys\"\n+msgstr \"類 Vim 快捷鍵\"\n+\n+#: searx/plugins/vim_hotkeys.py:4\n+msgid \"\"\n+\"Navigate search results with Vim-like hotkeys (JavaScript required). Press \"\n+\"\\\"h\\\" key on main or result page to get help.\"\n+msgstr \"以類 Vim 的快捷鍵導覽搜尋結果(需要 JavaScript)。在主頁面或結果頁面按「h」鍵以取得說明。\"\n+\n+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4\n+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4\n+#: searx/templates/simple/404.html:4\n+msgid \"Page not found\"\n+msgstr \"找不到頁面\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+#, python-format\n+msgid \"Go to %(search_page)s.\"\n+msgstr \"到 %(search_page)s。\"\n+\n+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6\n+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6\n+#: searx/templates/simple/404.html:6\n+msgid \"search page\"\n+msgstr \"搜尋頁面\"\n+\n+#: searx/templates/courgette/index.html:9\n+#: searx/templates/courgette/index.html:13\n+#: searx/templates/courgette/results.html:5\n+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12\n+#: searx/templates/oscar/navbar.html:7\n+#: searx/templates/oscar/preferences.html:3\n+#: searx/templates/pix-art/index.html:8\n+msgid \"preferences\"\n+msgstr \"偏好設定\"\n+\n+#: searx/templates/courgette/index.html:11\n+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2\n+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7\n+msgid \"about\"\n+msgstr \"關於\"\n+\n+#: searx/templates/courgette/preferences.html:5\n+#: searx/templates/legacy/preferences.html:5\n+#: searx/templates/oscar/preferences.html:8\n+#: searx/templates/pix-art/preferences.html:5\n+#: searx/templates/simple/preferences.html:26\n+msgid \"Preferences\"\n+msgstr \"偏好設定\"\n+\n+#: searx/templates/courgette/preferences.html:9\n+#: searx/templates/legacy/preferences.html:9\n+#: searx/templates/oscar/preferences.html:33\n+#: searx/templates/oscar/preferences.html:35\n+#: searx/templates/simple/preferences.html:34\n+msgid \"Default categories\"\n+msgstr \"預設分類\"\n+\n+#: searx/templates/courgette/preferences.html:13\n+#: searx/templates/legacy/preferences.html:14\n+#: searx/templates/oscar/preferences.html:41\n+#: 
searx/templates/pix-art/preferences.html:9\n+#: searx/templates/simple/preferences.html:39\n+#: searx/templates/simple/preferences.html:163\n+msgid \"Search language\"\n+msgstr \"搜尋語言\"\n+\n+#: searx/templates/courgette/preferences.html:16\n+#: searx/templates/legacy/preferences.html:17\n+#: searx/templates/oscar/languages.html:6\n+#: searx/templates/pix-art/preferences.html:12\n+#: searx/templates/simple/languages.html:2\n+#: searx/templates/simple/preferences.html:42\n+msgid \"Default language\"\n+msgstr \"預設語言\"\n+\n+#: searx/templates/courgette/preferences.html:24\n+#: searx/templates/legacy/preferences.html:25\n+#: searx/templates/oscar/preferences.html:47\n+#: searx/templates/pix-art/preferences.html:20\n+#: searx/templates/simple/preferences.html:120\n+msgid \"Interface language\"\n+msgstr \"介面語言\"\n+\n+#: searx/templates/courgette/preferences.html:34\n+#: searx/templates/legacy/preferences.html:35\n+#: searx/templates/oscar/preferences.html:57\n+#: searx/templates/simple/preferences.html:51\n+msgid \"Autocomplete\"\n+msgstr \"自動完成\"\n+\n+#: searx/templates/courgette/preferences.html:45\n+#: searx/templates/legacy/preferences.html:46\n+#: searx/templates/oscar/preferences.html:68\n+#: searx/templates/simple/preferences.html:166\n+msgid \"Image proxy\"\n+msgstr \"圖片代理伺服器\"\n+\n+#: searx/templates/courgette/preferences.html:48\n+#: searx/templates/legacy/preferences.html:49\n+#: searx/templates/oscar/preferences.html:72\n+#: searx/templates/simple/preferences.html:169\n+msgid \"Enabled\"\n+msgstr \"已啟用\"\n+\n+#: searx/templates/courgette/preferences.html:49\n+#: searx/templates/legacy/preferences.html:50\n+#: searx/templates/oscar/preferences.html:73\n+#: searx/templates/simple/preferences.html:170\n+msgid \"Disabled\"\n+msgstr \"已停用\"\n+\n+#: searx/templates/courgette/preferences.html:54\n+#: searx/templates/legacy/preferences.html:55\n+#: searx/templates/oscar/preferences.html:77\n+#: searx/templates/pix-art/preferences.html:30\n+#: searx/templates/simple/preferences.html:156\n+msgid \"Method\"\n+msgstr \"方法\"\n+\n+#: searx/templates/courgette/preferences.html:63\n+#: searx/templates/legacy/preferences.html:64\n+#: searx/templates/oscar/preferences.html:86\n+#: searx/templates/oscar/preferences.html:165\n+#: searx/templates/oscar/preferences.html:173\n+#: searx/templates/simple/preferences.html:63\n+#: searx/templates/simple/preferences.html:90\n+msgid \"SafeSearch\"\n+msgstr \"安全搜尋\"\n+\n+#: searx/templates/courgette/preferences.html:66\n+#: searx/templates/legacy/preferences.html:67\n+#: searx/templates/oscar/preferences.html:90\n+#: searx/templates/simple/preferences.html:66\n+msgid \"Strict\"\n+msgstr \"嚴格\"\n+\n+#: searx/templates/courgette/preferences.html:67\n+#: searx/templates/legacy/preferences.html:68\n+#: searx/templates/oscar/preferences.html:91\n+#: searx/templates/simple/preferences.html:67\n+msgid \"Moderate\"\n+msgstr \"適中\"\n+\n+#: searx/templates/courgette/preferences.html:68\n+#: searx/templates/legacy/preferences.html:69\n+#: searx/templates/oscar/preferences.html:92\n+#: searx/templates/simple/preferences.html:68\n+msgid \"None\"\n+msgstr \"無\"\n+\n+#: searx/templates/courgette/preferences.html:73\n+#: searx/templates/legacy/preferences.html:74\n+#: searx/templates/oscar/preferences.html:96\n+#: searx/templates/pix-art/preferences.html:39\n+#: searx/templates/simple/preferences.html:131\n+msgid \"Themes\"\n+msgstr \"佈景主題\"\n+\n+#: searx/templates/courgette/preferences.html:83\n+msgid \"Color\"\n+msgstr \"顏色\"\n+\n+#: 
searx/templates/courgette/preferences.html:86\n+msgid \"Blue (default)\"\n+msgstr \"藍色(預設值)\"\n+\n+#: searx/templates/courgette/preferences.html:87\n+msgid \"Violet\"\n+msgstr \"紫色\"\n+\n+#: searx/templates/courgette/preferences.html:88\n+msgid \"Green\"\n+msgstr \"綠色\"\n+\n+#: searx/templates/courgette/preferences.html:89\n+msgid \"Cyan\"\n+msgstr \"青色\"\n+\n+#: searx/templates/courgette/preferences.html:90\n+msgid \"Orange\"\n+msgstr \"橘色\"\n+\n+#: searx/templates/courgette/preferences.html:91\n+msgid \"Red\"\n+msgstr \"紅色\"\n+\n+#: searx/templates/courgette/preferences.html:96\n+#: searx/templates/legacy/preferences.html:93\n+#: searx/templates/pix-art/preferences.html:49\n+#: searx/templates/simple/preferences.html:77\n+msgid \"Currently used search engines\"\n+msgstr \"目前使用的搜尋引擎\"\n+\n+#: searx/templates/courgette/preferences.html:100\n+#: searx/templates/legacy/preferences.html:97\n+#: searx/templates/oscar/preferences.html:162\n+#: searx/templates/oscar/preferences.html:176\n+#: searx/templates/pix-art/preferences.html:53\n+#: searx/templates/simple/preferences.html:87\n+msgid \"Engine name\"\n+msgstr \"引擎名稱\"\n+\n+#: searx/templates/courgette/preferences.html:101\n+#: searx/templates/legacy/preferences.html:98\n+msgid \"Category\"\n+msgstr \"分類\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:113\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:110\n+#: searx/templates/oscar/preferences.html:161\n+#: searx/templates/oscar/preferences.html:177\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:64\n+#: searx/templates/simple/preferences.html:86\n+msgid \"Allow\"\n+msgstr \"允許\"\n+\n+#: searx/templates/courgette/preferences.html:102\n+#: searx/templates/courgette/preferences.html:114\n+#: searx/templates/legacy/preferences.html:99\n+#: searx/templates/legacy/preferences.html:111\n+#: searx/templates/pix-art/preferences.html:54\n+#: searx/templates/pix-art/preferences.html:65\n+msgid \"Block\"\n+msgstr \"封鎖\"\n+\n+#: searx/templates/courgette/preferences.html:122\n+#: searx/templates/legacy/preferences.html:119\n+#: searx/templates/oscar/preferences.html:297\n+#: searx/templates/pix-art/preferences.html:73\n+#: searx/templates/simple/preferences.html:180\n+msgid \"\"\n+\"These settings are stored in your cookies, this allows us not to store this \"\n+\"data about you.\"\n+msgstr \"這些設定只會儲存在您的 cookies 中,這樣我們無需也不會存儲關於您的資訊。\"\n+\n+#: searx/templates/courgette/preferences.html:124\n+#: searx/templates/legacy/preferences.html:121\n+#: searx/templates/oscar/preferences.html:299\n+#: searx/templates/pix-art/preferences.html:75\n+#: searx/templates/simple/preferences.html:182\n+msgid \"\"\n+\"These cookies serve your sole convenience, we don't use these cookies to \"\n+\"track you.\"\n+msgstr \"這些 cookies 僅做為提供您方便之用,我們不會使用這些 cookies 來追蹤您。\"\n+\n+#: searx/templates/courgette/preferences.html:127\n+#: searx/templates/legacy/preferences.html:124\n+#: searx/templates/oscar/preferences.html:305\n+#: searx/templates/pix-art/preferences.html:78\n+#: searx/templates/simple/preferences.html:185\n+msgid \"save\"\n+msgstr \"儲存\"\n+\n+#: searx/templates/courgette/preferences.html:128\n+#: searx/templates/legacy/preferences.html:125\n+#: searx/templates/oscar/preferences.html:307\n+#: searx/templates/simple/preferences.html:186\n+msgid \"Reset defaults\"\n+msgstr \"重設為預設值\"\n+\n+#: searx/templates/courgette/preferences.html:129\n+#: 
searx/templates/legacy/preferences.html:126\n+#: searx/templates/oscar/preferences.html:306\n+#: searx/templates/pix-art/preferences.html:79\n+#: searx/templates/simple/preferences.html:187\n+msgid \"back\"\n+msgstr \"返回\"\n+\n+#: searx/templates/courgette/results.html:12\n+#: searx/templates/legacy/results.html:13\n+#: searx/templates/oscar/results.html:136\n+#: searx/templates/simple/results.html:58\n+msgid \"Search URL\"\n+msgstr \"搜尋網址\"\n+\n+#: searx/templates/courgette/results.html:16\n+#: searx/templates/legacy/results.html:17\n+#: searx/templates/oscar/results.html:141\n+#: searx/templates/simple/results.html:62\n+msgid \"Download results\"\n+msgstr \"下載結果\"\n+\n+#: searx/templates/courgette/results.html:34\n+#: searx/templates/legacy/results.html:35\n+#: searx/templates/simple/results.html:10\n+msgid \"Answers\"\n+msgstr \"答案\"\n+\n+#: searx/templates/courgette/results.html:42\n+#: searx/templates/legacy/results.html:43\n+#: searx/templates/oscar/results.html:116\n+#: searx/templates/simple/results.html:42\n+msgid \"Suggestions\"\n+msgstr \"建議\"\n+\n+#: searx/templates/courgette/results.html:70\n+#: searx/templates/legacy/results.html:81\n+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78\n+#: searx/templates/simple/results.html:130\n+msgid \"previous page\"\n+msgstr \"上一頁\"\n+\n+#: searx/templates/courgette/results.html:81\n+#: searx/templates/legacy/results.html:92\n+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84\n+#: searx/templates/simple/results.html:145\n+msgid \"next page\"\n+msgstr \"下一頁\"\n+\n+#: searx/templates/courgette/search.html:3\n+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6\n+#: searx/templates/oscar/search_full.html:9\n+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4\n+msgid \"Search for...\"\n+msgstr \"搜尋……\"\n+\n+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4\n+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4\n+#: searx/templates/simple/stats.html:7\n+msgid \"Engine stats\"\n+msgstr \"引擎統計\"\n+\n+#: searx/templates/courgette/result_templates/images.html:4\n+#: searx/templates/legacy/result_templates/images.html:4\n+#: searx/templates/pix-art/result_templates/images.html:4\n+msgid \"original context\"\n+msgstr \"原始內容\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Seeder\"\n+msgstr \"播種者\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:7\n+#: searx/templates/legacy/result_templates/torrent.html:11\n+#: searx/templates/oscar/result_templates/torrent.html:6\n+#: searx/templates/simple/result_templates/torrent.html:9\n+msgid \"Leecher\"\n+msgstr \"接收者\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:9\n+#: searx/templates/legacy/result_templates/torrent.html:9\n+#: searx/templates/oscar/macros.html:23\n+#: searx/templates/simple/result_templates/torrent.html:6\n+msgid \"magnet link\"\n+msgstr \"磁力連結\"\n+\n+#: searx/templates/courgette/result_templates/torrent.html:10\n+#: searx/templates/legacy/result_templates/torrent.html:10\n+#: searx/templates/oscar/macros.html:24\n+#: searx/templates/simple/result_templates/torrent.html:7\n+msgid \"torrent file\"\n+msgstr \"torrent 檔案\"\n+\n+#: searx/templates/legacy/categories.html:8\n+#: 
searx/templates/simple/categories.html:6\n+msgid \"Click on the magnifier to perform search\"\n+msgstr \"在磁鐵上點選以執行搜尋\"\n+\n+#: searx/templates/legacy/preferences.html:84\n+#: searx/templates/oscar/preferences.html:113\n+#: searx/templates/simple/preferences.html:142\n+msgid \"Results on new tabs\"\n+msgstr \"在新分頁開啟結果\"\n+\n+#: searx/templates/legacy/preferences.html:87\n+#: searx/templates/oscar/preferences.html:117\n+#: searx/templates/simple/preferences.html:145\n+msgid \"On\"\n+msgstr \"開啟\"\n+\n+#: searx/templates/legacy/preferences.html:88\n+#: searx/templates/oscar/preferences.html:118\n+#: searx/templates/simple/preferences.html:146\n+msgid \"Off\"\n+msgstr \"關閉\"\n+\n+#: searx/templates/legacy/result_templates/code.html:3\n+#: searx/templates/legacy/result_templates/default.html:3\n+#: searx/templates/legacy/result_templates/map.html:9\n+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48\n+#: searx/templates/simple/macros.html:43\n+msgid \"cached\"\n+msgstr \"已快取\"\n+\n+#: searx/templates/oscar/advanced.html:4\n+msgid \"Advanced settings\"\n+msgstr \"進階設定\"\n+\n+#: searx/templates/oscar/base.html:62\n+#: searx/templates/oscar/messages/first_time.html:4\n+#: searx/templates/oscar/messages/save_settings_successfull.html:5\n+#: searx/templates/oscar/messages/unknow_error.html:5\n+msgid \"Close\"\n+msgstr \"關閉\"\n+\n+#: searx/templates/oscar/base.html:64\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+#: searx/templates/simple/results.html:25\n+msgid \"Error!\"\n+msgstr \"錯誤!\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"Powered by\"\n+msgstr \"技術支援\"\n+\n+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55\n+msgid \"a privacy-respecting, hackable metasearch engine\"\n+msgstr \"一個尊重隱私,可再開發的集合式搜尋引擎\"\n+\n+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50\n+#: searx/templates/simple/macros.html:43\n+msgid \"proxied\"\n+msgstr \"已代理\"\n+\n+#: searx/templates/oscar/macros.html:92\n+msgid \"supported\"\n+msgstr \"支援\"\n+\n+#: searx/templates/oscar/macros.html:96\n+msgid \"not supported\"\n+msgstr \"不支援\"\n+\n+#: searx/templates/oscar/preferences.html:13\n+#: searx/templates/oscar/preferences.html:22\n+#: searx/templates/simple/preferences.html:32\n+msgid \"General\"\n+msgstr \"一般\"\n+\n+#: searx/templates/oscar/preferences.html:14\n+#: searx/templates/oscar/preferences.html:146\n+#: searx/templates/simple/preferences.html:76\n+msgid \"Engines\"\n+msgstr \"引擎\"\n+\n+#: searx/templates/oscar/preferences.html:15\n+#: searx/templates/oscar/preferences.html:219\n+msgid \"Plugins\"\n+msgstr \"外掛程式\"\n+\n+#: searx/templates/oscar/preferences.html:16\n+#: searx/templates/oscar/preferences.html:245\n+msgid \"Answerers\"\n+msgstr \"答案\"\n+\n+#: searx/templates/oscar/preferences.html:17\n+#: searx/templates/oscar/preferences.html:272\n+msgid \"Cookies\"\n+msgstr \"Cookies\"\n+\n+#: searx/templates/oscar/preferences.html:42\n+#: searx/templates/simple/preferences.html:48\n+msgid \"What language do you prefer for search?\"\n+msgstr \"您偏好用哪種語言搜尋?\"\n+\n+#: searx/templates/oscar/preferences.html:48\n+#: searx/templates/simple/preferences.html:128\n+msgid \"Change the language of the layout\"\n+msgstr \"變更佈局的語言\"\n+\n+#: searx/templates/oscar/preferences.html:58\n+#: searx/templates/simple/preferences.html:60\n+msgid \"Find stuff as you type\"\n+msgstr \"隨打即找\"\n+\n+#: searx/templates/oscar/preferences.html:69\n+#: 
searx/templates/simple/preferences.html:173\n+msgid \"Proxying image results through searx\"\n+msgstr \"透過 searx 代理圖片結果\"\n+\n+#: searx/templates/oscar/preferences.html:78\n+msgid \"\"\n+\"Change how forms are submited, learn more about request methods\"\n+msgstr \"變更遞交形式,看看更多關於請求方法的介紹\"\n+\n+#: searx/templates/oscar/preferences.html:87\n+#: searx/templates/simple/preferences.html:71\n+msgid \"Filter content\"\n+msgstr \"過濾內容\"\n+\n+#: searx/templates/oscar/preferences.html:97\n+#: searx/templates/simple/preferences.html:139\n+msgid \"Change searx layout\"\n+msgstr \"變更 searx 佈局\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Choose style for this theme\"\n+msgstr \"選擇這個主題的樣式\"\n+\n+#: searx/templates/oscar/preferences.html:106\n+#: searx/templates/oscar/preferences.html:111\n+msgid \"Style\"\n+msgstr \"樣式\"\n+\n+#: searx/templates/oscar/preferences.html:122\n+msgid \"Open Access DOI resolver\"\n+msgstr \"開放存取 DOI 解析器\"\n+\n+#: searx/templates/oscar/preferences.html:123\n+msgid \"\"\n+\"Redirect to open-access versions of publications when available (plugin \"\n+\"required)\"\n+msgstr \"盡可能重新導向至出版品的開放存取版本(需要外掛程式)\"\n+\n+#: searx/templates/oscar/preferences.html:163\n+#: searx/templates/oscar/preferences.html:175\n+#: searx/templates/simple/preferences.html:88\n+msgid \"Shortcut\"\n+msgstr \"快捷鍵\"\n+\n+#: searx/templates/oscar/preferences.html:164\n+#: searx/templates/oscar/preferences.html:174\n+msgid \"Selected language\"\n+msgstr \"已選取的語言\"\n+\n+#: searx/templates/oscar/preferences.html:166\n+#: searx/templates/oscar/preferences.html:172\n+#: searx/templates/simple/preferences.html:91\n+msgid \"Time range\"\n+msgstr \"時間範圍\"\n+\n+#: searx/templates/oscar/preferences.html:167\n+#: searx/templates/oscar/preferences.html:171\n+#: searx/templates/simple/preferences.html:92\n+msgid \"Avg. time\"\n+msgstr \"平均時間\"\n+\n+#: searx/templates/oscar/preferences.html:168\n+#: searx/templates/oscar/preferences.html:170\n+#: searx/templates/simple/preferences.html:93\n+msgid \"Max time\"\n+msgstr \"最大時間\"\n+\n+#: searx/templates/oscar/preferences.html:248\n+msgid \"This is the list of searx's instant answering modules.\"\n+msgstr \"這是 searx 的即時回覆模組清單。\"\n+\n+#: searx/templates/oscar/preferences.html:252\n+msgid \"Name\"\n+msgstr \"名稱\"\n+\n+#: searx/templates/oscar/preferences.html:253\n+msgid \"Keywords\"\n+msgstr \"關鍵字\"\n+\n+#: searx/templates/oscar/preferences.html:254\n+msgid \"Description\"\n+msgstr \"描述\"\n+\n+#: searx/templates/oscar/preferences.html:255\n+msgid \"Examples\"\n+msgstr \"範例\"\n+\n+#: searx/templates/oscar/preferences.html:275\n+msgid \"\"\n+\"This is the list of cookies and their values searx is storing on your \"\n+\"computer.\"\n+msgstr \"這是 searx 儲存在您電腦上的 cookies 與它們的值的清單。\"\n+\n+#: searx/templates/oscar/preferences.html:276\n+msgid \"With that list, you can assess searx transparency.\"\n+msgstr \"有了這份清單,您就可以評估 searx 的透明度。\"\n+\n+#: searx/templates/oscar/preferences.html:281\n+msgid \"Cookie name\"\n+msgstr \"Cookie 名稱\"\n+\n+#: searx/templates/oscar/preferences.html:282\n+msgid \"Value\"\n+msgstr \"值\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"Search URL of the currently saved preferences\"\n+msgstr \"目前偏好設定的搜尋 URL\"\n+\n+#: searx/templates/oscar/preferences.html:301\n+msgid \"\"\n+\"Note: specifying custom settings in the search URL can reduce privacy by \"\n+\"leaking data to the clicked result sites.\"\n+msgstr \"注意:在搜尋 URL 中指定自訂的設定可能會降低隱私,因為會洩漏資料給點選的結果頁面。\"\n+\n+#: searx/templates/oscar/results.html:17\n+msgid \"Search results\"\n+msgstr \"搜尋結果\"\n+\n+#: searx/templates/oscar/results.html:21\n+#: searx/templates/simple/results.html:84\n+msgid \"Try searching for:\"\n+msgstr \"嘗試搜尋:\"\n+\n+#: searx/templates/oscar/results.html:100\n+#: searx/templates/simple/results.html:25\n+msgid \"Engines cannot retrieve results\"\n+msgstr \"引擎無法擷取結果\"\n+\n+#: searx/templates/oscar/results.html:131\n+msgid \"Links\"\n+msgstr \"連結\"\n+\n+#: searx/templates/oscar/search.html:8\n+#: searx/templates/oscar/search_full.html:11\n+#: searx/templates/simple/search.html:5\n+msgid \"Start search\"\n+msgstr \"開始搜尋\"\n+\n+#: searx/templates/oscar/stats.html:2\n+msgid \"stats\"\n+msgstr \"統計\"\n+\n+#: searx/templates/oscar/time-range.html:3\n+#: searx/templates/simple/time-range.html:3\n+msgid \"Anytime\"\n+msgstr \"任何時間\"\n+\n+#: searx/templates/oscar/time-range.html:6\n+#: searx/templates/simple/time-range.html:6\n+msgid \"Last day\"\n+msgstr \"一天內\"\n+\n+#: searx/templates/oscar/time-range.html:9\n+#: searx/templates/simple/time-range.html:9\n+msgid \"Last week\"\n+msgstr \"一週內\"\n+\n+#: searx/templates/oscar/time-range.html:12\n+#: searx/templates/simple/time-range.html:12\n+msgid \"Last month\"\n+msgstr \"一個月內\"\n+\n+#: searx/templates/oscar/time-range.html:15\n+#: searx/templates/simple/time-range.html:15\n+msgid \"Last year\"\n+msgstr \"一年內\"\n+\n+#: searx/templates/oscar/messages/first_time.html:6\n+#: searx/templates/oscar/messages/no_data_available.html:3\n+msgid \"Heads up!\"\n+msgstr \"注意!\"\n+\n+#: searx/templates/oscar/messages/first_time.html:7\n+msgid \"It look like you are using searx first time.\"\n+msgstr \"看起來您是第一次使用 searx。\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:3\n+msgid \"Information!\"\n+msgstr \"資訊!\"\n+\n+#: searx/templates/oscar/messages/no_cookies.html:4\n+msgid \"currently, there are no cookies defined.\"\n+msgstr 
\"目前未有已定義的 cookies。\"\n+\n+#: searx/templates/oscar/messages/no_data_available.html:4\n+msgid \"There is currently no data available. \"\n+msgstr \"目前無可用資料。\"\n+\n+#: searx/templates/oscar/messages/no_results.html:4\n+#: searx/templates/simple/messages/no_results.html:4\n+msgid \"Engines cannot retrieve results.\"\n+msgstr \"引擎無法擷取結果。\"\n+\n+#: searx/templates/oscar/messages/no_results.html:10\n+#: searx/templates/simple/messages/no_results.html:10\n+msgid \"Please, try again later or find another searx instance.\"\n+msgstr \"請再試一次或是使用其他 searx 實體搜尋。\"\n+\n+#: searx/templates/oscar/messages/no_results.html:14\n+#: searx/templates/simple/messages/no_results.html:14\n+msgid \"Sorry!\"\n+msgstr \"抱歉!\"\n+\n+#: searx/templates/oscar/messages/no_results.html:15\n+#: searx/templates/simple/messages/no_results.html:15\n+msgid \"\"\n+\"we didn't find any results. Please use another query or search in more \"\n+\"categories.\"\n+msgstr \"我們找不到任何結果。請使用其他搜尋方式或在更多分類中搜尋。\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:7\n+msgid \"Well done!\"\n+msgstr \"很好!\"\n+\n+#: searx/templates/oscar/messages/save_settings_successfull.html:8\n+msgid \"Settings saved successfully.\"\n+msgstr \"設定成功儲存。\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:7\n+msgid \"Oh snap!\"\n+msgstr \"糟糕!\"\n+\n+#: searx/templates/oscar/messages/unknow_error.html:8\n+msgid \"Something went wrong.\"\n+msgstr \"發生了一點問題。\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"show media\"\n+msgstr \"顯示媒體\"\n+\n+#: searx/templates/oscar/result_templates/default.html:7\n+#: searx/templates/simple/result_templates/default.html:6\n+msgid \"hide media\"\n+msgstr \"隱藏媒體\"\n+\n+#: searx/templates/oscar/result_templates/images.html:30\n+msgid \"Get image\"\n+msgstr \"取得圖片\"\n+\n+#: searx/templates/oscar/result_templates/images.html:33\n+msgid \"View source\"\n+msgstr \"檢視來源\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"show map\"\n+msgstr \"顯示地圖\"\n+\n+#: searx/templates/oscar/result_templates/map.html:7\n+#: searx/templates/simple/result_templates/map.html:7\n+msgid \"hide map\"\n+msgstr \"隱藏地圖\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"show details\"\n+msgstr \"顯示詳情\"\n+\n+#: searx/templates/oscar/result_templates/map.html:11\n+#: searx/templates/simple/result_templates/map.html:11\n+msgid \"hide details\"\n+msgstr \"隱藏詳情\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:7\n+#: searx/templates/simple/result_templates/torrent.html:11\n+msgid \"Filesize\"\n+msgstr \"檔案大小\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:9\n+#: searx/templates/simple/result_templates/torrent.html:12\n+msgid \"Bytes\"\n+msgstr \"位元組\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:10\n+#: searx/templates/simple/result_templates/torrent.html:13\n+msgid \"kiB\"\n+msgstr \"kiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:11\n+#: searx/templates/simple/result_templates/torrent.html:14\n+msgid \"MiB\"\n+msgstr \"MiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:12\n+#: searx/templates/simple/result_templates/torrent.html:15\n+msgid \"GiB\"\n+msgstr \"GiB\"\n+\n+#: searx/templates/oscar/result_templates/torrent.html:13\n+#: searx/templates/simple/result_templates/torrent.html:16\n+msgid \"TiB\"\n+msgstr \"TiB\"\n+\n+#: 
searx/templates/oscar/result_templates/torrent.html:15\n+#: searx/templates/simple/result_templates/torrent.html:20\n+msgid \"Number of Files\"\n+msgstr \"檔案數量\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"show video\"\n+msgstr \"顯示影片\"\n+\n+#: searx/templates/oscar/result_templates/videos.html:7\n+#: searx/templates/simple/result_templates/videos.html:6\n+msgid \"hide video\"\n+msgstr \"隱藏影片\"\n+\n+#: searx/templates/pix-art/results.html:28\n+msgid \"Load more...\"\n+msgstr \"載入更多……\"\n+\n+#: searx/templates/simple/base.html:31\n+msgid \"No item found\"\n+msgstr \"找不到項目\"\n+\n+#: searx/templates/simple/preferences.html:89\n+msgid \"Supports selected language\"\n+msgstr \"支援選定的語言\"\n+\n+#: searx/templates/simple/preferences.html:118\n+msgid \"User interface\"\n+msgstr \"使用者介面\"\n+\n+#: searx/templates/simple/preferences.html:154\n+msgid \"Privacy\"\n+msgstr \"隱私\"\ndiff --git a/searx/version.py b/searx/version.py\nindex 11dfb1eeff..4e149722e5 100644\n--- a/searx/version.py\n+++ b/searx/version.py\n@@ -18,7 +18,7 @@\n \n # version of searx\n VERSION_MAJOR = 0\n-VERSION_MINOR = 14\n+VERSION_MINOR = 15\n VERSION_BUILD = 0\n \n VERSION_STRING = \"{0}.{1}.{2}\".format(VERSION_MAJOR,\n"}}},{"rowIdx":417,"cells":{"in_source_id":{"kind":"string","value":"UTNkar__moore-554"},"issue":{"kind":"string","value":"Fix cookies for Utnarm\nUtnarm recently switched to utnarm.se instead of utnarm.utn.se. This lead to that you can’t sign in to utnarm.se. Per default, moore uses utn.se as cookie domain and since utnarm.se is a different top level domain the cookies can’t be used. \r\n\r\nWe need to dynamically add utnarm.se as a cookie domain. This python package might be useful https://github.com/ViktorStiskala/django-shared-session\n"},"before_files":{"kind":"list like","value":[{"content":"\"\"\"\nDjango settings for the production environment of Project Moore.\n\nFor more information regarding running in production see,\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport raven\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'DJANGO_SECRET',\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\n)\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DJANGO_DB_NAME', 'moore'),\n 'USER': os.environ.get('DJANGO_DB_USER', 'moore'),\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\n }\n}\n\n\n# CONN_MAX_AGE = 0\n\n# Base URL to use when referring to full URLs within the Wagtail admin\n# backend - e.g. in notification emails. 
Don't include '/admin' or a\n# trailing slash\nBASE_URL = 'https://utn.se'\n\nALLOWED_HOSTS = ['.utn.se', '.utnarm.se']\n\n# Email settings\nDEFAULT_FROM_EMAIL = 'info@utn.se'\n\nEMAIL_SUBJECT_PREFIX = '[UTN] '\n\n# Sentry Configuration - will be sent error messages\nRAVEN_CONFIG = {\n 'dsn': os.environ.get('SENTRY_DSN'),\n 'release': raven.fetch_git_sha(os.path.dirname(BASE_DIR)),\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR',\n 'class': 'raven.contrib.django.raven_compat'\n '.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\nCSRF_COOKIE_SECURE = True\n\nSESSION_COOKIE_DOMAIN = '.utn.se'\n\nSESSION_COOKIE_SECURE = True\n\nMELOS_URL = os.environ.get('MELOS_URL')\nMELOS_ORG_ID = os.environ.get('MELOS_ORG_ID')\nMELOS_ADMIN = os.environ.get('MELOS_ADMIN')\n\n# Google API\nGOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')\n\nRECAPTCHA_PUBLIC_KEY = os.environ.get(\"RECAPTCHA_PUBLIC_KEY\", \"\")\nRECAPTCHA_PRIVATE_KEY = os.environ.get(\"RECAPTCHA_PRIVATE_KEY\", \"\")\n\ntry:\n from .local import *\nexcept ImportError:\n pass\n","path":"src/moore/settings/production.py"}],"string":"[\n {\n \"content\": \"\\\"\\\"\\\"\\nDjango settings for the production environment of Project Moore.\\n\\nFor more information regarding running in production see,\\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\\n\\nFor more information on this file, see\\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\\n\\nFor the full list of settings and their values, see\\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\\n\\\"\\\"\\\"\\nfrom __future__ import absolute_import, unicode_literals\\nimport raven\\n\\nfrom .base import *\\n\\n# SECURITY WARNING: don't run with debug turned on in production!\\nDEBUG = False\\n\\n# SECURITY WARNING: keep the secret key used in production secret!\\nSECRET_KEY = os.environ.get(\\n 'DJANGO_SECRET',\\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\\n)\\n\\n# Database\\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\\n\\nDATABASES = {\\n 'default': {\\n 'ENGINE': 'django.db.backends.postgresql',\\n 'NAME': os.environ.get('DJANGO_DB_NAME', 'moore'),\\n 'USER': os.environ.get('DJANGO_DB_USER', 'moore'),\\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\\n }\\n}\\n\\n\\n# CONN_MAX_AGE = 0\\n\\n# Base URL to use when referring to full URLs within the Wagtail admin\\n# backend - e.g. in notification emails. 
Don't include '/admin' or a\\n# trailing slash\\nBASE_URL = 'https://utn.se'\\n\\nALLOWED_HOSTS = ['.utn.se', '.utnarm.se']\\n\\n# Email settings\\nDEFAULT_FROM_EMAIL = 'info@utn.se'\\n\\nEMAIL_SUBJECT_PREFIX = '[UTN] '\\n\\n# Sentry Configuration - will be sent error messages\\nRAVEN_CONFIG = {\\n 'dsn': os.environ.get('SENTRY_DSN'),\\n 'release': raven.fetch_git_sha(os.path.dirname(BASE_DIR)),\\n}\\n\\nLOGGING = {\\n 'version': 1,\\n 'disable_existing_loggers': True,\\n 'root': {\\n 'level': 'WARNING',\\n 'handlers': ['sentry'],\\n },\\n 'formatters': {\\n 'verbose': {\\n 'format': '%(levelname)s %(asctime)s %(module)s '\\n '%(process)d %(thread)d %(message)s'\\n },\\n },\\n 'handlers': {\\n 'sentry': {\\n 'level': 'ERROR',\\n 'class': 'raven.contrib.django.raven_compat'\\n '.handlers.SentryHandler',\\n 'tags': {'custom-tag': 'x'},\\n },\\n 'console': {\\n 'level': 'DEBUG',\\n 'class': 'logging.StreamHandler',\\n 'formatter': 'verbose'\\n }\\n },\\n 'loggers': {\\n 'django.db.backends': {\\n 'level': 'ERROR',\\n 'handlers': ['console'],\\n 'propagate': False,\\n },\\n 'raven': {\\n 'level': 'DEBUG',\\n 'handlers': ['console'],\\n 'propagate': False,\\n },\\n 'sentry.errors': {\\n 'level': 'DEBUG',\\n 'handlers': ['console'],\\n 'propagate': False,\\n },\\n },\\n}\\n\\nCSRF_COOKIE_SECURE = True\\n\\nSESSION_COOKIE_DOMAIN = '.utn.se'\\n\\nSESSION_COOKIE_SECURE = True\\n\\nMELOS_URL = os.environ.get('MELOS_URL')\\nMELOS_ORG_ID = os.environ.get('MELOS_ORG_ID')\\nMELOS_ADMIN = os.environ.get('MELOS_ADMIN')\\n\\n# Google API\\nGOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')\\n\\nRECAPTCHA_PUBLIC_KEY = os.environ.get(\\\"RECAPTCHA_PUBLIC_KEY\\\", \\\"\\\")\\nRECAPTCHA_PRIVATE_KEY = os.environ.get(\\\"RECAPTCHA_PRIVATE_KEY\\\", \\\"\\\")\\n\\ntry:\\n from .local import *\\nexcept ImportError:\\n pass\\n\",\n \"path\": \"src/moore/settings/production.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"\"\"\"\nDjango settings for the production environment of Project Moore.\n\nFor more information regarding running in production see,\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport raven\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'DJANGO_SECRET',\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\n)\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DJANGO_DB_NAME', 'moore'),\n 'USER': os.environ.get('DJANGO_DB_USER', 'moore'),\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\n }\n}\n\n\n# CONN_MAX_AGE = 0\n\n# Base URL to use when referring to full URLs within the Wagtail admin\n# backend - e.g. in notification emails. 
Don't include '/admin' or a\n# trailing slash\nBASE_URL = 'https://utn.se'\n\nALLOWED_HOSTS = ['.utn.se', '.utnarm.se']\n\n# Email settings\nDEFAULT_FROM_EMAIL = 'info@utn.se'\n\nEMAIL_SUBJECT_PREFIX = '[UTN] '\n\n# Sentry Configuration - will be sent error messages\nRAVEN_CONFIG = {\n 'dsn': os.environ.get('SENTRY_DSN'),\n 'release': raven.fetch_git_sha(os.path.dirname(BASE_DIR)),\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR',\n 'class': 'raven.contrib.django.raven_compat'\n '.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\nCSRF_COOKIE_SECURE = True\n\nSESSION_COOKIE_SECURE = True\n\nMELOS_URL = os.environ.get('MELOS_URL')\nMELOS_ORG_ID = os.environ.get('MELOS_ORG_ID')\nMELOS_ADMIN = os.environ.get('MELOS_ADMIN')\n\n# Google API\nGOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')\n\nRECAPTCHA_PUBLIC_KEY = os.environ.get(\"RECAPTCHA_PUBLIC_KEY\", \"\")\nRECAPTCHA_PRIVATE_KEY = os.environ.get(\"RECAPTCHA_PRIVATE_KEY\", \"\")\n\ntry:\n from .local import *\nexcept ImportError:\n pass\n","path":"src/moore/settings/production.py"}],"string":"[\n {\n \"content\": \"\\\"\\\"\\\"\\nDjango settings for the production environment of Project Moore.\\n\\nFor more information regarding running in production see,\\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\\n\\nFor more information on this file, see\\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\\n\\nFor the full list of settings and their values, see\\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\\n\\\"\\\"\\\"\\nfrom __future__ import absolute_import, unicode_literals\\nimport raven\\n\\nfrom .base import *\\n\\n# SECURITY WARNING: don't run with debug turned on in production!\\nDEBUG = False\\n\\n# SECURITY WARNING: keep the secret key used in production secret!\\nSECRET_KEY = os.environ.get(\\n 'DJANGO_SECRET',\\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\\n)\\n\\n# Database\\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\\n\\nDATABASES = {\\n 'default': {\\n 'ENGINE': 'django.db.backends.postgresql',\\n 'NAME': os.environ.get('DJANGO_DB_NAME', 'moore'),\\n 'USER': os.environ.get('DJANGO_DB_USER', 'moore'),\\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\\n }\\n}\\n\\n\\n# CONN_MAX_AGE = 0\\n\\n# Base URL to use when referring to full URLs within the Wagtail admin\\n# backend - e.g. in notification emails. 
Don't include '/admin' or a\\n# trailing slash\\nBASE_URL = 'https://utn.se'\\n\\nALLOWED_HOSTS = ['.utn.se', '.utnarm.se']\\n\\n# Email settings\\nDEFAULT_FROM_EMAIL = 'info@utn.se'\\n\\nEMAIL_SUBJECT_PREFIX = '[UTN] '\\n\\n# Sentry Configuration - will be sent error messages\\nRAVEN_CONFIG = {\\n 'dsn': os.environ.get('SENTRY_DSN'),\\n 'release': raven.fetch_git_sha(os.path.dirname(BASE_DIR)),\\n}\\n\\nLOGGING = {\\n 'version': 1,\\n 'disable_existing_loggers': True,\\n 'root': {\\n 'level': 'WARNING',\\n 'handlers': ['sentry'],\\n },\\n 'formatters': {\\n 'verbose': {\\n 'format': '%(levelname)s %(asctime)s %(module)s '\\n '%(process)d %(thread)d %(message)s'\\n },\\n },\\n 'handlers': {\\n 'sentry': {\\n 'level': 'ERROR',\\n 'class': 'raven.contrib.django.raven_compat'\\n '.handlers.SentryHandler',\\n 'tags': {'custom-tag': 'x'},\\n },\\n 'console': {\\n 'level': 'DEBUG',\\n 'class': 'logging.StreamHandler',\\n 'formatter': 'verbose'\\n }\\n },\\n 'loggers': {\\n 'django.db.backends': {\\n 'level': 'ERROR',\\n 'handlers': ['console'],\\n 'propagate': False,\\n },\\n 'raven': {\\n 'level': 'DEBUG',\\n 'handlers': ['console'],\\n 'propagate': False,\\n },\\n 'sentry.errors': {\\n 'level': 'DEBUG',\\n 'handlers': ['console'],\\n 'propagate': False,\\n },\\n },\\n}\\n\\nCSRF_COOKIE_SECURE = True\\n\\nSESSION_COOKIE_SECURE = True\\n\\nMELOS_URL = os.environ.get('MELOS_URL')\\nMELOS_ORG_ID = os.environ.get('MELOS_ORG_ID')\\nMELOS_ADMIN = os.environ.get('MELOS_ADMIN')\\n\\n# Google API\\nGOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')\\n\\nRECAPTCHA_PUBLIC_KEY = os.environ.get(\\\"RECAPTCHA_PUBLIC_KEY\\\", \\\"\\\")\\nRECAPTCHA_PRIVATE_KEY = os.environ.get(\\\"RECAPTCHA_PRIVATE_KEY\\\", \\\"\\\")\\n\\ntry:\\n from .local import *\\nexcept ImportError:\\n pass\\n\",\n \"path\": \"src/moore/settings/production.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/src/moore/settings/production.py b/src/moore/settings/production.py\nindex 9fca8dec..4dd269e0 100644\n--- a/src/moore/settings/production.py\n+++ b/src/moore/settings/production.py\n@@ -106,8 +106,6 @@\n \n CSRF_COOKIE_SECURE = True\n \n-SESSION_COOKIE_DOMAIN = '.utn.se'\n-\n SESSION_COOKIE_SECURE = True\n \n MELOS_URL = os.environ.get('MELOS_URL')\n"}}},{"rowIdx":418,"cells":{"in_source_id":{"kind":"string","value":"chainer__chainer-1568"},"issue":{"kind":"string","value":"Inconsistency between cupy.dstack and numpy.dstack\n```\nIn [10]: import cupy, numpy\nIn [11]: a = cupy.arange(24).reshape(2, 3, 4)\nIn [12]: numpy.dstack((a.get(),))\nOut[12]: \narray([[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]],\n\n [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]])\n\nIn [13]: cupy.dstack((a,))\n---------------------------------------------------------------------------\nIndexError Traceback (most recent call last)\n in ()\n----> 1 cupy.dstack((a,))\n\n/home/delta/dev/chainer/cupy/manipulation/join.py in dstack(tup)\n 101 \n 102 \"\"\"\n--> 103 return concatenate(cupy.atleast_3d(*tup), 2)\n 104 \n 105 \n\n/home/delta/dev/chainer/cupy/manipulation/join.py in concatenate(tup, axis)\n 59 ndim = a.ndim\n 60 shape = list(a.shape)\n---> 61 axis = _get_positive_axis(a.ndim, axis)\n 62 continue\n 63 \n\n/home/delta/dev/chainer/cupy/manipulation/join.py in _get_positive_axis(ndim, axis)\n 167 a += ndim\n 168 if a < 0 or a >= ndim:\n--> 169 raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\n 170 return a\n\nIndexError: axis 2 out of bounds [0, 2)\n```\n\n"},"before_files":{"kind":"list 
like","value":[{"content":"import numpy\nimport six\n\nimport cupy\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int): The axis to join arrays along.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n ndim = None\n shape = None\n for a in tup:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be concatenated')\n if a.ndim == 0:\n raise TypeError('zero-dimensional arrays cannot be concatenated')\n if ndim is None:\n ndim = a.ndim\n shape = list(a.shape)\n axis = _get_positive_axis(a.ndim, axis)\n continue\n\n if a.ndim != ndim:\n raise ValueError(\n 'All arrays to concatenate must have the same ndim')\n if any(i != axis and shape[i] != a.shape[i]\n for i in six.moves.range(ndim)):\n raise ValueError(\n 'All arrays must have same shape except the axis to '\n 'concatenate')\n shape[axis] += a.shape[axis]\n\n if ndim is None:\n raise ValueError('Cannot concatenate from empty tuple')\n\n dtype = numpy.find_common_type([a.dtype for a in tup], [])\n ret = cupy.empty(shape, dtype=dtype)\n\n skip = (slice(None),) * axis\n i = 0\n for a in tup:\n aw = a.shape[axis]\n ret[skip + (slice(i, i + aw),)] = a\n i += aw\n\n return ret\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate(cupy.atleast_3d(*tup), 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. 
seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n\n\ndef stack(tup, axis=0):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)\n\n\ndef _get_positive_axis(ndim, axis):\n a = axis\n if a < 0:\n a += ndim\n if a < 0 or a >= ndim:\n raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\n return a\n","path":"cupy/manipulation/join.py"}],"string":"[\n {\n \"content\": \"import numpy\\nimport six\\n\\nimport cupy\\n\\n\\ndef column_stack(tup):\\n \\\"\\\"\\\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\\n\\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\\n are concatenated along the second axis.\\n\\n Args:\\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\\n\\n Returns:\\n cupy.ndarray: A new 2-D array of stacked columns.\\n\\n .. seealso:: :func:`numpy.column_stack`\\n\\n \\\"\\\"\\\"\\n if any(not isinstance(a, cupy.ndarray) for a in tup):\\n raise TypeError('Only cupy arrays can be column stacked')\\n\\n lst = list(tup)\\n for i, a in enumerate(lst):\\n if a.ndim == 1:\\n a = a[:, cupy.newaxis]\\n lst[i] = a\\n elif a.ndim != 2:\\n raise ValueError(\\n 'Only 1 or 2 dimensional arrays can be column stacked')\\n\\n return concatenate(lst, axis=1)\\n\\n\\ndef concatenate(tup, axis=0):\\n \\\"\\\"\\\"Joins arrays along an axis.\\n\\n Args:\\n tup (sequence of arrays): Arrays to be joined. All of these should have\\n same dimensionalities except the specified axis.\\n axis (int): The axis to join arrays along.\\n\\n Returns:\\n cupy.ndarray: Joined array.\\n\\n .. seealso:: :func:`numpy.concatenate`\\n\\n \\\"\\\"\\\"\\n ndim = None\\n shape = None\\n for a in tup:\\n if not isinstance(a, cupy.ndarray):\\n raise TypeError('Only cupy arrays can be concatenated')\\n if a.ndim == 0:\\n raise TypeError('zero-dimensional arrays cannot be concatenated')\\n if ndim is None:\\n ndim = a.ndim\\n shape = list(a.shape)\\n axis = _get_positive_axis(a.ndim, axis)\\n continue\\n\\n if a.ndim != ndim:\\n raise ValueError(\\n 'All arrays to concatenate must have the same ndim')\\n if any(i != axis and shape[i] != a.shape[i]\\n for i in six.moves.range(ndim)):\\n raise ValueError(\\n 'All arrays must have same shape except the axis to '\\n 'concatenate')\\n shape[axis] += a.shape[axis]\\n\\n if ndim is None:\\n raise ValueError('Cannot concatenate from empty tuple')\\n\\n dtype = numpy.find_common_type([a.dtype for a in tup], [])\\n ret = cupy.empty(shape, dtype=dtype)\\n\\n skip = (slice(None),) * axis\\n i = 0\\n for a in tup:\\n aw = a.shape[axis]\\n ret[skip + (slice(i, i + aw),)] = a\\n i += aw\\n\\n return ret\\n\\n\\ndef dstack(tup):\\n \\\"\\\"\\\"Stacks arrays along the third axis.\\n\\n Args:\\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\\n by :func:`cupy.atleast_3d` before stacking.\\n\\n Returns:\\n cupy.ndarray: Stacked array.\\n\\n .. seealso:: :func:`numpy.dstack`\\n\\n \\\"\\\"\\\"\\n return concatenate(cupy.atleast_3d(*tup), 2)\\n\\n\\ndef hstack(tup):\\n \\\"\\\"\\\"Stacks arrays horizontally.\\n\\n If an input array has one dimension, then the array is treated as a\\n horizontal vector and stacked along the first axis. 
Otherwise, the array is\\n stacked along the second axis.\\n\\n Args:\\n tup (sequence of arrays): Arrays to be stacked.\\n\\n Returns:\\n cupy.ndarray: Stacked array.\\n\\n .. seealso:: :func:`numpy.hstack`\\n\\n \\\"\\\"\\\"\\n arrs = [cupy.atleast_1d(a) for a in tup]\\n axis = 1\\n if arrs[0].ndim == 1:\\n axis = 0\\n return concatenate(arrs, axis)\\n\\n\\ndef vstack(tup):\\n \\\"\\\"\\\"Stacks arrays vertically.\\n\\n If an input array has one dimension, then the array is treated as a\\n horizontal vector and stacked along the additional axis at the head.\\n Otherwise, the array is stacked along the first axis.\\n\\n Args:\\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\\n by :func:`cupy.atleast_2d` before stacking.\\n\\n Returns:\\n cupy.ndarray: Stacked array.\\n\\n .. seealso:: :func:`numpy.dstack`\\n\\n \\\"\\\"\\\"\\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\\n\\n\\ndef stack(tup, axis=0):\\n \\\"\\\"\\\"Stacks arrays along a new axis.\\n\\n Args:\\n tup (sequence of arrays): Arrays to be stacked.\\n axis (int): Axis along which the arrays are stacked.\\n\\n Returns:\\n cupy.ndarray: Stacked array.\\n\\n .. seealso:: :func:`numpy.stack`\\n \\\"\\\"\\\"\\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)\\n\\n\\ndef _get_positive_axis(ndim, axis):\\n a = axis\\n if a < 0:\\n a += ndim\\n if a < 0 or a >= ndim:\\n raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\\n return a\\n\",\n \"path\": \"cupy/manipulation/join.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"import numpy\nimport six\n\nimport cupy\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int): The axis to join arrays along.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. 
seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n ndim = None\n shape = None\n for a in tup:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be concatenated')\n if a.ndim == 0:\n raise TypeError('zero-dimensional arrays cannot be concatenated')\n if ndim is None:\n ndim = a.ndim\n shape = list(a.shape)\n axis = _get_positive_axis(a.ndim, axis)\n continue\n\n if a.ndim != ndim:\n raise ValueError(\n 'All arrays to concatenate must have the same ndim')\n if any(i != axis and shape[i] != a.shape[i]\n for i in six.moves.range(ndim)):\n raise ValueError(\n 'All arrays must have same shape except the axis to '\n 'concatenate')\n shape[axis] += a.shape[axis]\n\n if ndim is None:\n raise ValueError('Cannot concatenate from empty tuple')\n\n dtype = numpy.find_common_type([a.dtype for a in tup], [])\n ret = cupy.empty(shape, dtype=dtype)\n\n skip = (slice(None),) * axis\n i = 0\n for a in tup:\n aw = a.shape[axis]\n ret[skip + (slice(i, i + aw),)] = a\n i += aw\n\n return ret\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_3d(m) for m in tup], 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n\n\ndef stack(tup, axis=0):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)\n\n\ndef _get_positive_axis(ndim, axis):\n a = axis\n if a < 0:\n a += ndim\n if a < 0 or a >= ndim:\n raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\n return a\n","path":"cupy/manipulation/join.py"}],"string":"[\n {\n \"content\": \"import numpy\\nimport six\\n\\nimport cupy\\n\\n\\ndef column_stack(tup):\\n \\\"\\\"\\\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\\n\\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\\n are concatenated along the second axis.\\n\\n Args:\\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\\n\\n Returns:\\n cupy.ndarray: A new 2-D array of stacked columns.\\n\\n .. 
seealso:: :func:`numpy.column_stack`\\n\\n \\\"\\\"\\\"\\n if any(not isinstance(a, cupy.ndarray) for a in tup):\\n raise TypeError('Only cupy arrays can be column stacked')\\n\\n lst = list(tup)\\n for i, a in enumerate(lst):\\n if a.ndim == 1:\\n a = a[:, cupy.newaxis]\\n lst[i] = a\\n elif a.ndim != 2:\\n raise ValueError(\\n 'Only 1 or 2 dimensional arrays can be column stacked')\\n\\n return concatenate(lst, axis=1)\\n\\n\\ndef concatenate(tup, axis=0):\\n \\\"\\\"\\\"Joins arrays along an axis.\\n\\n Args:\\n tup (sequence of arrays): Arrays to be joined. All of these should have\\n same dimensionalities except the specified axis.\\n axis (int): The axis to join arrays along.\\n\\n Returns:\\n cupy.ndarray: Joined array.\\n\\n .. seealso:: :func:`numpy.concatenate`\\n\\n \\\"\\\"\\\"\\n ndim = None\\n shape = None\\n for a in tup:\\n if not isinstance(a, cupy.ndarray):\\n raise TypeError('Only cupy arrays can be concatenated')\\n if a.ndim == 0:\\n raise TypeError('zero-dimensional arrays cannot be concatenated')\\n if ndim is None:\\n ndim = a.ndim\\n shape = list(a.shape)\\n axis = _get_positive_axis(a.ndim, axis)\\n continue\\n\\n if a.ndim != ndim:\\n raise ValueError(\\n 'All arrays to concatenate must have the same ndim')\\n if any(i != axis and shape[i] != a.shape[i]\\n for i in six.moves.range(ndim)):\\n raise ValueError(\\n 'All arrays must have same shape except the axis to '\\n 'concatenate')\\n shape[axis] += a.shape[axis]\\n\\n if ndim is None:\\n raise ValueError('Cannot concatenate from empty tuple')\\n\\n dtype = numpy.find_common_type([a.dtype for a in tup], [])\\n ret = cupy.empty(shape, dtype=dtype)\\n\\n skip = (slice(None),) * axis\\n i = 0\\n for a in tup:\\n aw = a.shape[axis]\\n ret[skip + (slice(i, i + aw),)] = a\\n i += aw\\n\\n return ret\\n\\n\\ndef dstack(tup):\\n \\\"\\\"\\\"Stacks arrays along the third axis.\\n\\n Args:\\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\\n by :func:`cupy.atleast_3d` before stacking.\\n\\n Returns:\\n cupy.ndarray: Stacked array.\\n\\n .. seealso:: :func:`numpy.dstack`\\n\\n \\\"\\\"\\\"\\n return concatenate([cupy.atleast_3d(m) for m in tup], 2)\\n\\n\\ndef hstack(tup):\\n \\\"\\\"\\\"Stacks arrays horizontally.\\n\\n If an input array has one dimension, then the array is treated as a\\n horizontal vector and stacked along the first axis. Otherwise, the array is\\n stacked along the second axis.\\n\\n Args:\\n tup (sequence of arrays): Arrays to be stacked.\\n\\n Returns:\\n cupy.ndarray: Stacked array.\\n\\n .. seealso:: :func:`numpy.hstack`\\n\\n \\\"\\\"\\\"\\n arrs = [cupy.atleast_1d(a) for a in tup]\\n axis = 1\\n if arrs[0].ndim == 1:\\n axis = 0\\n return concatenate(arrs, axis)\\n\\n\\ndef vstack(tup):\\n \\\"\\\"\\\"Stacks arrays vertically.\\n\\n If an input array has one dimension, then the array is treated as a\\n horizontal vector and stacked along the additional axis at the head.\\n Otherwise, the array is stacked along the first axis.\\n\\n Args:\\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\\n by :func:`cupy.atleast_2d` before stacking.\\n\\n Returns:\\n cupy.ndarray: Stacked array.\\n\\n .. seealso:: :func:`numpy.dstack`\\n\\n \\\"\\\"\\\"\\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\\n\\n\\ndef stack(tup, axis=0):\\n \\\"\\\"\\\"Stacks arrays along a new axis.\\n\\n Args:\\n tup (sequence of arrays): Arrays to be stacked.\\n axis (int): Axis along which the arrays are stacked.\\n\\n Returns:\\n cupy.ndarray: Stacked array.\\n\\n .. 
seealso:: :func:`numpy.stack`\\n \\\"\\\"\\\"\\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)\\n\\n\\ndef _get_positive_axis(ndim, axis):\\n a = axis\\n if a < 0:\\n a += ndim\\n if a < 0 or a >= ndim:\\n raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\\n return a\\n\",\n \"path\": \"cupy/manipulation/join.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/cupy/manipulation/join.py b/cupy/manipulation/join.py\nindex 14165a8ff2f0..6e077d78221b 100644\n--- a/cupy/manipulation/join.py\n+++ b/cupy/manipulation/join.py\n@@ -100,7 +100,7 @@ def dstack(tup):\n .. seealso:: :func:`numpy.dstack`\n \n \"\"\"\n- return concatenate(cupy.atleast_3d(*tup), 2)\n+ return concatenate([cupy.atleast_3d(m) for m in tup], 2)\n \n \n def hstack(tup):\ndiff --git a/tests/cupy_tests/manipulation_tests/test_join.py b/tests/cupy_tests/manipulation_tests/test_join.py\nindex 949ea1406001..e37ab83b5405 100644\n--- a/tests/cupy_tests/manipulation_tests/test_join.py\n+++ b/tests/cupy_tests/manipulation_tests/test_join.py\n@@ -73,6 +73,21 @@ def test_dstack(self, xp):\n c = testing.shaped_arange((1, 3), xp)\n return xp.dstack((a, b, c))\n \n+ @testing.numpy_cupy_array_equal()\n+ def test_dstack_single_element(self, xp):\n+ a = testing.shaped_arange((1, 2, 3), xp)\n+ return xp.dstack((a,))\n+\n+ @testing.numpy_cupy_array_equal()\n+ def test_dstack_single_element_2(self, xp):\n+ a = testing.shaped_arange((1, 2), xp)\n+ return xp.dstack((a,))\n+\n+ @testing.numpy_cupy_array_equal()\n+ def test_dstack_single_element_3(self, xp):\n+ a = testing.shaped_arange((1,), xp)\n+ return xp.dstack((a,))\n+\n @testing.numpy_cupy_array_equal()\n def test_hstack_vectors(self, xp):\n a = xp.arange(3)\n"}}},{"rowIdx":419,"cells":{"in_source_id":{"kind":"string","value":"talonhub__community-740"},"issue":{"kind":"string","value":"auto_insert is documented to call auto_format\nI understand if you want to override `auto_insert` too because you're adjusting the cursor position afterwards, but the general intention is for `auto_format` to do text formatting\r\n\r\nI think the dictation `auto_insert()` implementation should potentially first `text = auto_format(text)` (which is the identity function by default) as per the docstring:\r\n```\r\nauto_insert(text: str)\r\n Insert text at the current cursor position, automatically formatting it using actions.auto_format(text)\r\n```\r\n\r\nhttps://github.com/knausj85/knausj_talon/blob/master/code/dictation.py#L401\n"},"before_files":{"kind":"list like","value":[{"content":"# Descended from https://github.com/dwiel/talon_community/blob/master/misc/dictation.py\nfrom talon import Module, Context, ui, actions, clip, app, grammar\nfrom typing import Optional, Tuple, Literal, Callable\nimport re\n\nmod = Module()\n\nsetting_context_sensitive_dictation = mod.setting(\n \"context_sensitive_dictation\",\n type=bool,\n default=False,\n desc=\"Look at surrounding text to improve auto-capitalization/spacing in dictation mode. 
By default, this works by selecting that text & copying it to the clipboard, so it may be slow or fail in some applications.\",\n)\n\nmod.list(\"prose_modifiers\", desc=\"Modifiers that can be used within prose\")\nmod.list(\"prose_snippets\", desc=\"Snippets that can be used within prose\")\nctx = Context()\n# Maps spoken forms to DictationFormat method names (see DictationFormat below).\nctx.lists[\"user.prose_modifiers\"] = {\n \"cap\": \"cap\",\n \"no cap\": \"no_cap\",\n \"no caps\": \"no_cap\", # \"no caps\" variant for Dragon\n \"no space\": \"no_space\",\n}\nctx.lists[\"user.prose_snippets\"] = {\n \"spacebar\": \" \",\n \"new line\": \"\\n\",\n \"new paragraph\": \"\\n\\n\",\n # Curly quotes are used to obtain proper spacing for left and right quotes, but will later be straightened.\n \"open quote\": \"“\",\n \"close quote\": \"”\",\n \"smiley\": \":-)\",\n \"winky\": \";-)\",\n \"frowny\": \":-(\",\n}\n\n@mod.capture(rule=\"{user.prose_modifiers}\")\ndef prose_modifier(m) -> Callable:\n return getattr(DictationFormat, m.prose_modifiers)\n\n@mod.capture(rule=\"({user.vocabulary} | )\")\ndef word(m) -> str:\n \"\"\"A single word, including user-defined vocabulary.\"\"\"\n try:\n return m.vocabulary\n except AttributeError:\n return \" \".join(actions.user.replace_phrases(actions.dictate.parse_words(m.word)))\n\n@mod.capture(rule=\"({user.vocabulary} | )+\")\ndef text(m) -> str:\n \"\"\"A sequence of words, including user-defined vocabulary.\"\"\"\n return format_phrase(m)\n\n@mod.capture(rule=\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | | )+\")\ndef prose(m) -> str:\n \"\"\"Mixed words and punctuation, auto-spaced & capitalized.\"\"\"\n # Straighten curly quotes that were introduced to obtain proper spacing.\n return apply_formatting(m).replace(\"“\", \"\\\"\").replace(\"”\", \"\\\"\")\n\n@mod.capture(rule=\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | )+\")\ndef raw_prose(m) -> str:\n \"\"\"Mixed words and punctuation, auto-spaced & capitalized, without quote straightening and commands (for use in dictation mode).\"\"\"\n return apply_formatting(m)\n\n\f\n# ---------- FORMATTING ---------- #\ndef format_phrase(m):\n words = capture_to_words(m)\n result = \"\"\n for i, word in enumerate(words):\n if i > 0 and needs_space_between(words[i-1], word):\n result += \" \"\n result += word\n return result\n\ndef capture_to_words(m):\n words = []\n for item in m:\n words.extend(\n actions.user.replace_phrases(actions.dictate.parse_words(item))\n if isinstance(item, grammar.vm.Phrase)\n else [item])\n return words\n\ndef apply_formatting(m):\n formatter = DictationFormat()\n formatter.state = None\n result = \"\"\n for item in m:\n # prose modifiers (cap/no cap/no space) produce formatter callbacks.\n if isinstance(item, Callable):\n item(formatter)\n else:\n words = (actions.user.replace_phrases(actions.dictate.parse_words(item))\n if isinstance(item, grammar.vm.Phrase)\n else [item])\n for word in words:\n result += formatter.format(word)\n return result\n\n# There must be a simpler way to do this, but I don't see it right now.\nno_space_after = re.compile(r\"\"\"\n (?:\n [\\s\\-_/#@([{‘“] # characters that never need space after them\n | (? 
bool:\n return not text or no_space_before.search(text)\ndef omit_space_after(text: str) -> bool:\n return not text or no_space_after.search(text)\ndef needs_space_between(before: str, after: str) -> bool:\n return not (omit_space_after(before) or omit_space_before(after))\n\n# # TESTS, uncomment to enable\n# assert needs_space_between(\"a\", \"break\")\n# assert needs_space_between(\"break\", \"a\")\n# assert needs_space_between(\".\", \"a\")\n# assert needs_space_between(\"said\", \"'hello\")\n# assert needs_space_between(\"hello'\", \"said\")\n# assert needs_space_between(\"hello.\", \"'John\")\n# assert needs_space_between(\"John.'\", \"They\")\n# assert needs_space_between(\"paid\", \"$50\")\n# assert needs_space_between(\"50$\", \"payment\")\n# assert not needs_space_between(\"\", \"\")\n# assert not needs_space_between(\"a\", \"\")\n# assert not needs_space_between(\"a\", \" \")\n# assert not needs_space_between(\"\", \"a\")\n# assert not needs_space_between(\" \", \"a\")\n# assert not needs_space_between(\"a\", \",\")\n# assert not needs_space_between(\"'\", \"a\")\n# assert not needs_space_between(\"a\", \"'\")\n# assert not needs_space_between(\"and-\", \"or\")\n# assert not needs_space_between(\"mary\", \"-kate\")\n# assert not needs_space_between(\"$\", \"50\")\n# assert not needs_space_between(\"US\", \"$\")\n# assert not needs_space_between(\"(\", \")\")\n# assert not needs_space_between(\"(\", \"e.g.\")\n# assert not needs_space_between(\"example\", \")\")\n# assert not needs_space_between(\"example\", '\".')\n# assert not needs_space_between(\"example\", '.\"')\n# assert not needs_space_between(\"hello'\", \".\")\n# assert not needs_space_between(\"hello.\", \"'\")\n\ndef auto_capitalize(text, state = None):\n \"\"\"\n Auto-capitalizes text. 
`state` argument means:\n\n - None: Don't capitalize initial word.\n - \"sentence start\": Capitalize initial word.\n - \"after newline\": Don't capitalize initial word, but we're after a newline.\n Used for double-newline detection.\n\n Returns (capitalized text, updated state).\n \"\"\"\n output = \"\"\n # Imagine a metaphorical \"capitalization charge\" travelling through the\n # string left-to-right.\n charge = state == \"sentence start\"\n newline = state == \"after newline\"\n for c in text:\n # Sentence endings & double newlines create a charge.\n if c in \".!?\" or (newline and c == \"\\n\"):\n charge = True\n # Alphanumeric characters and commas/colons absorb charge & try to\n # capitalize (for numbers & punctuation this does nothing, which is what\n # we want).\n elif charge and (c.isalnum() or c in \",:\"):\n charge = False\n c = c.capitalize()\n # Otherwise the charge just passes through.\n output += c\n newline = c == \"\\n\"\n return output, (\"sentence start\" if charge else\n \"after newline\" if newline else None)\n\n\f\n# ---------- DICTATION AUTO FORMATTING ---------- #\nclass DictationFormat:\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.reset_context()\n self.force_no_space = False\n self.force_capitalization = None # Can also be \"cap\" or \"no cap\".\n\n def reset_context(self):\n self.before = \"\"\n self.state = \"sentence start\"\n\n def update_context(self, before):\n if before is None: return\n self.reset_context()\n self.pass_through(before)\n\n def pass_through(self, text):\n _, self.state = auto_capitalize(text, self.state)\n self.before = text or self.before\n\n def format(self, text, auto_cap=True):\n if not self.force_no_space and needs_space_between(self.before, text):\n text = \" \" + text\n self.force_no_space = False\n if auto_cap:\n text, self.state = auto_capitalize(text, self.state)\n if self.force_capitalization == \"cap\":\n text = format_first_letter(text, lambda s: s.capitalize())\n self.force_capitalization = None\n if self.force_capitalization == \"no cap\":\n text = format_first_letter(text, lambda s: s.lower())\n self.force_capitalization = None\n self.before = text or self.before\n return text\n\n # These are used as callbacks by prose modifiers / dictation_mode commands.\n def cap(self): self.force_capitalization = \"cap\"\n def no_cap(self): self.force_capitalization = \"no cap\"\n def no_space(self):\n # This is typically used after repositioning the cursor, so it is helpful to\n # reset capitalization as well.\n #\n # FIXME: this sets state to \"sentence start\", capitalizing the next\n # word. 
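# Illustrative only: the "charge" state machine above, by example
# (expected results in comments; assumes auto_capitalize as defined):
out, state = auto_capitalize("hello. world", "sentence start")
# out == "Hello. World"; state is None -- the final sentence is still open
out, state = auto_capitalize("done!", None)
# out == "done!"; state == "sentence start" -- the "!" arms the next word
out, state = auto_capitalize("one\n", "after newline")
# out == "one\n"; state == "after newline" -- only a second consecutive
# "\n" (a blank line) would create a fresh charge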
probably undesirable, since most places are not the start of\n # sentences?\n self.reset_context()\n self.force_no_space = True\n\ndef format_first_letter(text, formatter):\n i = -1\n for i, c in enumerate(text):\n if c.isalpha():\n break\n if i >= 0 and i < len(text):\n text = text[:i] + formatter(text[i]) + text[i+1:]\n return text\n\ndictation_formatter = DictationFormat()\nui.register(\"app_deactivate\", lambda app: dictation_formatter.reset())\nui.register(\"win_focus\", lambda win: dictation_formatter.reset())\n\ndef reformat_last_utterance(formatter):\n text = actions.user.get_last_phrase()\n actions.user.clear_last_phrase()\n text = formatter(text)\n actions.user.add_phrase_to_history(text)\n actions.insert(text)\n\n@mod.action_class\nclass Actions:\n def dictation_format_reset():\n \"\"\"Resets the dictation formatter\"\"\"\n return dictation_formatter.reset()\n\n def dictation_format_cap():\n \"\"\"Sets the dictation formatter to capitalize\"\"\"\n dictation_formatter.cap()\n\n def dictation_format_no_cap():\n \"\"\"Sets the dictation formatter to not capitalize\"\"\"\n dictation_formatter.no_cap()\n\n def dictation_format_no_space():\n \"\"\"Sets the dictation formatter to not prepend a space\"\"\"\n dictation_formatter.no_space()\n\n def dictation_reformat_cap():\n \"\"\"Capitalizes the last utterance\"\"\"\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.capitalize()))\n\n def dictation_reformat_no_cap():\n \"\"\"Lowercases the last utterance\"\"\"\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.lower()))\n\n def dictation_reformat_no_space():\n \"\"\"Removes space before the last utterance\"\"\"\n reformat_last_utterance(lambda s: s[1:] if s.startswith(\" \") else s)\n\n def dictation_insert_raw(text: str):\n \"\"\"Inserts text as-is, without invoking the dictation formatter.\"\"\"\n actions.user.dictation_insert(text, auto_cap=False)\n\n def dictation_insert(text: str, auto_cap: bool=True) -> str:\n \"\"\"Inserts dictated text, formatted appropriately.\"\"\"\n add_space_after = False\n if setting_context_sensitive_dictation.get():\n # Peek left if we might need leading space or auto-capitalization.\n if (not omit_space_before(text)\n or text != auto_capitalize(text, \"sentence start\")[0]):\n dictation_formatter.update_context(\n actions.user.dictation_peek_left(clobber=True))\n # Peek right if we might need trailing space. NB. We peek right\n # BEFORE insertion to avoid breaking the undo-chain between the\n # inserted text and the trailing space.\n if not omit_space_after(text):\n char = actions.user.dictation_peek_right()\n add_space_after = char is not None and needs_space_between(text, char)\n text = dictation_formatter.format(text, auto_cap)\n # Straighten curly quotes that were introduced to obtain proper\n # spacing. The formatter context still has the original curly quotes\n # so that future dictation is properly formatted.\n text = text.replace(\"“\", \"\\\"\").replace(\"”\", \"\\\"\")\n actions.user.add_phrase_to_history(text)\n # we insert the text all at once in case we have an implementation of\n # insert that is more efficient for long strings, eg. paste-to-insert\n actions.insert(text + (\" \" if add_space_after else \"\"))\n if add_space_after: actions.edit.left()\n\n def dictation_peek_left(clobber: bool = False) -> Optional[str]:\n \"\"\"\n Tries to get some text before the cursor, ideally a word or two, for the\n purpose of auto-spacing & -capitalization. 
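# Illustrative only: DictationFormat is stateful across utterances.
# Expected outputs in comments, assuming the class as defined above:
fmt = DictationFormat()
fmt.format("hello")    # -> "Hello"   (fresh context: sentence start)
fmt.format("world.")   # -> " world." (space added, no capital)
fmt.format("again")    # -> " Again"  (the "." re-armed capitalization)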
Results are not guaranteed;\n dictation_peek_left() may return None to indicate no information. (Note\n that returning the empty string \"\" indicates there is nothing before\n cursor, ie. we are at the beginning of the document.)\n\n If there is currently a selection, dictation_peek_left() must leave it\n unchanged unless `clobber` is true, in which case it may clobber it.\n \"\"\"\n # Get rid of the selection if it exists.\n if clobber: actions.user.clobber_selection_if_exists()\n # Otherwise, if there's a selection, fail.\n elif \"\" != actions.edit.selected_text(): return None\n\n # In principle the previous word should suffice, but some applications\n # have a funny concept of what the previous word is (for example, they\n # may only take the \"`\" at the end of \"`foo`\"). To be double sure we\n # take two words left. I also tried taking a line up + a word left, but\n # edit.extend_up() = key(shift-up) doesn't work consistently in the\n # Slack webapp (sometimes escapes the text box).\n actions.edit.extend_word_left()\n actions.edit.extend_word_left()\n text = actions.edit.selected_text()\n # if we're at the beginning of the document/text box, we may not have\n # selected any text, in which case we shouldn't move the cursor.\n if text:\n # Unfortunately, in web Slack, if our selection ends at newline,\n # this will go right over the newline. Argh.\n actions.edit.right()\n return text\n\n def clobber_selection_if_exists():\n \"\"\"Deletes the currently selected text if it exists; otherwise does nothing.\"\"\"\n actions.key(\"space backspace\")\n # This space-backspace trick is fast and reliable but has the\n # side-effect of cluttering the undo history. Other options:\n #\n # 1. Call edit.cut() inside a clip.revert() block. This assumes\n # edit.cut() is supported AND will be a no-op if there's no\n # selection. Unfortunately, sometimes one or both of these is false,\n # eg. the notion webapp makes ctrl-x cut the current block by default\n # if nothing is selected.\n #\n # 2. Test whether a selection exists by asking whether\n # edit.selected_text() is empty; if it does, use edit.delete(). This\n # usually uses the clipboard, which can be quite slow. Also, not sure\n # how this would interact with switching edit.selected_text() to use\n # the selection clipboard on linux, which can be nonempty even if no\n # text is selected in the current application.\n #\n # Perhaps this ought to be configurable by a setting.\n\n def dictation_peek_right() -> Optional[str]:\n \"\"\"\n Tries to get a few characters after the cursor for auto-spacing.\n Results are not guaranteed; dictation_peek_right() may return None to\n indicate no information. (Note that returning the empty string \"\"\n indicates there is nothing after cursor, ie. we are at the end of the\n document.)\n \"\"\"\n # We grab two characters because I think that's what no_space_before\n # needs in the worst case. An example where the second character matters\n # is inserting before (1) \"' hello\" vs (2) \"'hello\". 
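# Illustrative only: option 1 from the comment above, sketched. Assumes
# clip.revert() works as a context manager and that edit.cut() is a
# no-op when nothing is selected -- which, as the comment notes, is not
# true in every application (e.g. the Notion webapp):
def clobber_selection_via_clipboard():
    with clip.revert():
        actions.edit.cut()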
In case (1) we\n # don't want to add space, in case (2) we do.\n actions.edit.extend_right()\n actions.edit.extend_right()\n after = actions.edit.selected_text()\n if after: actions.edit.left()\n return after\n\n# Use the dictation formatter in dictation mode.\ndictation_ctx = Context()\ndictation_ctx.matches = r\"\"\"\nmode: dictation\n\"\"\"\n\n@dictation_ctx.action_class(\"main\")\nclass main_action:\n def auto_insert(text): actions.user.dictation_insert(text)\n","path":"code/dictation.py"}],"string":"[\n {\n \"content\": \"# Descended from https://github.com/dwiel/talon_community/blob/master/misc/dictation.py\\nfrom talon import Module, Context, ui, actions, clip, app, grammar\\nfrom typing import Optional, Tuple, Literal, Callable\\nimport re\\n\\nmod = Module()\\n\\nsetting_context_sensitive_dictation = mod.setting(\\n \\\"context_sensitive_dictation\\\",\\n type=bool,\\n default=False,\\n desc=\\\"Look at surrounding text to improve auto-capitalization/spacing in dictation mode. By default, this works by selecting that text & copying it to the clipboard, so it may be slow or fail in some applications.\\\",\\n)\\n\\nmod.list(\\\"prose_modifiers\\\", desc=\\\"Modifiers that can be used within prose\\\")\\nmod.list(\\\"prose_snippets\\\", desc=\\\"Snippets that can be used within prose\\\")\\nctx = Context()\\n# Maps spoken forms to DictationFormat method names (see DictationFormat below).\\nctx.lists[\\\"user.prose_modifiers\\\"] = {\\n \\\"cap\\\": \\\"cap\\\",\\n \\\"no cap\\\": \\\"no_cap\\\",\\n \\\"no caps\\\": \\\"no_cap\\\", # \\\"no caps\\\" variant for Dragon\\n \\\"no space\\\": \\\"no_space\\\",\\n}\\nctx.lists[\\\"user.prose_snippets\\\"] = {\\n \\\"spacebar\\\": \\\" \\\",\\n \\\"new line\\\": \\\"\\\\n\\\",\\n \\\"new paragraph\\\": \\\"\\\\n\\\\n\\\",\\n # Curly quotes are used to obtain proper spacing for left and right quotes, but will later be straightened.\\n \\\"open quote\\\": \\\"“\\\",\\n \\\"close quote\\\": \\\"”\\\",\\n \\\"smiley\\\": \\\":-)\\\",\\n \\\"winky\\\": \\\";-)\\\",\\n \\\"frowny\\\": \\\":-(\\\",\\n}\\n\\n@mod.capture(rule=\\\"{user.prose_modifiers}\\\")\\ndef prose_modifier(m) -> Callable:\\n return getattr(DictationFormat, m.prose_modifiers)\\n\\n@mod.capture(rule=\\\"({user.vocabulary} | )\\\")\\ndef word(m) -> str:\\n \\\"\\\"\\\"A single word, including user-defined vocabulary.\\\"\\\"\\\"\\n try:\\n return m.vocabulary\\n except AttributeError:\\n return \\\" \\\".join(actions.user.replace_phrases(actions.dictate.parse_words(m.word)))\\n\\n@mod.capture(rule=\\\"({user.vocabulary} | )+\\\")\\ndef text(m) -> str:\\n \\\"\\\"\\\"A sequence of words, including user-defined vocabulary.\\\"\\\"\\\"\\n return format_phrase(m)\\n\\n@mod.capture(rule=\\\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | | )+\\\")\\ndef prose(m) -> str:\\n \\\"\\\"\\\"Mixed words and punctuation, auto-spaced & capitalized.\\\"\\\"\\\"\\n # Straighten curly quotes that were introduced to obtain proper spacing.\\n return apply_formatting(m).replace(\\\"“\\\", \\\"\\\\\\\"\\\").replace(\\\"”\\\", \\\"\\\\\\\"\\\")\\n\\n@mod.capture(rule=\\\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | )+\\\")\\ndef raw_prose(m) -> str:\\n \\\"\\\"\\\"Mixed words and punctuation, auto-spaced & capitalized, without quote straightening and commands (for use in dictation mode).\\\"\\\"\\\"\\n return apply_formatting(m)\\n\\n\\f\\n# ---------- FORMATTING ---------- #\\ndef format_phrase(m):\\n words = capture_to_words(m)\\n result = \\\"\\\"\\n for i, 
word in enumerate(words):\\n if i > 0 and needs_space_between(words[i-1], word):\\n result += \\\" \\\"\\n result += word\\n return result\\n\\ndef capture_to_words(m):\\n words = []\\n for item in m:\\n words.extend(\\n actions.user.replace_phrases(actions.dictate.parse_words(item))\\n if isinstance(item, grammar.vm.Phrase)\\n else [item])\\n return words\\n\\ndef apply_formatting(m):\\n formatter = DictationFormat()\\n formatter.state = None\\n result = \\\"\\\"\\n for item in m:\\n # prose modifiers (cap/no cap/no space) produce formatter callbacks.\\n if isinstance(item, Callable):\\n item(formatter)\\n else:\\n words = (actions.user.replace_phrases(actions.dictate.parse_words(item))\\n if isinstance(item, grammar.vm.Phrase)\\n else [item])\\n for word in words:\\n result += formatter.format(word)\\n return result\\n\\n# There must be a simpler way to do this, but I don't see it right now.\\nno_space_after = re.compile(r\\\"\\\"\\\"\\n (?:\\n [\\\\s\\\\-_/#@([{‘“] # characters that never need space after them\\n | (? bool:\\n return not text or no_space_before.search(text)\\ndef omit_space_after(text: str) -> bool:\\n return not text or no_space_after.search(text)\\ndef needs_space_between(before: str, after: str) -> bool:\\n return not (omit_space_after(before) or omit_space_before(after))\\n\\n# # TESTS, uncomment to enable\\n# assert needs_space_between(\\\"a\\\", \\\"break\\\")\\n# assert needs_space_between(\\\"break\\\", \\\"a\\\")\\n# assert needs_space_between(\\\".\\\", \\\"a\\\")\\n# assert needs_space_between(\\\"said\\\", \\\"'hello\\\")\\n# assert needs_space_between(\\\"hello'\\\", \\\"said\\\")\\n# assert needs_space_between(\\\"hello.\\\", \\\"'John\\\")\\n# assert needs_space_between(\\\"John.'\\\", \\\"They\\\")\\n# assert needs_space_between(\\\"paid\\\", \\\"$50\\\")\\n# assert needs_space_between(\\\"50$\\\", \\\"payment\\\")\\n# assert not needs_space_between(\\\"\\\", \\\"\\\")\\n# assert not needs_space_between(\\\"a\\\", \\\"\\\")\\n# assert not needs_space_between(\\\"a\\\", \\\" \\\")\\n# assert not needs_space_between(\\\"\\\", \\\"a\\\")\\n# assert not needs_space_between(\\\" \\\", \\\"a\\\")\\n# assert not needs_space_between(\\\"a\\\", \\\",\\\")\\n# assert not needs_space_between(\\\"'\\\", \\\"a\\\")\\n# assert not needs_space_between(\\\"a\\\", \\\"'\\\")\\n# assert not needs_space_between(\\\"and-\\\", \\\"or\\\")\\n# assert not needs_space_between(\\\"mary\\\", \\\"-kate\\\")\\n# assert not needs_space_between(\\\"$\\\", \\\"50\\\")\\n# assert not needs_space_between(\\\"US\\\", \\\"$\\\")\\n# assert not needs_space_between(\\\"(\\\", \\\")\\\")\\n# assert not needs_space_between(\\\"(\\\", \\\"e.g.\\\")\\n# assert not needs_space_between(\\\"example\\\", \\\")\\\")\\n# assert not needs_space_between(\\\"example\\\", '\\\".')\\n# assert not needs_space_between(\\\"example\\\", '.\\\"')\\n# assert not needs_space_between(\\\"hello'\\\", \\\".\\\")\\n# assert not needs_space_between(\\\"hello.\\\", \\\"'\\\")\\n\\ndef auto_capitalize(text, state = None):\\n \\\"\\\"\\\"\\n Auto-capitalizes text. 
`state` argument means:\\n\\n - None: Don't capitalize initial word.\\n - \\\"sentence start\\\": Capitalize initial word.\\n - \\\"after newline\\\": Don't capitalize initial word, but we're after a newline.\\n Used for double-newline detection.\\n\\n Returns (capitalized text, updated state).\\n \\\"\\\"\\\"\\n output = \\\"\\\"\\n # Imagine a metaphorical \\\"capitalization charge\\\" travelling through the\\n # string left-to-right.\\n charge = state == \\\"sentence start\\\"\\n newline = state == \\\"after newline\\\"\\n for c in text:\\n # Sentence endings & double newlines create a charge.\\n if c in \\\".!?\\\" or (newline and c == \\\"\\\\n\\\"):\\n charge = True\\n # Alphanumeric characters and commas/colons absorb charge & try to\\n # capitalize (for numbers & punctuation this does nothing, which is what\\n # we want).\\n elif charge and (c.isalnum() or c in \\\",:\\\"):\\n charge = False\\n c = c.capitalize()\\n # Otherwise the charge just passes through.\\n output += c\\n newline = c == \\\"\\\\n\\\"\\n return output, (\\\"sentence start\\\" if charge else\\n \\\"after newline\\\" if newline else None)\\n\\n\\f\\n# ---------- DICTATION AUTO FORMATTING ---------- #\\nclass DictationFormat:\\n def __init__(self):\\n self.reset()\\n\\n def reset(self):\\n self.reset_context()\\n self.force_no_space = False\\n self.force_capitalization = None # Can also be \\\"cap\\\" or \\\"no cap\\\".\\n\\n def reset_context(self):\\n self.before = \\\"\\\"\\n self.state = \\\"sentence start\\\"\\n\\n def update_context(self, before):\\n if before is None: return\\n self.reset_context()\\n self.pass_through(before)\\n\\n def pass_through(self, text):\\n _, self.state = auto_capitalize(text, self.state)\\n self.before = text or self.before\\n\\n def format(self, text, auto_cap=True):\\n if not self.force_no_space and needs_space_between(self.before, text):\\n text = \\\" \\\" + text\\n self.force_no_space = False\\n if auto_cap:\\n text, self.state = auto_capitalize(text, self.state)\\n if self.force_capitalization == \\\"cap\\\":\\n text = format_first_letter(text, lambda s: s.capitalize())\\n self.force_capitalization = None\\n if self.force_capitalization == \\\"no cap\\\":\\n text = format_first_letter(text, lambda s: s.lower())\\n self.force_capitalization = None\\n self.before = text or self.before\\n return text\\n\\n # These are used as callbacks by prose modifiers / dictation_mode commands.\\n def cap(self): self.force_capitalization = \\\"cap\\\"\\n def no_cap(self): self.force_capitalization = \\\"no cap\\\"\\n def no_space(self):\\n # This is typically used after repositioning the cursor, so it is helpful to\\n # reset capitalization as well.\\n #\\n # FIXME: this sets state to \\\"sentence start\\\", capitalizing the next\\n # word. 
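# Illustrative only: the cap/no_cap callbacks above are one-shot.
# Expected outputs in comments, assuming the class as defined:
fmt = DictationFormat()
fmt.no_cap()
fmt.format("this")   # -> "this": "no cap" overrides sentence-start once
fmt.format("more")   # -> " more": the override has already been consumed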
probably undesirable, since most places are not the start of\\n # sentences?\\n self.reset_context()\\n self.force_no_space = True\\n\\ndef format_first_letter(text, formatter):\\n i = -1\\n for i, c in enumerate(text):\\n if c.isalpha():\\n break\\n if i >= 0 and i < len(text):\\n text = text[:i] + formatter(text[i]) + text[i+1:]\\n return text\\n\\ndictation_formatter = DictationFormat()\\nui.register(\\\"app_deactivate\\\", lambda app: dictation_formatter.reset())\\nui.register(\\\"win_focus\\\", lambda win: dictation_formatter.reset())\\n\\ndef reformat_last_utterance(formatter):\\n text = actions.user.get_last_phrase()\\n actions.user.clear_last_phrase()\\n text = formatter(text)\\n actions.user.add_phrase_to_history(text)\\n actions.insert(text)\\n\\n@mod.action_class\\nclass Actions:\\n def dictation_format_reset():\\n \\\"\\\"\\\"Resets the dictation formatter\\\"\\\"\\\"\\n return dictation_formatter.reset()\\n\\n def dictation_format_cap():\\n \\\"\\\"\\\"Sets the dictation formatter to capitalize\\\"\\\"\\\"\\n dictation_formatter.cap()\\n\\n def dictation_format_no_cap():\\n \\\"\\\"\\\"Sets the dictation formatter to not capitalize\\\"\\\"\\\"\\n dictation_formatter.no_cap()\\n\\n def dictation_format_no_space():\\n \\\"\\\"\\\"Sets the dictation formatter to not prepend a space\\\"\\\"\\\"\\n dictation_formatter.no_space()\\n\\n def dictation_reformat_cap():\\n \\\"\\\"\\\"Capitalizes the last utterance\\\"\\\"\\\"\\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.capitalize()))\\n\\n def dictation_reformat_no_cap():\\n \\\"\\\"\\\"Lowercases the last utterance\\\"\\\"\\\"\\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.lower()))\\n\\n def dictation_reformat_no_space():\\n \\\"\\\"\\\"Removes space before the last utterance\\\"\\\"\\\"\\n reformat_last_utterance(lambda s: s[1:] if s.startswith(\\\" \\\") else s)\\n\\n def dictation_insert_raw(text: str):\\n \\\"\\\"\\\"Inserts text as-is, without invoking the dictation formatter.\\\"\\\"\\\"\\n actions.user.dictation_insert(text, auto_cap=False)\\n\\n def dictation_insert(text: str, auto_cap: bool=True) -> str:\\n \\\"\\\"\\\"Inserts dictated text, formatted appropriately.\\\"\\\"\\\"\\n add_space_after = False\\n if setting_context_sensitive_dictation.get():\\n # Peek left if we might need leading space or auto-capitalization.\\n if (not omit_space_before(text)\\n or text != auto_capitalize(text, \\\"sentence start\\\")[0]):\\n dictation_formatter.update_context(\\n actions.user.dictation_peek_left(clobber=True))\\n # Peek right if we might need trailing space. NB. We peek right\\n # BEFORE insertion to avoid breaking the undo-chain between the\\n # inserted text and the trailing space.\\n if not omit_space_after(text):\\n char = actions.user.dictation_peek_right()\\n add_space_after = char is not None and needs_space_between(text, char)\\n text = dictation_formatter.format(text, auto_cap)\\n # Straighten curly quotes that were introduced to obtain proper\\n # spacing. The formatter context still has the original curly quotes\\n # so that future dictation is properly formatted.\\n text = text.replace(\\\"“\\\", \\\"\\\\\\\"\\\").replace(\\\"”\\\", \\\"\\\\\\\"\\\")\\n actions.user.add_phrase_to_history(text)\\n # we insert the text all at once in case we have an implementation of\\n # insert that is more efficient for long strings, eg. 
paste-to-insert\\n actions.insert(text + (\\\" \\\" if add_space_after else \\\"\\\"))\\n if add_space_after: actions.edit.left()\\n\\n def dictation_peek_left(clobber: bool = False) -> Optional[str]:\\n \\\"\\\"\\\"\\n Tries to get some text before the cursor, ideally a word or two, for the\\n purpose of auto-spacing & -capitalization. Results are not guaranteed;\\n dictation_peek_left() may return None to indicate no information. (Note\\n that returning the empty string \\\"\\\" indicates there is nothing before\\n cursor, ie. we are at the beginning of the document.)\\n\\n If there is currently a selection, dictation_peek_left() must leave it\\n unchanged unless `clobber` is true, in which case it may clobber it.\\n \\\"\\\"\\\"\\n # Get rid of the selection if it exists.\\n if clobber: actions.user.clobber_selection_if_exists()\\n # Otherwise, if there's a selection, fail.\\n elif \\\"\\\" != actions.edit.selected_text(): return None\\n\\n # In principle the previous word should suffice, but some applications\\n # have a funny concept of what the previous word is (for example, they\\n # may only take the \\\"`\\\" at the end of \\\"`foo`\\\"). To be double sure we\\n # take two words left. I also tried taking a line up + a word left, but\\n # edit.extend_up() = key(shift-up) doesn't work consistently in the\\n # Slack webapp (sometimes escapes the text box).\\n actions.edit.extend_word_left()\\n actions.edit.extend_word_left()\\n text = actions.edit.selected_text()\\n # if we're at the beginning of the document/text box, we may not have\\n # selected any text, in which case we shouldn't move the cursor.\\n if text:\\n # Unfortunately, in web Slack, if our selection ends at newline,\\n # this will go right over the newline. Argh.\\n actions.edit.right()\\n return text\\n\\n def clobber_selection_if_exists():\\n \\\"\\\"\\\"Deletes the currently selected text if it exists; otherwise does nothing.\\\"\\\"\\\"\\n actions.key(\\\"space backspace\\\")\\n # This space-backspace trick is fast and reliable but has the\\n # side-effect of cluttering the undo history. Other options:\\n #\\n # 1. Call edit.cut() inside a clip.revert() block. This assumes\\n # edit.cut() is supported AND will be a no-op if there's no\\n # selection. Unfortunately, sometimes one or both of these is false,\\n # eg. the notion webapp makes ctrl-x cut the current block by default\\n # if nothing is selected.\\n #\\n # 2. Test whether a selection exists by asking whether\\n # edit.selected_text() is empty; if it does, use edit.delete(). This\\n # usually uses the clipboard, which can be quite slow. Also, not sure\\n # how this would interact with switching edit.selected_text() to use\\n # the selection clipboard on linux, which can be nonempty even if no\\n # text is selected in the current application.\\n #\\n # Perhaps this ought to be configurable by a setting.\\n\\n def dictation_peek_right() -> Optional[str]:\\n \\\"\\\"\\\"\\n Tries to get a few characters after the cursor for auto-spacing.\\n Results are not guaranteed; dictation_peek_right() may return None to\\n indicate no information. (Note that returning the empty string \\\"\\\"\\n indicates there is nothing after cursor, ie. we are at the end of the\\n document.)\\n \\\"\\\"\\\"\\n # We grab two characters because I think that's what no_space_before\\n # needs in the worst case. An example where the second character matters\\n # is inserting before (1) \\\"' hello\\\" vs (2) \\\"'hello\\\". 
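# Illustrative only: the None-vs-"" convention above, in caller terms.
# dictation_insert() simply forwards the peeked value, because
# update_context(None) is a no-op while "" legitimately resets the
# formatter to a document start:
before = actions.user.dictation_peek_left()   # None | "" | some text
dictation_formatter.update_context(before)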
In case (1) we\\n # don't want to add space, in case (2) we do.\\n actions.edit.extend_right()\\n actions.edit.extend_right()\\n after = actions.edit.selected_text()\\n if after: actions.edit.left()\\n return after\\n\\n# Use the dictation formatter in dictation mode.\\ndictation_ctx = Context()\\ndictation_ctx.matches = r\\\"\\\"\\\"\\nmode: dictation\\n\\\"\\\"\\\"\\n\\n@dictation_ctx.action_class(\\\"main\\\")\\nclass main_action:\\n def auto_insert(text): actions.user.dictation_insert(text)\\n\",\n \"path\": \"code/dictation.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# Descended from https://github.com/dwiel/talon_community/blob/master/misc/dictation.py\nfrom talon import Module, Context, ui, actions, clip, app, grammar\nfrom typing import Optional, Tuple, Literal, Callable\nimport re\n\nmod = Module()\n\nsetting_context_sensitive_dictation = mod.setting(\n \"context_sensitive_dictation\",\n type=bool,\n default=False,\n desc=\"Look at surrounding text to improve auto-capitalization/spacing in dictation mode. By default, this works by selecting that text & copying it to the clipboard, so it may be slow or fail in some applications.\",\n)\n\nmod.list(\"prose_modifiers\", desc=\"Modifiers that can be used within prose\")\nmod.list(\"prose_snippets\", desc=\"Snippets that can be used within prose\")\nctx = Context()\n# Maps spoken forms to DictationFormat method names (see DictationFormat below).\nctx.lists[\"user.prose_modifiers\"] = {\n \"cap\": \"cap\",\n \"no cap\": \"no_cap\",\n \"no caps\": \"no_cap\", # \"no caps\" variant for Dragon\n \"no space\": \"no_space\",\n}\nctx.lists[\"user.prose_snippets\"] = {\n \"spacebar\": \" \",\n \"new line\": \"\\n\",\n \"new paragraph\": \"\\n\\n\",\n # Curly quotes are used to obtain proper spacing for left and right quotes, but will later be straightened.\n \"open quote\": \"“\",\n \"close quote\": \"”\",\n \"smiley\": \":-)\",\n \"winky\": \";-)\",\n \"frowny\": \":-(\",\n}\n\n@mod.capture(rule=\"{user.prose_modifiers}\")\ndef prose_modifier(m) -> Callable:\n return getattr(DictationFormat, m.prose_modifiers)\n\n@mod.capture(rule=\"({user.vocabulary} | )\")\ndef word(m) -> str:\n \"\"\"A single word, including user-defined vocabulary.\"\"\"\n try:\n return m.vocabulary\n except AttributeError:\n return \" \".join(actions.user.replace_phrases(actions.dictate.parse_words(m.word)))\n\n@mod.capture(rule=\"({user.vocabulary} | )+\")\ndef text(m) -> str:\n \"\"\"A sequence of words, including user-defined vocabulary.\"\"\"\n return format_phrase(m)\n\n@mod.capture(rule=\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | | )+\")\ndef prose(m) -> str:\n \"\"\"Mixed words and punctuation, auto-spaced & capitalized.\"\"\"\n # Straighten curly quotes that were introduced to obtain proper spacing.\n return apply_formatting(m).replace(\"“\", \"\\\"\").replace(\"”\", \"\\\"\")\n\n@mod.capture(rule=\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | )+\")\ndef raw_prose(m) -> str:\n \"\"\"Mixed words and punctuation, auto-spaced & capitalized, without quote straightening and commands (for use in dictation mode).\"\"\"\n return apply_formatting(m)\n\n\f\n# ---------- FORMATTING ---------- #\ndef format_phrase(m):\n words = capture_to_words(m)\n result = \"\"\n for i, word in enumerate(words):\n if i > 0 and needs_space_between(words[i-1], word):\n result += \" \"\n result += word\n return result\n\ndef capture_to_words(m):\n words = []\n for item in m:\n words.extend(\n 
actions.user.replace_phrases(actions.dictate.parse_words(item))\n if isinstance(item, grammar.vm.Phrase)\n else [item])\n return words\n\ndef apply_formatting(m):\n formatter = DictationFormat()\n formatter.state = None\n result = \"\"\n for item in m:\n # prose modifiers (cap/no cap/no space) produce formatter callbacks.\n if isinstance(item, Callable):\n item(formatter)\n else:\n words = (actions.user.replace_phrases(actions.dictate.parse_words(item))\n if isinstance(item, grammar.vm.Phrase)\n else [item])\n for word in words:\n result += formatter.format(word)\n return result\n\n# There must be a simpler way to do this, but I don't see it right now.\nno_space_after = re.compile(r\"\"\"\n (?:\n [\\s\\-_/#@([{‘“] # characters that never need space after them\n | (? bool:\n return not text or no_space_before.search(text)\ndef omit_space_after(text: str) -> bool:\n return not text or no_space_after.search(text)\ndef needs_space_between(before: str, after: str) -> bool:\n return not (omit_space_after(before) or omit_space_before(after))\n\n# # TESTS, uncomment to enable\n# assert needs_space_between(\"a\", \"break\")\n# assert needs_space_between(\"break\", \"a\")\n# assert needs_space_between(\".\", \"a\")\n# assert needs_space_between(\"said\", \"'hello\")\n# assert needs_space_between(\"hello'\", \"said\")\n# assert needs_space_between(\"hello.\", \"'John\")\n# assert needs_space_between(\"John.'\", \"They\")\n# assert needs_space_between(\"paid\", \"$50\")\n# assert needs_space_between(\"50$\", \"payment\")\n# assert not needs_space_between(\"\", \"\")\n# assert not needs_space_between(\"a\", \"\")\n# assert not needs_space_between(\"a\", \" \")\n# assert not needs_space_between(\"\", \"a\")\n# assert not needs_space_between(\" \", \"a\")\n# assert not needs_space_between(\"a\", \",\")\n# assert not needs_space_between(\"'\", \"a\")\n# assert not needs_space_between(\"a\", \"'\")\n# assert not needs_space_between(\"and-\", \"or\")\n# assert not needs_space_between(\"mary\", \"-kate\")\n# assert not needs_space_between(\"$\", \"50\")\n# assert not needs_space_between(\"US\", \"$\")\n# assert not needs_space_between(\"(\", \")\")\n# assert not needs_space_between(\"(\", \"e.g.\")\n# assert not needs_space_between(\"example\", \")\")\n# assert not needs_space_between(\"example\", '\".')\n# assert not needs_space_between(\"example\", '.\"')\n# assert not needs_space_between(\"hello'\", \".\")\n# assert not needs_space_between(\"hello.\", \"'\")\n\ndef auto_capitalize(text, state = None):\n \"\"\"\n Auto-capitalizes text. 
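# Illustrative only: apply_formatting treats any callable in the capture
# as a formatter control rather than a word. A hypothetical item stream
# (plain strings stand in for parsed words; an unbound method like
# DictationFormat.no_cap receives the formatter as self):
items = ["we", "met", DictationFormat.no_cap, "Bob"]
# apply_formatting(items) == "we met bob" -- state starts as None, so
# nothing is auto-capitalized, and the pending "no cap" lowercases "Bob".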
`state` argument means:\n\n - None: Don't capitalize initial word.\n - \"sentence start\": Capitalize initial word.\n - \"after newline\": Don't capitalize initial word, but we're after a newline.\n Used for double-newline detection.\n\n Returns (capitalized text, updated state).\n \"\"\"\n output = \"\"\n # Imagine a metaphorical \"capitalization charge\" travelling through the\n # string left-to-right.\n charge = state == \"sentence start\"\n newline = state == \"after newline\"\n for c in text:\n # Sentence endings & double newlines create a charge.\n if c in \".!?\" or (newline and c == \"\\n\"):\n charge = True\n # Alphanumeric characters and commas/colons absorb charge & try to\n # capitalize (for numbers & punctuation this does nothing, which is what\n # we want).\n elif charge and (c.isalnum() or c in \",:\"):\n charge = False\n c = c.capitalize()\n # Otherwise the charge just passes through.\n output += c\n newline = c == \"\\n\"\n return output, (\"sentence start\" if charge else\n \"after newline\" if newline else None)\n\n\f\n# ---------- DICTATION AUTO FORMATTING ---------- #\nclass DictationFormat:\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.reset_context()\n self.force_no_space = False\n self.force_capitalization = None # Can also be \"cap\" or \"no cap\".\n\n def reset_context(self):\n self.before = \"\"\n self.state = \"sentence start\"\n\n def update_context(self, before):\n if before is None: return\n self.reset_context()\n self.pass_through(before)\n\n def pass_through(self, text):\n _, self.state = auto_capitalize(text, self.state)\n self.before = text or self.before\n\n def format(self, text, auto_cap=True):\n if not self.force_no_space and needs_space_between(self.before, text):\n text = \" \" + text\n self.force_no_space = False\n if auto_cap:\n text, self.state = auto_capitalize(text, self.state)\n if self.force_capitalization == \"cap\":\n text = format_first_letter(text, lambda s: s.capitalize())\n self.force_capitalization = None\n if self.force_capitalization == \"no cap\":\n text = format_first_letter(text, lambda s: s.lower())\n self.force_capitalization = None\n self.before = text or self.before\n return text\n\n # These are used as callbacks by prose modifiers / dictation_mode commands.\n def cap(self): self.force_capitalization = \"cap\"\n def no_cap(self): self.force_capitalization = \"no cap\"\n def no_space(self):\n # This is typically used after repositioning the cursor, so it is helpful to\n # reset capitalization as well.\n #\n # FIXME: this sets state to \"sentence start\", capitalizing the next\n # word. 
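# Illustrative only: one possible resolution of the FIXME above --
# suppress the space but leave capitalization state alone rather than
# re-arming "sentence start". Untested sketch; whether this is the
# better behaviour is exactly what the FIXME questions:
def no_space(self):
    self.before = ""
    self.state = None   # unlike reset_context(), no forced capital next
    self.force_no_space = True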
probably undesirable, since most places are not the start of\n # sentences?\n self.reset_context()\n self.force_no_space = True\n\ndef format_first_letter(text, formatter):\n i = -1\n for i, c in enumerate(text):\n if c.isalpha():\n break\n if i >= 0 and i < len(text):\n text = text[:i] + formatter(text[i]) + text[i+1:]\n return text\n\ndictation_formatter = DictationFormat()\nui.register(\"app_deactivate\", lambda app: dictation_formatter.reset())\nui.register(\"win_focus\", lambda win: dictation_formatter.reset())\n\ndef reformat_last_utterance(formatter):\n text = actions.user.get_last_phrase()\n actions.user.clear_last_phrase()\n text = formatter(text)\n actions.user.add_phrase_to_history(text)\n actions.insert(text)\n\n@mod.action_class\nclass Actions:\n def dictation_format_reset():\n \"\"\"Resets the dictation formatter\"\"\"\n return dictation_formatter.reset()\n\n def dictation_format_cap():\n \"\"\"Sets the dictation formatter to capitalize\"\"\"\n dictation_formatter.cap()\n\n def dictation_format_no_cap():\n \"\"\"Sets the dictation formatter to not capitalize\"\"\"\n dictation_formatter.no_cap()\n\n def dictation_format_no_space():\n \"\"\"Sets the dictation formatter to not prepend a space\"\"\"\n dictation_formatter.no_space()\n\n def dictation_reformat_cap():\n \"\"\"Capitalizes the last utterance\"\"\"\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.capitalize()))\n\n def dictation_reformat_no_cap():\n \"\"\"Lowercases the last utterance\"\"\"\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.lower()))\n\n def dictation_reformat_no_space():\n \"\"\"Removes space before the last utterance\"\"\"\n reformat_last_utterance(lambda s: s[1:] if s.startswith(\" \") else s)\n\n def dictation_insert_raw(text: str):\n \"\"\"Inserts text as-is, without invoking the dictation formatter.\"\"\"\n actions.user.dictation_insert(text, auto_cap=False)\n\n def dictation_insert(text: str, auto_cap: bool=True) -> str:\n \"\"\"Inserts dictated text, formatted appropriately.\"\"\"\n add_space_after = False\n if setting_context_sensitive_dictation.get():\n # Peek left if we might need leading space or auto-capitalization.\n if (not omit_space_before(text)\n or text != auto_capitalize(text, \"sentence start\")[0]):\n dictation_formatter.update_context(\n actions.user.dictation_peek_left(clobber=True))\n # Peek right if we might need trailing space. NB. We peek right\n # BEFORE insertion to avoid breaking the undo-chain between the\n # inserted text and the trailing space.\n if not omit_space_after(text):\n char = actions.user.dictation_peek_right()\n add_space_after = char is not None and needs_space_between(text, char)\n text = dictation_formatter.format(text, auto_cap)\n # Straighten curly quotes that were introduced to obtain proper\n # spacing. The formatter context still has the original curly quotes\n # so that future dictation is properly formatted.\n text = text.replace(\"“\", \"\\\"\").replace(\"”\", \"\\\"\")\n actions.user.add_phrase_to_history(text)\n # we insert the text all at once in case we have an implementation of\n # insert that is more efficient for long strings, eg. paste-to-insert\n actions.insert(text + (\" \" if add_space_after else \"\"))\n if add_space_after: actions.edit.left()\n\n def dictation_peek_left(clobber: bool = False) -> Optional[str]:\n \"\"\"\n Tries to get some text before the cursor, ideally a word or two, for the\n purpose of auto-spacing & -capitalization. 
Results are not guaranteed;\n dictation_peek_left() may return None to indicate no information. (Note\n that returning the empty string \"\" indicates there is nothing before\n cursor, ie. we are at the beginning of the document.)\n\n If there is currently a selection, dictation_peek_left() must leave it\n unchanged unless `clobber` is true, in which case it may clobber it.\n \"\"\"\n # Get rid of the selection if it exists.\n if clobber: actions.user.clobber_selection_if_exists()\n # Otherwise, if there's a selection, fail.\n elif \"\" != actions.edit.selected_text(): return None\n\n # In principle the previous word should suffice, but some applications\n # have a funny concept of what the previous word is (for example, they\n # may only take the \"`\" at the end of \"`foo`\"). To be double sure we\n # take two words left. I also tried taking a line up + a word left, but\n # edit.extend_up() = key(shift-up) doesn't work consistently in the\n # Slack webapp (sometimes escapes the text box).\n actions.edit.extend_word_left()\n actions.edit.extend_word_left()\n text = actions.edit.selected_text()\n # if we're at the beginning of the document/text box, we may not have\n # selected any text, in which case we shouldn't move the cursor.\n if text:\n # Unfortunately, in web Slack, if our selection ends at newline,\n # this will go right over the newline. Argh.\n actions.edit.right()\n return text\n\n def clobber_selection_if_exists():\n \"\"\"Deletes the currently selected text if it exists; otherwise does nothing.\"\"\"\n actions.key(\"space backspace\")\n # This space-backspace trick is fast and reliable but has the\n # side-effect of cluttering the undo history. Other options:\n #\n # 1. Call edit.cut() inside a clip.revert() block. This assumes\n # edit.cut() is supported AND will be a no-op if there's no\n # selection. Unfortunately, sometimes one or both of these is false,\n # eg. the notion webapp makes ctrl-x cut the current block by default\n # if nothing is selected.\n #\n # 2. Test whether a selection exists by asking whether\n # edit.selected_text() is empty; if it does, use edit.delete(). This\n # usually uses the clipboard, which can be quite slow. Also, not sure\n # how this would interact with switching edit.selected_text() to use\n # the selection clipboard on linux, which can be nonempty even if no\n # text is selected in the current application.\n #\n # Perhaps this ought to be configurable by a setting.\n\n def dictation_peek_right() -> Optional[str]:\n \"\"\"\n Tries to get a few characters after the cursor for auto-spacing.\n Results are not guaranteed; dictation_peek_right() may return None to\n indicate no information. (Note that returning the empty string \"\"\n indicates there is nothing after cursor, ie. we are at the end of the\n document.)\n \"\"\"\n # We grab two characters because I think that's what no_space_before\n # needs in the worst case. An example where the second character matters\n # is inserting before (1) \"' hello\" vs (2) \"'hello\". 
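# Illustrative only: the peeking machinery above runs only when
# context_sensitive_dictation is enabled (it defaults to False). One
# hypothetical way to flip it from Python; a settings block in a .talon
# file is the more common route:
settings_ctx = Context()
settings_ctx.settings = {"user.context_sensitive_dictation": True}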
In case (1) we\n # don't want to add space, in case (2) we do.\n actions.edit.extend_right()\n actions.edit.extend_right()\n after = actions.edit.selected_text()\n if after: actions.edit.left()\n return after\n\n# Use the dictation formatter in dictation mode.\ndictation_ctx = Context()\ndictation_ctx.matches = r\"\"\"\nmode: dictation\n\"\"\"\n\n@dictation_ctx.action_class(\"main\")\nclass main_action:\n def auto_insert(text):\n actions.user.dictation_insert(actions.auto_format(text))\n","path":"code/dictation.py"}],"string":"[\n {\n \"content\": \"# Descended from https://github.com/dwiel/talon_community/blob/master/misc/dictation.py\\nfrom talon import Module, Context, ui, actions, clip, app, grammar\\nfrom typing import Optional, Tuple, Literal, Callable\\nimport re\\n\\nmod = Module()\\n\\nsetting_context_sensitive_dictation = mod.setting(\\n \\\"context_sensitive_dictation\\\",\\n type=bool,\\n default=False,\\n desc=\\\"Look at surrounding text to improve auto-capitalization/spacing in dictation mode. By default, this works by selecting that text & copying it to the clipboard, so it may be slow or fail in some applications.\\\",\\n)\\n\\nmod.list(\\\"prose_modifiers\\\", desc=\\\"Modifiers that can be used within prose\\\")\\nmod.list(\\\"prose_snippets\\\", desc=\\\"Snippets that can be used within prose\\\")\\nctx = Context()\\n# Maps spoken forms to DictationFormat method names (see DictationFormat below).\\nctx.lists[\\\"user.prose_modifiers\\\"] = {\\n \\\"cap\\\": \\\"cap\\\",\\n \\\"no cap\\\": \\\"no_cap\\\",\\n \\\"no caps\\\": \\\"no_cap\\\", # \\\"no caps\\\" variant for Dragon\\n \\\"no space\\\": \\\"no_space\\\",\\n}\\nctx.lists[\\\"user.prose_snippets\\\"] = {\\n \\\"spacebar\\\": \\\" \\\",\\n \\\"new line\\\": \\\"\\\\n\\\",\\n \\\"new paragraph\\\": \\\"\\\\n\\\\n\\\",\\n # Curly quotes are used to obtain proper spacing for left and right quotes, but will later be straightened.\\n \\\"open quote\\\": \\\"“\\\",\\n \\\"close quote\\\": \\\"”\\\",\\n \\\"smiley\\\": \\\":-)\\\",\\n \\\"winky\\\": \\\";-)\\\",\\n \\\"frowny\\\": \\\":-(\\\",\\n}\\n\\n@mod.capture(rule=\\\"{user.prose_modifiers}\\\")\\ndef prose_modifier(m) -> Callable:\\n return getattr(DictationFormat, m.prose_modifiers)\\n\\n@mod.capture(rule=\\\"({user.vocabulary} | )\\\")\\ndef word(m) -> str:\\n \\\"\\\"\\\"A single word, including user-defined vocabulary.\\\"\\\"\\\"\\n try:\\n return m.vocabulary\\n except AttributeError:\\n return \\\" \\\".join(actions.user.replace_phrases(actions.dictate.parse_words(m.word)))\\n\\n@mod.capture(rule=\\\"({user.vocabulary} | )+\\\")\\ndef text(m) -> str:\\n \\\"\\\"\\\"A sequence of words, including user-defined vocabulary.\\\"\\\"\\\"\\n return format_phrase(m)\\n\\n@mod.capture(rule=\\\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | | )+\\\")\\ndef prose(m) -> str:\\n \\\"\\\"\\\"Mixed words and punctuation, auto-spaced & capitalized.\\\"\\\"\\\"\\n # Straighten curly quotes that were introduced to obtain proper spacing.\\n return apply_formatting(m).replace(\\\"“\\\", \\\"\\\\\\\"\\\").replace(\\\"”\\\", \\\"\\\\\\\"\\\")\\n\\n@mod.capture(rule=\\\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | )+\\\")\\ndef raw_prose(m) -> str:\\n \\\"\\\"\\\"Mixed words and punctuation, auto-spaced & capitalized, without quote straightening and commands (for use in dictation mode).\\\"\\\"\\\"\\n return apply_formatting(m)\\n\\n\\f\\n# ---------- FORMATTING ---------- #\\ndef format_phrase(m):\\n words = capture_to_words(m)\\n result 
= \\\"\\\"\\n for i, word in enumerate(words):\\n if i > 0 and needs_space_between(words[i-1], word):\\n result += \\\" \\\"\\n result += word\\n return result\\n\\ndef capture_to_words(m):\\n words = []\\n for item in m:\\n words.extend(\\n actions.user.replace_phrases(actions.dictate.parse_words(item))\\n if isinstance(item, grammar.vm.Phrase)\\n else [item])\\n return words\\n\\ndef apply_formatting(m):\\n formatter = DictationFormat()\\n formatter.state = None\\n result = \\\"\\\"\\n for item in m:\\n # prose modifiers (cap/no cap/no space) produce formatter callbacks.\\n if isinstance(item, Callable):\\n item(formatter)\\n else:\\n words = (actions.user.replace_phrases(actions.dictate.parse_words(item))\\n if isinstance(item, grammar.vm.Phrase)\\n else [item])\\n for word in words:\\n result += formatter.format(word)\\n return result\\n\\n# There must be a simpler way to do this, but I don't see it right now.\\nno_space_after = re.compile(r\\\"\\\"\\\"\\n (?:\\n [\\\\s\\\\-_/#@([{‘“] # characters that never need space after them\\n | (? bool:\\n return not text or no_space_before.search(text)\\ndef omit_space_after(text: str) -> bool:\\n return not text or no_space_after.search(text)\\ndef needs_space_between(before: str, after: str) -> bool:\\n return not (omit_space_after(before) or omit_space_before(after))\\n\\n# # TESTS, uncomment to enable\\n# assert needs_space_between(\\\"a\\\", \\\"break\\\")\\n# assert needs_space_between(\\\"break\\\", \\\"a\\\")\\n# assert needs_space_between(\\\".\\\", \\\"a\\\")\\n# assert needs_space_between(\\\"said\\\", \\\"'hello\\\")\\n# assert needs_space_between(\\\"hello'\\\", \\\"said\\\")\\n# assert needs_space_between(\\\"hello.\\\", \\\"'John\\\")\\n# assert needs_space_between(\\\"John.'\\\", \\\"They\\\")\\n# assert needs_space_between(\\\"paid\\\", \\\"$50\\\")\\n# assert needs_space_between(\\\"50$\\\", \\\"payment\\\")\\n# assert not needs_space_between(\\\"\\\", \\\"\\\")\\n# assert not needs_space_between(\\\"a\\\", \\\"\\\")\\n# assert not needs_space_between(\\\"a\\\", \\\" \\\")\\n# assert not needs_space_between(\\\"\\\", \\\"a\\\")\\n# assert not needs_space_between(\\\" \\\", \\\"a\\\")\\n# assert not needs_space_between(\\\"a\\\", \\\",\\\")\\n# assert not needs_space_between(\\\"'\\\", \\\"a\\\")\\n# assert not needs_space_between(\\\"a\\\", \\\"'\\\")\\n# assert not needs_space_between(\\\"and-\\\", \\\"or\\\")\\n# assert not needs_space_between(\\\"mary\\\", \\\"-kate\\\")\\n# assert not needs_space_between(\\\"$\\\", \\\"50\\\")\\n# assert not needs_space_between(\\\"US\\\", \\\"$\\\")\\n# assert not needs_space_between(\\\"(\\\", \\\")\\\")\\n# assert not needs_space_between(\\\"(\\\", \\\"e.g.\\\")\\n# assert not needs_space_between(\\\"example\\\", \\\")\\\")\\n# assert not needs_space_between(\\\"example\\\", '\\\".')\\n# assert not needs_space_between(\\\"example\\\", '.\\\"')\\n# assert not needs_space_between(\\\"hello'\\\", \\\".\\\")\\n# assert not needs_space_between(\\\"hello.\\\", \\\"'\\\")\\n\\ndef auto_capitalize(text, state = None):\\n \\\"\\\"\\\"\\n Auto-capitalizes text. 
`state` argument means:\\n\\n - None: Don't capitalize initial word.\\n - \\\"sentence start\\\": Capitalize initial word.\\n - \\\"after newline\\\": Don't capitalize initial word, but we're after a newline.\\n Used for double-newline detection.\\n\\n Returns (capitalized text, updated state).\\n \\\"\\\"\\\"\\n output = \\\"\\\"\\n # Imagine a metaphorical \\\"capitalization charge\\\" travelling through the\\n # string left-to-right.\\n charge = state == \\\"sentence start\\\"\\n newline = state == \\\"after newline\\\"\\n for c in text:\\n # Sentence endings & double newlines create a charge.\\n if c in \\\".!?\\\" or (newline and c == \\\"\\\\n\\\"):\\n charge = True\\n # Alphanumeric characters and commas/colons absorb charge & try to\\n # capitalize (for numbers & punctuation this does nothing, which is what\\n # we want).\\n elif charge and (c.isalnum() or c in \\\",:\\\"):\\n charge = False\\n c = c.capitalize()\\n # Otherwise the charge just passes through.\\n output += c\\n newline = c == \\\"\\\\n\\\"\\n return output, (\\\"sentence start\\\" if charge else\\n \\\"after newline\\\" if newline else None)\\n\\n\\f\\n# ---------- DICTATION AUTO FORMATTING ---------- #\\nclass DictationFormat:\\n def __init__(self):\\n self.reset()\\n\\n def reset(self):\\n self.reset_context()\\n self.force_no_space = False\\n self.force_capitalization = None # Can also be \\\"cap\\\" or \\\"no cap\\\".\\n\\n def reset_context(self):\\n self.before = \\\"\\\"\\n self.state = \\\"sentence start\\\"\\n\\n def update_context(self, before):\\n if before is None: return\\n self.reset_context()\\n self.pass_through(before)\\n\\n def pass_through(self, text):\\n _, self.state = auto_capitalize(text, self.state)\\n self.before = text or self.before\\n\\n def format(self, text, auto_cap=True):\\n if not self.force_no_space and needs_space_between(self.before, text):\\n text = \\\" \\\" + text\\n self.force_no_space = False\\n if auto_cap:\\n text, self.state = auto_capitalize(text, self.state)\\n if self.force_capitalization == \\\"cap\\\":\\n text = format_first_letter(text, lambda s: s.capitalize())\\n self.force_capitalization = None\\n if self.force_capitalization == \\\"no cap\\\":\\n text = format_first_letter(text, lambda s: s.lower())\\n self.force_capitalization = None\\n self.before = text or self.before\\n return text\\n\\n # These are used as callbacks by prose modifiers / dictation_mode commands.\\n def cap(self): self.force_capitalization = \\\"cap\\\"\\n def no_cap(self): self.force_capitalization = \\\"no cap\\\"\\n def no_space(self):\\n # This is typically used after repositioning the cursor, so it is helpful to\\n # reset capitalization as well.\\n #\\n # FIXME: this sets state to \\\"sentence start\\\", capitalizing the next\\n # word. 
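# Illustrative only: format_first_letter, used by format() above,
# targets the first alphabetic character and skips leading punctuation
# (expected results in comments):
format_first_letter('"hello"', lambda c: c.capitalize())   # -> '"Hello"'
format_first_letter("'bob", lambda c: c.lower())           # -> "'bob"
# Edge case: with no alphabetic character the loop ends without break
# and the formatter is applied to the final character instead --
# harmless for case-changing formatters.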
probably undesirable, since most places are not the start of\\n # sentences?\\n self.reset_context()\\n self.force_no_space = True\\n\\ndef format_first_letter(text, formatter):\\n i = -1\\n for i, c in enumerate(text):\\n if c.isalpha():\\n break\\n if i >= 0 and i < len(text):\\n text = text[:i] + formatter(text[i]) + text[i+1:]\\n return text\\n\\ndictation_formatter = DictationFormat()\\nui.register(\\\"app_deactivate\\\", lambda app: dictation_formatter.reset())\\nui.register(\\\"win_focus\\\", lambda win: dictation_formatter.reset())\\n\\ndef reformat_last_utterance(formatter):\\n text = actions.user.get_last_phrase()\\n actions.user.clear_last_phrase()\\n text = formatter(text)\\n actions.user.add_phrase_to_history(text)\\n actions.insert(text)\\n\\n@mod.action_class\\nclass Actions:\\n def dictation_format_reset():\\n \\\"\\\"\\\"Resets the dictation formatter\\\"\\\"\\\"\\n return dictation_formatter.reset()\\n\\n def dictation_format_cap():\\n \\\"\\\"\\\"Sets the dictation formatter to capitalize\\\"\\\"\\\"\\n dictation_formatter.cap()\\n\\n def dictation_format_no_cap():\\n \\\"\\\"\\\"Sets the dictation formatter to not capitalize\\\"\\\"\\\"\\n dictation_formatter.no_cap()\\n\\n def dictation_format_no_space():\\n \\\"\\\"\\\"Sets the dictation formatter to not prepend a space\\\"\\\"\\\"\\n dictation_formatter.no_space()\\n\\n def dictation_reformat_cap():\\n \\\"\\\"\\\"Capitalizes the last utterance\\\"\\\"\\\"\\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.capitalize()))\\n\\n def dictation_reformat_no_cap():\\n \\\"\\\"\\\"Lowercases the last utterance\\\"\\\"\\\"\\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.lower()))\\n\\n def dictation_reformat_no_space():\\n \\\"\\\"\\\"Removes space before the last utterance\\\"\\\"\\\"\\n reformat_last_utterance(lambda s: s[1:] if s.startswith(\\\" \\\") else s)\\n\\n def dictation_insert_raw(text: str):\\n \\\"\\\"\\\"Inserts text as-is, without invoking the dictation formatter.\\\"\\\"\\\"\\n actions.user.dictation_insert(text, auto_cap=False)\\n\\n def dictation_insert(text: str, auto_cap: bool=True) -> str:\\n \\\"\\\"\\\"Inserts dictated text, formatted appropriately.\\\"\\\"\\\"\\n add_space_after = False\\n if setting_context_sensitive_dictation.get():\\n # Peek left if we might need leading space or auto-capitalization.\\n if (not omit_space_before(text)\\n or text != auto_capitalize(text, \\\"sentence start\\\")[0]):\\n dictation_formatter.update_context(\\n actions.user.dictation_peek_left(clobber=True))\\n # Peek right if we might need trailing space. NB. We peek right\\n # BEFORE insertion to avoid breaking the undo-chain between the\\n # inserted text and the trailing space.\\n if not omit_space_after(text):\\n char = actions.user.dictation_peek_right()\\n add_space_after = char is not None and needs_space_between(text, char)\\n text = dictation_formatter.format(text, auto_cap)\\n # Straighten curly quotes that were introduced to obtain proper\\n # spacing. The formatter context still has the original curly quotes\\n # so that future dictation is properly formatted.\\n text = text.replace(\\\"“\\\", \\\"\\\\\\\"\\\").replace(\\\"”\\\", \\\"\\\\\\\"\\\")\\n actions.user.add_phrase_to_history(text)\\n # we insert the text all at once in case we have an implementation of\\n # insert that is more efficient for long strings, eg. 
paste-to-insert\\n actions.insert(text + (\\\" \\\" if add_space_after else \\\"\\\"))\\n if add_space_after: actions.edit.left()\\n\\n def dictation_peek_left(clobber: bool = False) -> Optional[str]:\\n \\\"\\\"\\\"\\n Tries to get some text before the cursor, ideally a word or two, for the\\n purpose of auto-spacing & -capitalization. Results are not guaranteed;\\n dictation_peek_left() may return None to indicate no information. (Note\\n that returning the empty string \\\"\\\" indicates there is nothing before\\n cursor, ie. we are at the beginning of the document.)\\n\\n If there is currently a selection, dictation_peek_left() must leave it\\n unchanged unless `clobber` is true, in which case it may clobber it.\\n \\\"\\\"\\\"\\n # Get rid of the selection if it exists.\\n if clobber: actions.user.clobber_selection_if_exists()\\n # Otherwise, if there's a selection, fail.\\n elif \\\"\\\" != actions.edit.selected_text(): return None\\n\\n # In principle the previous word should suffice, but some applications\\n # have a funny concept of what the previous word is (for example, they\\n # may only take the \\\"`\\\" at the end of \\\"`foo`\\\"). To be double sure we\\n # take two words left. I also tried taking a line up + a word left, but\\n # edit.extend_up() = key(shift-up) doesn't work consistently in the\\n # Slack webapp (sometimes escapes the text box).\\n actions.edit.extend_word_left()\\n actions.edit.extend_word_left()\\n text = actions.edit.selected_text()\\n # if we're at the beginning of the document/text box, we may not have\\n # selected any text, in which case we shouldn't move the cursor.\\n if text:\\n # Unfortunately, in web Slack, if our selection ends at newline,\\n # this will go right over the newline. Argh.\\n actions.edit.right()\\n return text\\n\\n def clobber_selection_if_exists():\\n \\\"\\\"\\\"Deletes the currently selected text if it exists; otherwise does nothing.\\\"\\\"\\\"\\n actions.key(\\\"space backspace\\\")\\n # This space-backspace trick is fast and reliable but has the\\n # side-effect of cluttering the undo history. Other options:\\n #\\n # 1. Call edit.cut() inside a clip.revert() block. This assumes\\n # edit.cut() is supported AND will be a no-op if there's no\\n # selection. Unfortunately, sometimes one or both of these is false,\\n # eg. the notion webapp makes ctrl-x cut the current block by default\\n # if nothing is selected.\\n #\\n # 2. Test whether a selection exists by asking whether\\n # edit.selected_text() is empty; if it does, use edit.delete(). This\\n # usually uses the clipboard, which can be quite slow. Also, not sure\\n # how this would interact with switching edit.selected_text() to use\\n # the selection clipboard on linux, which can be nonempty even if no\\n # text is selected in the current application.\\n #\\n # Perhaps this ought to be configurable by a setting.\\n\\n def dictation_peek_right() -> Optional[str]:\\n \\\"\\\"\\\"\\n Tries to get a few characters after the cursor for auto-spacing.\\n Results are not guaranteed; dictation_peek_right() may return None to\\n indicate no information. (Note that returning the empty string \\\"\\\"\\n indicates there is nothing after cursor, ie. we are at the end of the\\n document.)\\n \\\"\\\"\\\"\\n # We grab two characters because I think that's what no_space_before\\n # needs in the worst case. An example where the second character matters\\n # is inserting before (1) \\\"' hello\\\" vs (2) \\\"'hello\\\". 
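# Illustrative only: why two characters are peeked. Per the example
# above, the character after a quote disambiguates it (expected values
# assume the spacing regexes behave as the comment describes):
needs_space_between("world", "' ")   # False -- closing quote, no space
needs_space_between("world", "'h")   # True  -- opening quote of "'hello"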
In case (1) we\\n # don't want to add space, in case (2) we do.\\n actions.edit.extend_right()\\n actions.edit.extend_right()\\n after = actions.edit.selected_text()\\n if after: actions.edit.left()\\n return after\\n\\n# Use the dictation formatter in dictation mode.\\ndictation_ctx = Context()\\ndictation_ctx.matches = r\\\"\\\"\\\"\\nmode: dictation\\n\\\"\\\"\\\"\\n\\n@dictation_ctx.action_class(\\\"main\\\")\\nclass main_action:\\n def auto_insert(text):\\n actions.user.dictation_insert(actions.auto_format(text))\\n\",\n \"path\": \"code/dictation.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/code/dictation.py b/code/dictation.py\nindex c0229af1ac..d25fb9c506 100644\n--- a/code/dictation.py\n+++ b/code/dictation.py\n@@ -398,4 +398,5 @@ def dictation_peek_right() -> Optional[str]:\n \n @dictation_ctx.action_class(\"main\")\n class main_action:\n- def auto_insert(text): actions.user.dictation_insert(text)\n+ def auto_insert(text):\n+ actions.user.dictation_insert(actions.auto_format(text))\n"}}},{"rowIdx":420,"cells":{"in_source_id":{"kind":"string","value":"mlflow__mlflow-4368"},"issue":{"kind":"string","value":"Make mlflow compatible with protobuf 3.6.1\n## What changes are proposed in this pull request?\r\n\r\nMake mlflow compatible with protobuf 3.6.1:\r\nfor protobuf ==3.6.1\r\nAdd EnumTypeWrapper.__getattr__ to access values\r\n\r\n## How is this patch tested?\r\n\r\nN/A\r\n\r\n## Release Notes\r\n\r\n### Is this a user-facing change?\r\n\r\n- [x] No. You can skip the rest of this section.\r\n- [ ] Yes. Give a description of this change to be included in the release notes for MLflow users.\r\n\r\n(Details in 1-2 sentences. You can just refer to another PR with a description if this PR is part of a larger change.)\r\n\r\n### What component(s), interfaces, languages, and integrations does this PR affect?\r\nComponents \r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs\r\n- [ ] `area/server-infra`: MLflow server, JavaScript dev server\r\n- [x] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguage \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\r\n\r\n\r\n### How should the PR be classified in the release notes? Choose one:\r\n\r\n- [ ] `rn/breaking-change` - The PR will be mentioned in the \"Breaking Changes\" section\r\n- [x] `rn/none` - No description will be included. 
The PR will be mentioned only by the PR number in the \"Small Bugfixes and Documentation Updates\" section\r\n- [ ] `rn/feature` - A new user-facing feature worth mentioning in the release notes\r\n- [ ] `rn/bug-fix` - A user-facing bug fix worth mentioning in the release notes\r\n- [ ] `rn/documentation` - A user-facing documentation change worth mentioning in the release notes\r\n\n"},"before_files":{"kind":"list like","value":[{"content":"import os\nimport logging\n\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\n_MLFLOW_SKINNY_ENV_VAR = \"MLFLOW_SKINNY\"\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\nextra_files = [\"ml-package-versions.yml\", \"pyspark/ml/log_model_allowlist.txt\"]\n\n\"\"\"\nMinimal requirements for the skinny MLflow client which provides a limited\nsubset of functionality such as: RESTful client functionality for Tracking and\nModel Registry, as well as support for Project execution against local backends\nand Databricks.\n\"\"\"\nSKINNY_REQUIREMENTS = [\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"entrypoints\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"protobuf>=3.6.0\",\n \"pytz\",\n \"requests>=2.17.3\",\n \"packaging\",\n]\n\n\"\"\"\nThese are the core requirements for the complete MLflow platform, which augments\nthe skinny client functionality with support for running the MLflow Tracking\nServer & UI. 
It also adds project backends such as Docker and Kubernetes among\nother capabilities.\n\"\"\"\nCORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [\n \"alembic<=1.4.1\",\n # Required\n \"docker>=4.0.0\",\n \"Flask\",\n \"gunicorn; platform_system != 'Windows'\",\n \"numpy\",\n \"pandas\",\n \"prometheus-flask-exporter\",\n \"querystring_parser\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy\",\n \"waitress; platform_system == 'Windows'\",\n]\n\n_is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))\nlogging.debug(\"{} env var is set: {}\".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))\n\nsetup(\n name=\"mlflow\" if not _is_mlflow_skinny else \"mlflow-skinny\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files + extra_files}\n if not _is_mlflow_skinny\n # include alembic files to enable usage of the skinny client with SQL databases\n # if users install sqlalchemy, alembic, and sqlparse independently\n else {\"mlflow\": alembic_files + extra_files},\n install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\"],\n \"aliyun-oss\": [\"aliyunstoreplugin\"],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read()\n if not _is_mlflow_skinny\n else open(\"README_SKINNY.rst\").read() + open(\"README.rst\").read(),\n long_description_content_type=\"text/x-rst\",\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", \"Programming Language :: Python :: 3.6\"],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.6\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": \"https://github.com/mlflow/mlflow\",\n },\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"import os\\nimport logging\\n\\nfrom importlib.machinery import SourceFileLoader\\nfrom setuptools import setup, find_packages\\n\\n_MLFLOW_SKINNY_ENV_VAR = \\\"MLFLOW_SKINNY\\\"\\n\\nversion = (\\n SourceFileLoader(\\\"mlflow.version\\\", os.path.join(\\\"mlflow\\\", \\\"version.py\\\")).load_module().VERSION\\n)\\n\\n\\n# Get a list of all files in the JS directory to include in our module\\ndef package_files(directory):\\n paths = []\\n for (path, _, filenames) in os.walk(directory):\\n for filename in filenames:\\n paths.append(os.path.join(\\\"..\\\", path, filename))\\n return paths\\n\\n\\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\\n# to include in the 
wheel, e.g. \\\"../mlflow/server/js/build/index.html\\\"\\njs_files = package_files(\\\"mlflow/server/js/build\\\")\\nmodels_container_server_files = package_files(\\\"mlflow/models/container\\\")\\nalembic_files = [\\n \\\"../mlflow/store/db_migrations/alembic.ini\\\",\\n \\\"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\\\",\\n]\\nextra_files = [\\\"ml-package-versions.yml\\\", \\\"pyspark/ml/log_model_allowlist.txt\\\"]\\n\\n\\\"\\\"\\\"\\nMinimal requirements for the skinny MLflow client which provides a limited\\nsubset of functionality such as: RESTful client functionality for Tracking and\\nModel Registry, as well as support for Project execution against local backends\\nand Databricks.\\n\\\"\\\"\\\"\\nSKINNY_REQUIREMENTS = [\\n \\\"click>=7.0\\\",\\n \\\"cloudpickle\\\",\\n \\\"databricks-cli>=0.8.7\\\",\\n \\\"entrypoints\\\",\\n \\\"gitpython>=2.1.0\\\",\\n \\\"pyyaml\\\",\\n \\\"protobuf>=3.6.0\\\",\\n \\\"pytz\\\",\\n \\\"requests>=2.17.3\\\",\\n \\\"packaging\\\",\\n]\\n\\n\\\"\\\"\\\"\\nThese are the core requirements for the complete MLflow platform, which augments\\nthe skinny client functionality with support for running the MLflow Tracking\\nServer & UI. It also adds project backends such as Docker and Kubernetes among\\nother capabilities.\\n\\\"\\\"\\\"\\nCORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [\\n \\\"alembic<=1.4.1\\\",\\n # Required\\n \\\"docker>=4.0.0\\\",\\n \\\"Flask\\\",\\n \\\"gunicorn; platform_system != 'Windows'\\\",\\n \\\"numpy\\\",\\n \\\"pandas\\\",\\n \\\"prometheus-flask-exporter\\\",\\n \\\"querystring_parser\\\",\\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\\n \\\"sqlparse>=0.3.1\\\",\\n # Required to run the MLflow server against SQL-backed storage\\n \\\"sqlalchemy\\\",\\n \\\"waitress; platform_system == 'Windows'\\\",\\n]\\n\\n_is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))\\nlogging.debug(\\\"{} env var is set: {}\\\".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))\\n\\nsetup(\\n name=\\\"mlflow\\\" if not _is_mlflow_skinny else \\\"mlflow-skinny\\\",\\n version=version,\\n packages=find_packages(exclude=[\\\"tests\\\", \\\"tests.*\\\"]),\\n package_data={\\\"mlflow\\\": js_files + models_container_server_files + alembic_files + extra_files}\\n if not _is_mlflow_skinny\\n # include alembic files to enable usage of the skinny client with SQL databases\\n # if users install sqlalchemy, alembic, and sqlparse independently\\n else {\\\"mlflow\\\": alembic_files + extra_files},\\n install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,\\n extras_require={\\n \\\"extras\\\": [\\n \\\"scikit-learn\\\",\\n # Required to log artifacts and models to HDFS artifact locations\\n \\\"pyarrow\\\",\\n # Required to log artifacts and models to AWS S3 artifact locations\\n \\\"boto3\\\",\\n \\\"mleap\\\",\\n # Required to log artifacts and models to GCS artifact locations\\n \\\"google-cloud-storage\\\",\\n \\\"azureml-core>=1.2.0\\\",\\n # Required to log artifacts to SFTP artifact locations\\n \\\"pysftp\\\",\\n # Required by the mlflow.projects module, when running projects against\\n # a remote Kubernetes cluster\\n \\\"kubernetes\\\",\\n ],\\n \\\"sqlserver\\\": [\\\"mlflow-dbstore\\\"],\\n \\\"aliyun-oss\\\": [\\\"aliyunstoreplugin\\\"],\\n },\\n entry_points=\\\"\\\"\\\"\\n [console_scripts]\\n mlflow=mlflow.cli:cli\\n \\\"\\\"\\\",\\n zip_safe=False,\\n author=\\\"Databricks\\\",\\n description=\\\"MLflow: A Platform for ML Development and 
Productionization\\\",\\n long_description=open(\\\"README.rst\\\").read()\\n if not _is_mlflow_skinny\\n else open(\\\"README_SKINNY.rst\\\").read() + open(\\\"README.rst\\\").read(),\\n long_description_content_type=\\\"text/x-rst\\\",\\n license=\\\"Apache License 2.0\\\",\\n classifiers=[\\\"Intended Audience :: Developers\\\", \\\"Programming Language :: Python :: 3.6\\\"],\\n keywords=\\\"ml ai databricks\\\",\\n url=\\\"https://mlflow.org/\\\",\\n python_requires=\\\">=3.6\\\",\\n project_urls={\\n \\\"Bug Tracker\\\": \\\"https://github.com/mlflow/mlflow/issues\\\",\\n \\\"Documentation\\\": \\\"https://mlflow.org/docs/latest/index.html\\\",\\n \\\"Source Code\\\": \\\"https://github.com/mlflow/mlflow\\\",\\n },\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"import os\nimport logging\n\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\n_MLFLOW_SKINNY_ENV_VAR = \"MLFLOW_SKINNY\"\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\nextra_files = [\"ml-package-versions.yml\", \"pyspark/ml/log_model_allowlist.txt\"]\n\n\"\"\"\nMinimal requirements for the skinny MLflow client which provides a limited\nsubset of functionality such as: RESTful client functionality for Tracking and\nModel Registry, as well as support for Project execution against local backends\nand Databricks.\n\"\"\"\nSKINNY_REQUIREMENTS = [\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"entrypoints\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"protobuf>=3.7.0\",\n \"pytz\",\n \"requests>=2.17.3\",\n \"packaging\",\n]\n\n\"\"\"\nThese are the core requirements for the complete MLflow platform, which augments\nthe skinny client functionality with support for running the MLflow Tracking\nServer & UI. 
It also adds project backends such as Docker and Kubernetes among\nother capabilities.\n\"\"\"\nCORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [\n \"alembic<=1.4.1\",\n # Required\n \"docker>=4.0.0\",\n \"Flask\",\n \"gunicorn; platform_system != 'Windows'\",\n \"numpy\",\n \"pandas\",\n \"prometheus-flask-exporter\",\n \"querystring_parser\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy\",\n \"waitress; platform_system == 'Windows'\",\n]\n\n_is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))\nlogging.debug(\"{} env var is set: {}\".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))\n\nsetup(\n name=\"mlflow\" if not _is_mlflow_skinny else \"mlflow-skinny\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files + extra_files}\n if not _is_mlflow_skinny\n # include alembic files to enable usage of the skinny client with SQL databases\n # if users install sqlalchemy, alembic, and sqlparse independently\n else {\"mlflow\": alembic_files + extra_files},\n install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\"],\n \"aliyun-oss\": [\"aliyunstoreplugin\"],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read()\n if not _is_mlflow_skinny\n else open(\"README_SKINNY.rst\").read() + open(\"README.rst\").read(),\n long_description_content_type=\"text/x-rst\",\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", \"Programming Language :: Python :: 3.6\"],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.6\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": \"https://github.com/mlflow/mlflow\",\n },\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"import os\\nimport logging\\n\\nfrom importlib.machinery import SourceFileLoader\\nfrom setuptools import setup, find_packages\\n\\n_MLFLOW_SKINNY_ENV_VAR = \\\"MLFLOW_SKINNY\\\"\\n\\nversion = (\\n SourceFileLoader(\\\"mlflow.version\\\", os.path.join(\\\"mlflow\\\", \\\"version.py\\\")).load_module().VERSION\\n)\\n\\n\\n# Get a list of all files in the JS directory to include in our module\\ndef package_files(directory):\\n paths = []\\n for (path, _, filenames) in os.walk(directory):\\n for filename in filenames:\\n paths.append(os.path.join(\\\"..\\\", path, filename))\\n return paths\\n\\n\\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\\n# to include in the 
wheel, e.g. \\\"../mlflow/server/js/build/index.html\\\"\\njs_files = package_files(\\\"mlflow/server/js/build\\\")\\nmodels_container_server_files = package_files(\\\"mlflow/models/container\\\")\\nalembic_files = [\\n \\\"../mlflow/store/db_migrations/alembic.ini\\\",\\n \\\"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\\\",\\n]\\nextra_files = [\\\"ml-package-versions.yml\\\", \\\"pyspark/ml/log_model_allowlist.txt\\\"]\\n\\n\\\"\\\"\\\"\\nMinimal requirements for the skinny MLflow client which provides a limited\\nsubset of functionality such as: RESTful client functionality for Tracking and\\nModel Registry, as well as support for Project execution against local backends\\nand Databricks.\\n\\\"\\\"\\\"\\nSKINNY_REQUIREMENTS = [\\n \\\"click>=7.0\\\",\\n \\\"cloudpickle\\\",\\n \\\"databricks-cli>=0.8.7\\\",\\n \\\"entrypoints\\\",\\n \\\"gitpython>=2.1.0\\\",\\n \\\"pyyaml\\\",\\n \\\"protobuf>=3.7.0\\\",\\n \\\"pytz\\\",\\n \\\"requests>=2.17.3\\\",\\n \\\"packaging\\\",\\n]\\n\\n\\\"\\\"\\\"\\nThese are the core requirements for the complete MLflow platform, which augments\\nthe skinny client functionality with support for running the MLflow Tracking\\nServer & UI. It also adds project backends such as Docker and Kubernetes among\\nother capabilities.\\n\\\"\\\"\\\"\\nCORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [\\n \\\"alembic<=1.4.1\\\",\\n # Required\\n \\\"docker>=4.0.0\\\",\\n \\\"Flask\\\",\\n \\\"gunicorn; platform_system != 'Windows'\\\",\\n \\\"numpy\\\",\\n \\\"pandas\\\",\\n \\\"prometheus-flask-exporter\\\",\\n \\\"querystring_parser\\\",\\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\\n \\\"sqlparse>=0.3.1\\\",\\n # Required to run the MLflow server against SQL-backed storage\\n \\\"sqlalchemy\\\",\\n \\\"waitress; platform_system == 'Windows'\\\",\\n]\\n\\n_is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))\\nlogging.debug(\\\"{} env var is set: {}\\\".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))\\n\\nsetup(\\n name=\\\"mlflow\\\" if not _is_mlflow_skinny else \\\"mlflow-skinny\\\",\\n version=version,\\n packages=find_packages(exclude=[\\\"tests\\\", \\\"tests.*\\\"]),\\n package_data={\\\"mlflow\\\": js_files + models_container_server_files + alembic_files + extra_files}\\n if not _is_mlflow_skinny\\n # include alembic files to enable usage of the skinny client with SQL databases\\n # if users install sqlalchemy, alembic, and sqlparse independently\\n else {\\\"mlflow\\\": alembic_files + extra_files},\\n install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,\\n extras_require={\\n \\\"extras\\\": [\\n \\\"scikit-learn\\\",\\n # Required to log artifacts and models to HDFS artifact locations\\n \\\"pyarrow\\\",\\n # Required to log artifacts and models to AWS S3 artifact locations\\n \\\"boto3\\\",\\n \\\"mleap\\\",\\n # Required to log artifacts and models to GCS artifact locations\\n \\\"google-cloud-storage\\\",\\n \\\"azureml-core>=1.2.0\\\",\\n # Required to log artifacts to SFTP artifact locations\\n \\\"pysftp\\\",\\n # Required by the mlflow.projects module, when running projects against\\n # a remote Kubernetes cluster\\n \\\"kubernetes\\\",\\n ],\\n \\\"sqlserver\\\": [\\\"mlflow-dbstore\\\"],\\n \\\"aliyun-oss\\\": [\\\"aliyunstoreplugin\\\"],\\n },\\n entry_points=\\\"\\\"\\\"\\n [console_scripts]\\n mlflow=mlflow.cli:cli\\n \\\"\\\"\\\",\\n zip_safe=False,\\n author=\\\"Databricks\\\",\\n description=\\\"MLflow: A Platform for ML Development and 
Productionization\\\",\\n long_description=open(\\\"README.rst\\\").read()\\n if not _is_mlflow_skinny\\n else open(\\\"README_SKINNY.rst\\\").read() + open(\\\"README.rst\\\").read(),\\n long_description_content_type=\\\"text/x-rst\\\",\\n license=\\\"Apache License 2.0\\\",\\n classifiers=[\\\"Intended Audience :: Developers\\\", \\\"Programming Language :: Python :: 3.6\\\"],\\n keywords=\\\"ml ai databricks\\\",\\n url=\\\"https://mlflow.org/\\\",\\n python_requires=\\\">=3.6\\\",\\n project_urls={\\n \\\"Bug Tracker\\\": \\\"https://github.com/mlflow/mlflow/issues\\\",\\n \\\"Documentation\\\": \\\"https://mlflow.org/docs/latest/index.html\\\",\\n \\\"Source Code\\\": \\\"https://github.com/mlflow/mlflow\\\",\\n },\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/setup.py b/setup.py\nindex 1a98478b5e3d4..186d2f67fbf30 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -43,7 +43,7 @@ def package_files(directory):\n \"entrypoints\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n- \"protobuf>=3.6.0\",\n+ \"protobuf>=3.7.0\",\n \"pytz\",\n \"requests>=2.17.3\",\n \"packaging\",\n"}}},{"rowIdx":421,"cells":{"in_source_id":{"kind":"string","value":"google__turbinia-616"},"issue":{"kind":"string","value":"Add retries to tox\nTox fails when trying to check links within our docs if the link is temporarily down/unresponsive. Adding retries to sphinx config should take care of that.\n"},"before_files":{"kind":"list like","value":[{"content":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom __future__ import unicode_literals\nimport re\n\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom docutils import nodes, transforms\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Turbinia'\ncopyright = '2020, Google Inc'\nauthor = 'Turbinia maintainers'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n 'recommonmark'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n '**': [\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\n 'searchbox.html'\n ]\n}\n\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'turbiniadoc'\n\nhtml_logo = \"images/turbinia-logo.jpg\"\n\n\nclass ProcessLink(transforms.Transform):\n \"\"\"Transform definition to parse .md references to internal pages.\"\"\"\n\n default_priority = 1000\n\n def find_replace(self, node):\n \"\"\"Parses URIs containing .md and replaces them with their HTML page.\"\"\"\n if isinstance(node, nodes.reference) and 'refuri' in node:\n r = node['refuri']\n if r.endswith('.md'):\n r = r[:-3] + '.html'\n node['refuri'] = r\n\n return node\n\n def traverse(self, node):\n \"\"\"Traverse the document tree rooted at node.\n node : docutil node\n current root node to traverse\n \"\"\"\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)\n\n # pylint: disable=arguments-differ,attribute-defined-outside-init\n # this was taken from GRR's config file for documentation\n def apply(self):\n self.current_level = 0\n self.traverse(self.document)\n\n\ndef setup(app):\n \"\"\"Add custom parsers to Sphinx generation.\"\"\"\n app.add_config_value(\n 'recommonmark_config', {\n 'enable_auto_doc_ref': False,\n }, True)\n app.add_transform(AutoStructify)\n app.add_transform(ProcessLink)\n","path":"docs/conf.py"}],"string":"[\n {\n \"content\": \"# Configuration file for the Sphinx documentation builder.\\n#\\n# This file only contains a selection of the most common options. For a full\\n# list see the documentation:\\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\\n\\n# -- Path setup --------------------------------------------------------------\\n\\n# If extensions (or modules to document with autodoc) are in another directory,\\n# add these directories to sys.path here. If the directory is relative to the\\n# documentation root, use os.path.abspath to make it absolute, like shown here.\\n#\\n# import os\\n# import sys\\n# sys.path.insert(0, os.path.abspath('.'))\\n\\nfrom __future__ import unicode_literals\\nimport re\\n\\nfrom recommonmark.parser import CommonMarkParser\\nfrom recommonmark.transform import AutoStructify\\nfrom docutils import nodes, transforms\\n\\n# -- Project information -----------------------------------------------------\\n\\nproject = 'Turbinia'\\ncopyright = '2020, Google Inc'\\nauthor = 'Turbinia maintainers'\\n\\n# -- General configuration ---------------------------------------------------\\n\\n# Add any Sphinx extension module names here, as strings. 
They can be\\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\\n# ones.\\nextensions = [\\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\\n 'recommonmark'\\n]\\n\\n# Add any paths that contain templates here, relative to this directory.\\ntemplates_path = ['_templates']\\n\\n# List of patterns, relative to source directory, that match files and\\n# directories to ignore when looking for source files.\\n# This pattern also affects html_static_path and html_extra_path.\\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\\n\\n# -- Options for HTML output -------------------------------------------------\\n\\n# The theme to use for HTML and HTML Help pages. See the documentation for\\n# a list of builtin themes.\\n#\\nhtml_theme = 'sphinx_rtd_theme'\\n\\n# The master toctree document.\\nmaster_doc = 'index'\\n\\n# The name of the Pygments (syntax highlighting) style to use.\\npygments_style = 'sphinx'\\n\\n# Add any paths that contain custom static files (such as style sheets) here,\\n# relative to this directory. They are copied after the builtin static files,\\n# so a file named \\\"default.css\\\" will overwrite the builtin \\\"default.css\\\".\\nhtml_static_path = ['_static']\\n\\n# The default sidebars (for documents that don't match any pattern) are\\n# defined by theme itself. Builtin themes are using these templates by\\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\\n# 'searchbox.html']``.\\n#\\nhtml_sidebars = {\\n '**': [\\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\\n 'searchbox.html'\\n ]\\n}\\n\\n\\n# Output file base name for HTML help builder.\\nhtmlhelp_basename = 'turbiniadoc'\\n\\nhtml_logo = \\\"images/turbinia-logo.jpg\\\"\\n\\n\\nclass ProcessLink(transforms.Transform):\\n \\\"\\\"\\\"Transform definition to parse .md references to internal pages.\\\"\\\"\\\"\\n\\n default_priority = 1000\\n\\n def find_replace(self, node):\\n \\\"\\\"\\\"Parses URIs containing .md and replaces them with their HTML page.\\\"\\\"\\\"\\n if isinstance(node, nodes.reference) and 'refuri' in node:\\n r = node['refuri']\\n if r.endswith('.md'):\\n r = r[:-3] + '.html'\\n node['refuri'] = r\\n\\n return node\\n\\n def traverse(self, node):\\n \\\"\\\"\\\"Traverse the document tree rooted at node.\\n node : docutil node\\n current root node to traverse\\n \\\"\\\"\\\"\\n self.find_replace(node)\\n\\n for c in node.children:\\n self.traverse(c)\\n\\n # pylint: disable=arguments-differ,attribute-defined-outside-init\\n # this was taken from GRR's config file for documentation\\n def apply(self):\\n self.current_level = 0\\n self.traverse(self.document)\\n\\n\\ndef setup(app):\\n \\\"\\\"\\\"Add custom parsers to Sphinx generation.\\\"\\\"\\\"\\n app.add_config_value(\\n 'recommonmark_config', {\\n 'enable_auto_doc_ref': False,\\n }, True)\\n app.add_transform(AutoStructify)\\n app.add_transform(ProcessLink)\\n\",\n \"path\": \"docs/conf.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. 
For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom __future__ import unicode_literals\nimport re\n\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom docutils import nodes, transforms\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Turbinia'\ncopyright = '2020, Google Inc'\nauthor = 'Turbinia maintainers'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n 'recommonmark'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n '**': [\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\n 'searchbox.html'\n ]\n}\n\n# Adding retries to linkchecks before declaring a link broken\nlinkcheck_retries = 3\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'turbiniadoc'\n\nhtml_logo = \"images/turbinia-logo.jpg\"\n\n\nclass ProcessLink(transforms.Transform):\n \"\"\"Transform definition to parse .md references to internal pages.\"\"\"\n\n default_priority = 1000\n\n def find_replace(self, node):\n \"\"\"Parses URIs containing .md and replaces them with their HTML page.\"\"\"\n if isinstance(node, nodes.reference) and 'refuri' in node:\n r = node['refuri']\n if r.endswith('.md'):\n r = r[:-3] + '.html'\n node['refuri'] = r\n\n return node\n\n def traverse(self, node):\n \"\"\"Traverse the document tree rooted at node.\n node : docutil node\n current root node to traverse\n \"\"\"\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)\n\n # pylint: disable=arguments-differ,attribute-defined-outside-init\n # this was taken from GRR's config file for documentation\n def apply(self):\n self.current_level = 0\n self.traverse(self.document)\n\n\ndef setup(app):\n \"\"\"Add custom parsers to Sphinx generation.\"\"\"\n app.add_config_value(\n 'recommonmark_config', {\n 'enable_auto_doc_ref': False,\n }, True)\n app.add_transform(AutoStructify)\n app.add_transform(ProcessLink)\n","path":"docs/conf.py"}],"string":"[\n {\n \"content\": \"# Configuration file for the Sphinx documentation builder.\\n#\\n# This file only contains a selection of the most common options. For a full\\n# list see the documentation:\\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\\n\\n# -- Path setup --------------------------------------------------------------\\n\\n# If extensions (or modules to document with autodoc) are in another directory,\\n# add these directories to sys.path here. If the directory is relative to the\\n# documentation root, use os.path.abspath to make it absolute, like shown here.\\n#\\n# import os\\n# import sys\\n# sys.path.insert(0, os.path.abspath('.'))\\n\\nfrom __future__ import unicode_literals\\nimport re\\n\\nfrom recommonmark.parser import CommonMarkParser\\nfrom recommonmark.transform import AutoStructify\\nfrom docutils import nodes, transforms\\n\\n# -- Project information -----------------------------------------------------\\n\\nproject = 'Turbinia'\\ncopyright = '2020, Google Inc'\\nauthor = 'Turbinia maintainers'\\n\\n# -- General configuration ---------------------------------------------------\\n\\n# Add any Sphinx extension module names here, as strings. 
They can be\\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\\n# ones.\\nextensions = [\\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\\n 'recommonmark'\\n]\\n\\n# Add any paths that contain templates here, relative to this directory.\\ntemplates_path = ['_templates']\\n\\n# List of patterns, relative to source directory, that match files and\\n# directories to ignore when looking for source files.\\n# This pattern also affects html_static_path and html_extra_path.\\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\\n\\n# -- Options for HTML output -------------------------------------------------\\n\\n# The theme to use for HTML and HTML Help pages. See the documentation for\\n# a list of builtin themes.\\n#\\nhtml_theme = 'sphinx_rtd_theme'\\n\\n# The master toctree document.\\nmaster_doc = 'index'\\n\\n# The name of the Pygments (syntax highlighting) style to use.\\npygments_style = 'sphinx'\\n\\n# Add any paths that contain custom static files (such as style sheets) here,\\n# relative to this directory. They are copied after the builtin static files,\\n# so a file named \\\"default.css\\\" will overwrite the builtin \\\"default.css\\\".\\nhtml_static_path = ['_static']\\n\\n# The default sidebars (for documents that don't match any pattern) are\\n# defined by theme itself. Builtin themes are using these templates by\\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\\n# 'searchbox.html']``.\\n#\\nhtml_sidebars = {\\n '**': [\\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\\n 'searchbox.html'\\n ]\\n}\\n\\n# Adding retries to linkchecks before declaring a link broken\\nlinkcheck_retries = 3\\n\\n# Output file base name for HTML help builder.\\nhtmlhelp_basename = 'turbiniadoc'\\n\\nhtml_logo = \\\"images/turbinia-logo.jpg\\\"\\n\\n\\nclass ProcessLink(transforms.Transform):\\n \\\"\\\"\\\"Transform definition to parse .md references to internal pages.\\\"\\\"\\\"\\n\\n default_priority = 1000\\n\\n def find_replace(self, node):\\n \\\"\\\"\\\"Parses URIs containing .md and replaces them with their HTML page.\\\"\\\"\\\"\\n if isinstance(node, nodes.reference) and 'refuri' in node:\\n r = node['refuri']\\n if r.endswith('.md'):\\n r = r[:-3] + '.html'\\n node['refuri'] = r\\n\\n return node\\n\\n def traverse(self, node):\\n \\\"\\\"\\\"Traverse the document tree rooted at node.\\n node : docutil node\\n current root node to traverse\\n \\\"\\\"\\\"\\n self.find_replace(node)\\n\\n for c in node.children:\\n self.traverse(c)\\n\\n # pylint: disable=arguments-differ,attribute-defined-outside-init\\n # this was taken from GRR's config file for documentation\\n def apply(self):\\n self.current_level = 0\\n self.traverse(self.document)\\n\\n\\ndef setup(app):\\n \\\"\\\"\\\"Add custom parsers to Sphinx generation.\\\"\\\"\\\"\\n app.add_config_value(\\n 'recommonmark_config', {\\n 'enable_auto_doc_ref': False,\\n }, True)\\n app.add_transform(AutoStructify)\\n app.add_transform(ProcessLink)\\n\",\n \"path\": \"docs/conf.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/docs/conf.py b/docs/conf.py\nindex 68dbdd555..a0c2084bb 100644\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -76,6 +76,8 @@\n ]\n }\n \n+# Adding retries to linkchecks before declaring a link broken\n+linkcheck_retries = 3\n \n # Output file base name for HTML help builder.\n htmlhelp_basename = 
'turbiniadoc'\n"}}},{"rowIdx":422,"cells":{"in_source_id":{"kind":"string","value":"getmoto__moto-1992"},"issue":{"kind":"string","value":"Replace pyaml dependency with PyYAML\nThere is a dependency on pyaml in setup.py:\r\n\r\nhttps://github.com/spulec/moto/blob/master/setup.py#L18\r\n\r\nI think that this is intended to be PyYAML (which pyaml depends on), and I do not see any usages of pyaml itself in this codebase.\r\n\r\npyaml uses WTFPL (https://github.com/mk-fg/pretty-yaml/blob/master/COPYING) which is not approved by the OSI (https://opensource.org/minutes20090304)\n"},"before_files":{"kind":"list like","value":[{"content":"#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16\",\n \"botocore>=1.12.13\",\n \"cryptography>=2.3.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk!=0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.7',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='spulec@gmail.com',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"#!/usr/bin/env python\\nfrom __future__ import unicode_literals\\nimport setuptools\\nfrom setuptools import setup, find_packages\\nimport sys\\n\\n\\ninstall_requires = [\\n \\\"Jinja2>=2.7.3\\\",\\n \\\"boto>=2.36.0\\\",\\n \\\"boto3>=1.6.16\\\",\\n \\\"botocore>=1.12.13\\\",\\n \\\"cryptography>=2.3.0\\\",\\n \\\"requests>=2.5\\\",\\n \\\"xmltodict\\\",\\n \\\"six>1.9\\\",\\n \\\"werkzeug\\\",\\n \\\"pyaml\\\",\\n \\\"pytz\\\",\\n \\\"python-dateutil<3.0.0,>=2.1\\\",\\n \\\"python-jose<3.0.0\\\",\\n \\\"mock\\\",\\n \\\"docker>=2.5.1\\\",\\n \\\"jsondiff==1.1.1\\\",\\n \\\"aws-xray-sdk!=0.96,>=0.93\\\",\\n \\\"responses>=0.9.0\\\",\\n]\\n\\nextras_require = {\\n 'server': ['flask'],\\n}\\n\\n# https://hynek.me/articles/conditional-python-dependencies/\\nif int(setuptools.__version__.split(\\\".\\\", 1)[0]) < 18:\\n if sys.version_info[0:2] < (3, 3):\\n install_requires.append(\\\"backports.tempfile\\\")\\nelse:\\n extras_require[\\\":python_version<'3.3'\\\"] = 
[\\\"backports.tempfile\\\"]\\n\\n\\nsetup(\\n name='moto',\\n version='1.3.7',\\n description='A library that allows your python tests to easily'\\n ' mock out the boto library',\\n author='Steve Pulec',\\n author_email='spulec@gmail.com',\\n url='https://github.com/spulec/moto',\\n entry_points={\\n 'console_scripts': [\\n 'moto_server = moto.server:main',\\n ],\\n },\\n packages=find_packages(exclude=(\\\"tests\\\", \\\"tests.*\\\")),\\n install_requires=install_requires,\\n extras_require=extras_require,\\n include_package_data=True,\\n license=\\\"Apache\\\",\\n test_suite=\\\"tests\\\",\\n classifiers=[\\n \\\"Programming Language :: Python :: 2\\\",\\n \\\"Programming Language :: Python :: 2.7\\\",\\n \\\"Programming Language :: Python :: 3\\\",\\n \\\"Programming Language :: Python :: 3.3\\\",\\n \\\"Programming Language :: Python :: 3.4\\\",\\n \\\"Programming Language :: Python :: 3.5\\\",\\n \\\"Programming Language :: Python :: 3.6\\\",\\n \\\"License :: OSI Approved :: Apache Software License\\\",\\n \\\"Topic :: Software Development :: Testing\\\",\\n ],\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16\",\n \"botocore>=1.12.13\",\n \"cryptography>=2.3.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"PyYAML\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk!=0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.7',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='spulec@gmail.com',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"#!/usr/bin/env python\\nfrom __future__ import unicode_literals\\nimport setuptools\\nfrom setuptools import setup, find_packages\\nimport sys\\n\\n\\ninstall_requires = [\\n \\\"Jinja2>=2.7.3\\\",\\n \\\"boto>=2.36.0\\\",\\n \\\"boto3>=1.6.16\\\",\\n \\\"botocore>=1.12.13\\\",\\n \\\"cryptography>=2.3.0\\\",\\n \\\"requests>=2.5\\\",\\n \\\"xmltodict\\\",\\n \\\"six>1.9\\\",\\n \\\"werkzeug\\\",\\n \\\"PyYAML\\\",\\n \\\"pytz\\\",\\n 
\\\"python-dateutil<3.0.0,>=2.1\\\",\\n \\\"python-jose<3.0.0\\\",\\n \\\"mock\\\",\\n \\\"docker>=2.5.1\\\",\\n \\\"jsondiff==1.1.1\\\",\\n \\\"aws-xray-sdk!=0.96,>=0.93\\\",\\n \\\"responses>=0.9.0\\\",\\n]\\n\\nextras_require = {\\n 'server': ['flask'],\\n}\\n\\n# https://hynek.me/articles/conditional-python-dependencies/\\nif int(setuptools.__version__.split(\\\".\\\", 1)[0]) < 18:\\n if sys.version_info[0:2] < (3, 3):\\n install_requires.append(\\\"backports.tempfile\\\")\\nelse:\\n extras_require[\\\":python_version<'3.3'\\\"] = [\\\"backports.tempfile\\\"]\\n\\n\\nsetup(\\n name='moto',\\n version='1.3.7',\\n description='A library that allows your python tests to easily'\\n ' mock out the boto library',\\n author='Steve Pulec',\\n author_email='spulec@gmail.com',\\n url='https://github.com/spulec/moto',\\n entry_points={\\n 'console_scripts': [\\n 'moto_server = moto.server:main',\\n ],\\n },\\n packages=find_packages(exclude=(\\\"tests\\\", \\\"tests.*\\\")),\\n install_requires=install_requires,\\n extras_require=extras_require,\\n include_package_data=True,\\n license=\\\"Apache\\\",\\n test_suite=\\\"tests\\\",\\n classifiers=[\\n \\\"Programming Language :: Python :: 2\\\",\\n \\\"Programming Language :: Python :: 2.7\\\",\\n \\\"Programming Language :: Python :: 3\\\",\\n \\\"Programming Language :: Python :: 3.3\\\",\\n \\\"Programming Language :: Python :: 3.4\\\",\\n \\\"Programming Language :: Python :: 3.5\\\",\\n \\\"Programming Language :: Python :: 3.6\\\",\\n \\\"License :: OSI Approved :: Apache Software License\\\",\\n \\\"Topic :: Software Development :: Testing\\\",\\n ],\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/setup.py b/setup.py\nindex a1b8c5daecbe..0598d7a10aa7 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n- \"pyaml\",\n+ \"PyYAML\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n"}}},{"rowIdx":423,"cells":{"in_source_id":{"kind":"string","value":"Nitrate__Nitrate-527"},"issue":{"kind":"string","value":"Remove dependency mock\nUse `unittest.mock` instead.\n"},"before_files":{"kind":"list like","value":[{"content":"# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n 
description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='qcxhome@gmail.com',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"# -*- coding: utf-8 -*-\\n\\nfrom setuptools import setup, find_packages\\n\\n\\nwith open('VERSION.txt', 'r') as f:\\n pkg_version = f.read().strip()\\n\\n\\ndef get_long_description():\\n with open('README.rst', 'r') as f:\\n return f.read()\\n\\n\\ninstall_requires = [\\n 'beautifulsoup4 >= 4.1.1',\\n 'django >= 2.0,<3.0',\\n 'django-contrib-comments == 1.9.1',\\n 'django-tinymce == 2.7.0',\\n 'django-uuslug == 1.1.8',\\n 'html2text',\\n 'odfpy >= 0.9.6',\\n 'python-bugzilla',\\n 'xmltodict',\\n 'kobo == 0.9.0'\\n]\\n\\nextras_require = {\\n 'mysql': ['mysqlclient >= 1.2.3'],\\n 'pgsql': ['psycopg2 == 2.7.5'],\\n\\n # Required for tcms.auth.backends.KerberosBackend\\n 'krbauth': [\\n 'kerberos == 1.2.5'\\n ],\\n\\n # Packages for building documentation\\n 'docs': [\\n 'Sphinx >= 1.1.2',\\n 'sphinx_rtd_theme',\\n ],\\n\\n # Necessary packages for running tests\\n 'tests': [\\n 'beautifulsoup4',\\n 'coverage',\\n 'factory_boy',\\n 'flake8',\\n 'mock',\\n 'pytest',\\n 'pytest-cov',\\n 'pytest-django',\\n ],\\n\\n # Contain tools that assists the development\\n 'devtools': [\\n 'django-debug-toolbar',\\n 'tox',\\n 'django-extensions',\\n 'pygraphviz',\\n ],\\n\\n # Required packages required to run async tasks\\n 'async': [\\n 'celery == 4.2.0',\\n ],\\n\\n 'multiauth': [\\n 'social-auth-app-django == 3.1.0',\\n ]\\n}\\n\\nsetup(\\n name='nitrate-tcms',\\n version=pkg_version,\\n description='A full-featured Test Case Management System',\\n long_description=get_long_description(),\\n author='Nitrate Team',\\n maintainer='Chenxiong Qi',\\n maintainer_email='qcxhome@gmail.com',\\n url='https://github.com/Nitrate/Nitrate/',\\n license='GPLv2+',\\n keywords='test case',\\n install_requires=install_requires,\\n extras_require=extras_require,\\n python_requires='>=3.6',\\n package_dir={'': 'src'},\\n packages=find_packages('src', exclude=['test*']),\\n include_package_data=True,\\n zip_safe=False,\\n classifiers=[\\n 'Framework :: Django',\\n 'Framework :: Django :: 2.0',\\n 'Framework :: Django :: 2.1',\\n 'Framework :: Django :: 2.2',\\n 'Intended Audience :: Developers',\\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\\n 'Programming Language :: Python :: 3',\\n 'Programming Language :: 
Python :: 3.6',\\n 'Programming Language :: Python :: 3.7',\\n 'Programming Language :: Python :: 3 :: Only',\\n 'Topic :: Software Development :: Quality Assurance',\\n 'Topic :: Software Development :: Testing',\\n ],\\n project_urls={\\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\\n 'Documentation': 'https://nitrate.readthedocs.io/',\\n },\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='qcxhome@gmail.com',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"# -*- coding: utf-8 -*-\\n\\nfrom setuptools import setup, find_packages\\n\\n\\nwith open('VERSION.txt', 'r') as f:\\n pkg_version = f.read().strip()\\n\\n\\ndef get_long_description():\\n with open('README.rst', 'r') as f:\\n return f.read()\\n\\n\\ninstall_requires = [\\n 'beautifulsoup4 >= 4.1.1',\\n 'django >= 2.0,<3.0',\\n 'django-contrib-comments == 1.9.1',\\n 'django-tinymce == 2.7.0',\\n 'django-uuslug == 
1.1.8',\\n 'html2text',\\n 'odfpy >= 0.9.6',\\n 'python-bugzilla',\\n 'xmltodict',\\n 'kobo == 0.9.0'\\n]\\n\\nextras_require = {\\n 'mysql': ['mysqlclient >= 1.2.3'],\\n 'pgsql': ['psycopg2 == 2.7.5'],\\n\\n # Required for tcms.auth.backends.KerberosBackend\\n 'krbauth': [\\n 'kerberos == 1.2.5'\\n ],\\n\\n # Packages for building documentation\\n 'docs': [\\n 'Sphinx >= 1.1.2',\\n 'sphinx_rtd_theme',\\n ],\\n\\n # Necessary packages for running tests\\n 'tests': [\\n 'beautifulsoup4',\\n 'coverage',\\n 'factory_boy',\\n 'flake8',\\n 'pytest',\\n 'pytest-cov',\\n 'pytest-django',\\n ],\\n\\n # Contain tools that assists the development\\n 'devtools': [\\n 'django-debug-toolbar',\\n 'tox',\\n 'django-extensions',\\n 'pygraphviz',\\n ],\\n\\n # Required packages required to run async tasks\\n 'async': [\\n 'celery == 4.2.0',\\n ],\\n\\n 'multiauth': [\\n 'social-auth-app-django == 3.1.0',\\n ]\\n}\\n\\nsetup(\\n name='nitrate-tcms',\\n version=pkg_version,\\n description='A full-featured Test Case Management System',\\n long_description=get_long_description(),\\n author='Nitrate Team',\\n maintainer='Chenxiong Qi',\\n maintainer_email='qcxhome@gmail.com',\\n url='https://github.com/Nitrate/Nitrate/',\\n license='GPLv2+',\\n keywords='test case',\\n install_requires=install_requires,\\n extras_require=extras_require,\\n python_requires='>=3.6',\\n package_dir={'': 'src'},\\n packages=find_packages('src', exclude=['test*']),\\n include_package_data=True,\\n zip_safe=False,\\n classifiers=[\\n 'Framework :: Django',\\n 'Framework :: Django :: 2.0',\\n 'Framework :: Django :: 2.1',\\n 'Framework :: Django :: 2.2',\\n 'Intended Audience :: Developers',\\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\\n 'Programming Language :: Python :: 3',\\n 'Programming Language :: Python :: 3.6',\\n 'Programming Language :: Python :: 3.7',\\n 'Programming Language :: Python :: 3 :: Only',\\n 'Topic :: Software Development :: Quality Assurance',\\n 'Topic :: Software Development :: Testing',\\n ],\\n project_urls={\\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\\n 'Documentation': 'https://nitrate.readthedocs.io/',\\n },\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/setup.py b/setup.py\nindex 80906912..f0b2be6b 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,6 @@ def get_long_description():\n 'coverage',\n 'factory_boy',\n 'flake8',\n- 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\ndiff --git a/src/tests/core/test_commands.py b/src/tests/core/test_commands.py\nindex a375629c..b2a83775 100644\n--- a/src/tests/core/test_commands.py\n+++ b/src/tests/core/test_commands.py\n@@ -1,7 +1,7 @@\n # -*- coding: utf-8 -*-\n \n from django.contrib.auth.models import Group\n-from mock import patch\n+from unittest.mock import patch\n from django import test\n from django.core.management import call_command\n \ndiff --git a/src/tests/core/test_core.py b/src/tests/core/test_core.py\nindex 96388ed9..194a1ac9 100644\n--- a/src/tests/core/test_core.py\n+++ b/src/tests/core/test_core.py\n@@ -2,8 +2,7 @@\n \n import sys\n import unittest\n-from mock import patch\n-from mock import Mock\n+from unittest.mock import patch, Mock\n \n from django import test\n from django.conf import settings\ndiff --git a/src/tests/core/test_files.py b/src/tests/core/test_files.py\nindex 62e0aa46..0a449f4b 100644\n--- a/src/tests/core/test_files.py\n+++ 
b/src/tests/core/test_files.py\n@@ -11,7 +11,7 @@\n from django.test import RequestFactory\n from django.urls import reverse\n from django.conf import settings\n-from mock import patch\n+from unittest.mock import patch\n \n from tcms.core.files import able_to_delete_attachment\n from tcms.management.models import TestAttachment\ndiff --git a/src/tests/issuetracker/test_services.py b/src/tests/issuetracker/test_services.py\nindex 408000b4..f8f5716e 100644\n--- a/src/tests/issuetracker/test_services.py\n+++ b/src/tests/issuetracker/test_services.py\n@@ -3,7 +3,7 @@\n import unittest\n \n from django import test\n-from mock import Mock\n+from unittest.mock import Mock\n \n from tcms.issuetracker import services\n from tcms.issuetracker.services import IssueTrackerService\ndiff --git a/src/tests/issuetracker/test_task.py b/src/tests/issuetracker/test_task.py\nindex e8f8b3f5..8e4fb306 100644\n--- a/src/tests/issuetracker/test_task.py\n+++ b/src/tests/issuetracker/test_task.py\n@@ -2,7 +2,7 @@\n \n import unittest\n \n-from mock import patch, Mock\n+from unittest.mock import patch, Mock\n from tcms.issuetracker.task import bugzilla_external_track\n \n \ndiff --git a/src/tests/test_auth.py b/src/tests/test_auth.py\nindex 1806ae05..5810c48b 100644\n--- a/src/tests/test_auth.py\n+++ b/src/tests/test_auth.py\n@@ -3,7 +3,7 @@\n import datetime\n \n from hashlib import sha1\n-from mock import patch\n+from unittest.mock import patch\n \n from django.conf import settings\n from django.contrib.auth.models import User\ndiff --git a/src/tests/testcases/test_models.py b/src/tests/testcases/test_models.py\nindex b1f25852..0814583b 100644\n--- a/src/tests/testcases/test_models.py\n+++ b/src/tests/testcases/test_models.py\n@@ -1,7 +1,7 @@\n # -*- coding: utf-8 -*-\n \n from datetime import timedelta\n-from mock import patch\n+from unittest.mock import patch\n \n from django.contrib.auth.models import User\n from django.core import mail\ndiff --git a/src/tests/testcases/test_views.py b/src/tests/testcases/test_views.py\nindex 7b58d714..fd8caedf 100644\n--- a/src/tests/testcases/test_views.py\n+++ b/src/tests/testcases/test_views.py\n@@ -7,8 +7,7 @@\n from bs4 import BeautifulSoup\n from datetime import datetime, timedelta\n from operator import attrgetter, itemgetter\n-\n-import mock\n+from unittest.mock import patch\n \n from django import test\n from django.contrib.auth.models import User\n@@ -62,13 +61,13 @@ def test_get_plan_id_from_post_request(self):\n pk = plan_from_request_or_none(request, pk_enough=True)\n self.assertEqual(1, pk)\n \n- @mock.patch('tcms.testcases.views.get_object_or_404')\n+ @patch('tcms.testcases.views.get_object_or_404')\n def test_get_plan_object_from_get_request(self, get_object_or_404):\n request = self.factory.get('/uri', data={'from_plan': 1})\n plan = plan_from_request_or_none(request)\n self.assertEqual(get_object_or_404.return_value, plan)\n \n- @mock.patch('tcms.testcases.views.get_object_or_404')\n+ @patch('tcms.testcases.views.get_object_or_404')\n def test_get_plan_object_from_post_request(self, get_object_or_404):\n request = self.factory.post('/uri', data={'from_plan': 1})\n plan = plan_from_request_or_none(request)\n@@ -84,14 +83,14 @@ def test_missing_plan_id_in_post_request(self):\n plan = plan_from_request_or_none(request)\n self.assertIsNone(plan)\n \n- @mock.patch('tcms.testcases.views.get_object_or_404')\n+ @patch('tcms.testcases.views.get_object_or_404')\n def test_nonexisting_plan_id_from_get_request(self, get_object_or_404):\n get_object_or_404.side_effect = 
Http404\n \n request = self.factory.get('/uri', data={'from_plan': 1})\n self.assertRaises(Http404, plan_from_request_or_none, request)\n \n- @mock.patch('tcms.testcases.views.get_object_or_404')\n+ @patch('tcms.testcases.views.get_object_or_404')\n def test_nonexisting_plan_id_from_post_request(self, get_object_or_404):\n get_object_or_404.side_effect = Http404\n \n@@ -352,7 +351,7 @@ def test_fail_to_remove_if_component_not_exist(self):\n data = json.loads(resp.content)\n self.assertIn('Cannot remove component', data['response'])\n \n- @mock.patch('tcms.testcases.models.TestCase.remove_component')\n+ @patch('tcms.testcases.models.TestCase.remove_component')\n def test_case_remove_component_fails(self, remove_component):\n remove_component.side_effect = Exception\n \n@@ -682,8 +681,8 @@ def test_remove_tags_from_cases(self):\n TestCaseTag.objects.filter(\n case=self.case_3.pk, tag=self.tag_python.pk).exists())\n \n- @mock.patch('tcms.testcases.models.TestCase.remove_tag',\n- side_effect=ValueError('value error'))\n+ @patch('tcms.testcases.models.TestCase.remove_tag',\n+ side_effect=ValueError('value error'))\n def test_ensure_response_if_error_happens_when_remove_tag(self, remove_tag):\n # This test does not care about what tags are removed from which cases\n response = self.client.post(\n@@ -1535,7 +1534,7 @@ def test_create_new_plan_case_rel_sort_key_for_link(self):\n orig_plan=self.orphan_plan,\n copy_case=False)\n \n- @mock.patch('tcms.testplans.models.TestPlan.get_case_sortkey')\n+ @patch('tcms.testplans.models.TestPlan.get_case_sortkey')\n def test_clone_to_same_plan(self, get_case_sortkey):\n # Make it easier to assert the new sort key.\n get_case_sortkey.return_value = 100\n@@ -1783,7 +1782,7 @@ def test_invalid_arguments(self):\n data = json.loads(resp.content)\n self.assertIn('Cannot add component', data['response'])\n \n- @mock.patch('tcms.testcases.models.TestCase.add_component')\n+ @patch('tcms.testcases.models.TestCase.add_component')\n def test_failed_to_add_component(self, add_component):\n add_component.side_effect = ValueError\n \n@@ -1933,7 +1932,7 @@ def test_invalid_input_for_adding_an_issue(self):\n ['Invalid issue tracker that does not exist.'],\n error_messages)\n \n- @mock.patch('tcms.testcases.models.TestCase.add_issue')\n+ @patch('tcms.testcases.models.TestCase.add_issue')\n def test_fail_if_case_add_issue_fails(self, add_issue):\n add_issue.side_effect = Exception('Something wrong')\n \n@@ -2437,14 +2436,14 @@ def setUpTestData(cls):\n cls.case_run_1.notes = 'Some notes'\n cls.case_run_1.save()\n \n- with mock.patch('django.utils.timezone.now') as mock_now:\n+ with patch('django.utils.timezone.now') as mock_now:\n cls.submit_date = datetime(2020, 1, 22, 19, 47, 30)\n mock_now.return_value = cls.submit_date\n add_comment(\n cls.tester, 'testruns.testcaserun', [cls.case_run_1.pk],\n 'first comment')\n \n- with mock.patch('django.utils.timezone.now') as mock_now:\n+ with patch('django.utils.timezone.now') as mock_now:\n cls.submit_date_later = cls.submit_date + timedelta(minutes=10)\n mock_now.return_value = cls.submit_date_later\n add_comment(\ndiff --git a/src/tests/testplans/test_forms.py b/src/tests/testplans/test_forms.py\nindex e16cb49d..996df0cd 100644\n--- a/src/tests/testplans/test_forms.py\n+++ b/src/tests/testplans/test_forms.py\n@@ -1,7 +1,7 @@\n # -*- coding: utf-8 -*-\n \n import unittest\n-from mock import Mock\n+from unittest.mock import Mock\n \n from tcms.testplans.forms import UploadedHTMLFile\n \ndiff --git a/src/tests/testplans/test_models.py 
b/src/tests/testplans/test_models.py\nindex 7f9e604f..2daec6a7 100644\n--- a/src/tests/testplans/test_models.py\n+++ b/src/tests/testplans/test_models.py\n@@ -2,7 +2,7 @@\n \n from django.conf import settings\n from django import test\n-from mock import patch\n+from unittest.mock import patch\n \n from tcms.testplans.helpers import email\n from tcms.testplans.models import _disconnect_signals, TestPlan\ndiff --git a/src/tests/testruns/test_views.py b/src/tests/testruns/test_views.py\nindex 48426be7..1a9d7329 100644\n--- a/src/tests/testruns/test_views.py\n+++ b/src/tests/testruns/test_views.py\n@@ -7,7 +7,7 @@\n from http import HTTPStatus\n from operator import attrgetter\n \n-from mock import patch\n+from unittest.mock import patch\n from xml.etree import ElementTree\n \n from django.db.models import Max\ndiff --git a/src/tests/xmlrpc/test_testcase.py b/src/tests/xmlrpc/test_testcase.py\nindex 7dfe0bfe..e4451228 100644\n--- a/src/tests/xmlrpc/test_testcase.py\n+++ b/src/tests/xmlrpc/test_testcase.py\n@@ -7,7 +7,7 @@\n from django_comments.models import Comment\n from django.db.models import Max, Min\n from django import test\n-from mock import patch, PropertyMock\n+from unittest.mock import patch, PropertyMock\n \n from tcms.core.utils import checksum\n from tcms.issuetracker.models import Issue\ndiff --git a/src/tests/xmlrpc/test_user.py b/src/tests/xmlrpc/test_user.py\nindex 09d16574..9a7b3332 100644\n--- a/src/tests/xmlrpc/test_user.py\n+++ b/src/tests/xmlrpc/test_user.py\n@@ -1,6 +1,6 @@\n # -*- coding: utf-8 -*-\n \n-from mock import patch\n+from unittest.mock import patch\n \n from django.contrib.auth.models import User\n from django.test import TestCase\n"}}},{"rowIdx":424,"cells":{"in_source_id":{"kind":"string","value":"flask-admin__flask-admin-1068"},"issue":{"kind":"string","value":"Tests failing due to new Pillow 3.0.0\nTests are currently failing when a tiff file (https://github.com/flask-admin/flask-admin/blob/master/flask_admin/tests/data/copyleft.jpg) is uploaded:\n\n```\n======================================================================\nFAIL: flask_admin.tests.test_form_upload.test_image_upload_field\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/home/travis/virtualenv/python2.6.9/lib/python2.6/site-packages/nose/case.py\", line 197, in runTest\n self.test(*self.arg)\n File \"/home/travis/build/flask-admin/flask-admin/flask_admin/tests/test_form_upload.py\", line 225, in test_image_upload_field\n ok_(my_form.validate())\nAssertionError: None\n>> raise AssertionError(None)\n```\n\nThe issue has to do with the new 3.0.0 version of Pillow. I started an issue for it here: https://github.com/python-pillow/Pillow/issues/1466\n\nA temporary solution might be to lock Pillow to 2.9.0. 
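As a rough reproduction sketch (editorial, not from the original report: the fixture path and the TIFF-in-`.jpg` detail come from the text above, and the exact exception Pillow 3.0.0 raises is an assumption), the failure reduces to opening and verifying the file with Pillow directly, which is what the image field validation ultimately does:

```python
# Hypothetical reproduction, assuming Pillow 3.0.0 is installed.
# copyleft.jpg is really a TIFF; Pillow detects the format from the
# file contents rather than the extension, and the 3.0.0 TIFF plugin
# fails on this file, which is why form.validate() fails in the test.
from PIL import Image

with open("flask_admin/tests/data/copyleft.jpg", "rb") as fp:
    image = Image.open(fp)  # format sniffed from bytes, not extension
    image.verify()          # raises under Pillow 3.0.0, passes under 2.9.0
```
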
I can submit a pull request if that's what we decide to do: https://github.com/flask-admin/flask-admin/compare/master...pawl:fix_pillow_build\n\n"},"before_files":{"kind":"list like","value":[{"content":"# Fix for older setuptools\nimport re\nimport os\n\nfrom setuptools import setup, find_packages\n\n\ndef fpath(name):\n return os.path.join(os.path.dirname(__file__), name)\n\n\ndef read(fname):\n return open(fpath(fname)).read()\n\n\ndef desc():\n info = read('README.rst')\n try:\n return info + '\\n\\n' + read('doc/changelog.rst')\n except IOError:\n return info\n\n# grep flask_admin/__init__.py since python 3.x cannot import it before using 2to3\nfile_text = read(fpath('flask_admin/__init__.py'))\n\n\ndef grep(attrname):\n pattern = r\"{0}\\W*=\\W*'([^']+)'\".format(attrname)\n strval, = re.findall(pattern, file_text)\n return strval\n\n\nsetup(\n name='Flask-Admin',\n version=grep('__version__'),\n url='https://github.com/flask-admin/flask-admin/',\n license='BSD',\n author=grep('__author__'),\n author_email=grep('__email__'),\n description='Simple and extensible admin interface framework for Flask',\n long_description=desc(),\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=[\n 'Flask>=0.7',\n 'wtforms'\n ],\n tests_require=[\n 'nose>=1.0',\n 'pillow',\n 'mongoengine',\n 'pymongo',\n 'wtf-peewee',\n 'sqlalchemy',\n 'flask-mongoengine',\n 'flask-sqlalchemy',\n 'flask-babelex',\n 'shapely',\n 'geoalchemy2',\n 'psycopg2',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n test_suite='nose.collector'\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"# Fix for older setuptools\\nimport re\\nimport os\\n\\nfrom setuptools import setup, find_packages\\n\\n\\ndef fpath(name):\\n return os.path.join(os.path.dirname(__file__), name)\\n\\n\\ndef read(fname):\\n return open(fpath(fname)).read()\\n\\n\\ndef desc():\\n info = read('README.rst')\\n try:\\n return info + '\\\\n\\\\n' + read('doc/changelog.rst')\\n except IOError:\\n return info\\n\\n# grep flask_admin/__init__.py since python 3.x cannot import it before using 2to3\\nfile_text = read(fpath('flask_admin/__init__.py'))\\n\\n\\ndef grep(attrname):\\n pattern = r\\\"{0}\\\\W*=\\\\W*'([^']+)'\\\".format(attrname)\\n strval, = re.findall(pattern, file_text)\\n return strval\\n\\n\\nsetup(\\n name='Flask-Admin',\\n version=grep('__version__'),\\n url='https://github.com/flask-admin/flask-admin/',\\n license='BSD',\\n author=grep('__author__'),\\n author_email=grep('__email__'),\\n description='Simple and extensible admin interface framework for Flask',\\n long_description=desc(),\\n packages=find_packages(),\\n include_package_data=True,\\n zip_safe=False,\\n platforms='any',\\n install_requires=[\\n 'Flask>=0.7',\\n 'wtforms'\\n ],\\n tests_require=[\\n 'nose>=1.0',\\n 'pillow',\\n 'mongoengine',\\n 'pymongo',\\n 'wtf-peewee',\\n 'sqlalchemy',\\n 'flask-mongoengine',\\n 'flask-sqlalchemy',\\n 'flask-babelex',\\n 'shapely',\\n 'geoalchemy2',\\n 'psycopg2',\\n ],\\n classifiers=[\\n 'Development Status :: 4 - Beta',\\n 'Environment :: 
Web Environment',\\n 'Intended Audience :: Developers',\\n 'License :: OSI Approved :: BSD License',\\n 'Operating System :: OS Independent',\\n 'Programming Language :: Python',\\n 'Topic :: Software Development :: Libraries :: Python Modules',\\n 'Programming Language :: Python :: 2.7',\\n 'Programming Language :: Python :: 2.6',\\n 'Programming Language :: Python :: 3.3',\\n 'Programming Language :: Python :: 3.4',\\n ],\\n test_suite='nose.collector'\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# Fix for older setuptools\nimport re\nimport os\n\nfrom setuptools import setup, find_packages\n\n\ndef fpath(name):\n return os.path.join(os.path.dirname(__file__), name)\n\n\ndef read(fname):\n return open(fpath(fname)).read()\n\n\ndef desc():\n info = read('README.rst')\n try:\n return info + '\\n\\n' + read('doc/changelog.rst')\n except IOError:\n return info\n\n# grep flask_admin/__init__.py since python 3.x cannot import it before using 2to3\nfile_text = read(fpath('flask_admin/__init__.py'))\n\n\ndef grep(attrname):\n pattern = r\"{0}\\W*=\\W*'([^']+)'\".format(attrname)\n strval, = re.findall(pattern, file_text)\n return strval\n\n\nsetup(\n name='Flask-Admin',\n version=grep('__version__'),\n url='https://github.com/flask-admin/flask-admin/',\n license='BSD',\n author=grep('__author__'),\n author_email=grep('__email__'),\n description='Simple and extensible admin interface framework for Flask',\n long_description=desc(),\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=[\n 'Flask>=0.7',\n 'wtforms'\n ],\n tests_require=[\n 'nose>=1.0',\n 'pillow==2.9.0',\n 'mongoengine',\n 'pymongo',\n 'wtf-peewee',\n 'sqlalchemy',\n 'flask-mongoengine',\n 'flask-sqlalchemy',\n 'flask-babelex',\n 'shapely',\n 'geoalchemy2',\n 'psycopg2',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n test_suite='nose.collector'\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"# Fix for older setuptools\\nimport re\\nimport os\\n\\nfrom setuptools import setup, find_packages\\n\\n\\ndef fpath(name):\\n return os.path.join(os.path.dirname(__file__), name)\\n\\n\\ndef read(fname):\\n return open(fpath(fname)).read()\\n\\n\\ndef desc():\\n info = read('README.rst')\\n try:\\n return info + '\\\\n\\\\n' + read('doc/changelog.rst')\\n except IOError:\\n return info\\n\\n# grep flask_admin/__init__.py since python 3.x cannot import it before using 2to3\\nfile_text = read(fpath('flask_admin/__init__.py'))\\n\\n\\ndef grep(attrname):\\n pattern = r\\\"{0}\\\\W*=\\\\W*'([^']+)'\\\".format(attrname)\\n strval, = re.findall(pattern, file_text)\\n return strval\\n\\n\\nsetup(\\n name='Flask-Admin',\\n version=grep('__version__'),\\n url='https://github.com/flask-admin/flask-admin/',\\n license='BSD',\\n author=grep('__author__'),\\n author_email=grep('__email__'),\\n description='Simple and extensible admin interface framework for Flask',\\n long_description=desc(),\\n packages=find_packages(),\\n include_package_data=True,\\n zip_safe=False,\\n platforms='any',\\n 
install_requires=[\\n 'Flask>=0.7',\\n 'wtforms'\\n ],\\n tests_require=[\\n 'nose>=1.0',\\n 'pillow==2.9.0',\\n 'mongoengine',\\n 'pymongo',\\n 'wtf-peewee',\\n 'sqlalchemy',\\n 'flask-mongoengine',\\n 'flask-sqlalchemy',\\n 'flask-babelex',\\n 'shapely',\\n 'geoalchemy2',\\n 'psycopg2',\\n ],\\n classifiers=[\\n 'Development Status :: 4 - Beta',\\n 'Environment :: Web Environment',\\n 'Intended Audience :: Developers',\\n 'License :: OSI Approved :: BSD License',\\n 'Operating System :: OS Independent',\\n 'Programming Language :: Python',\\n 'Topic :: Software Development :: Libraries :: Python Modules',\\n 'Programming Language :: Python :: 2.7',\\n 'Programming Language :: Python :: 2.6',\\n 'Programming Language :: Python :: 3.3',\\n 'Programming Language :: Python :: 3.4',\\n ],\\n test_suite='nose.collector'\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/examples/forms/requirements.txt b/examples/forms/requirements.txt\nindex 1d4552c1d..3c503e3c4 100644\n--- a/examples/forms/requirements.txt\n+++ b/examples/forms/requirements.txt\n@@ -1,4 +1,4 @@\n Flask\n Flask-Admin\n Flask-SQLAlchemy\n-pillow\n\\ No newline at end of file\n+pillow==2.9.0\ndiff --git a/requirements-dev.txt b/requirements-dev.txt\nindex 7d96bdb70..a2435b77b 100644\n--- a/requirements-dev.txt\n+++ b/requirements-dev.txt\n@@ -5,7 +5,7 @@ peewee\n wtf-peewee\n pymongo==2.8\n flask-mongoengine\n-pillow\n+pillow==2.9.0\n Babel<=1.3\n flask-babelex\n shapely==1.5.9\ndiff --git a/setup.py b/setup.py\nindex d46d6345e..b527c8dde 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -49,7 +49,7 @@ def grep(attrname):\n ],\n tests_require=[\n 'nose>=1.0',\n- 'pillow',\n+ 'pillow==2.9.0',\n 'mongoengine',\n 'pymongo',\n 'wtf-peewee',\n"}}},{"rowIdx":425,"cells":{"in_source_id":{"kind":"string","value":"google-research__t5x-475"},"issue":{"kind":"string","value":"Seg Fault after saving checkpoints\nHi, \r\n\r\nI am getting a seg fault sometimes after the model has saved the checkpoint. It is not every checkpoint and seems to be random which checkpoints it crashes after. I am not sure if it is related to issue #340\r\n\r\nFor example, I am running `prompt_tuning/scripts/sst2-demo-xxl.sh`, and the output is below. 
\r\n\r\n```\r\n317 18:14:56.525280 140415323761728 utils.py:138] Saved Numpy Arrays for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/numpy_checkpoints/checkpoint_1104000\r\nI0317 18:14:56.604028 140415323761728 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896\r\nI0317 18:14:56.614308 140622481194048 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896\r\nI0317 18:14:56.624289 140590966570048 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896\r\nI0317 18:14:56.653718 140272509271104 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896\r\nFatal Python error: Segmentation fault\r\n\r\n\r\nThread 0x00007fdb1dc01700 (most recent call first):\r\n File \"/home/dptam/.local/lib/python3.8/site-packages/jax/interpreters/pxla.py\", line 664 in _sda_value\r\n File \"/home/dptam/.local/lib/python3.8/site-packages/jax/_src/device_array.py\", line 266 in __array__\r\n File \"/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoints.py\", line 447 in \r\n File \"/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoint_importer.py\", line 84 in get\r\n File \"/usr/lib/python3.8/concurrent/futures/thread.py\", line 57 in run\r\n File \"/usr/lib/python3.8/concurrent/futures/thread.py\", line 80 in _worker\r\n File \"/usr/lib/python3.8/threading.py\", line 870 in run\r\n File \"/usr/lib/python3.8/threading.py\", line 932 in _bootstrap_inner\r\n File \"/usr/lib/python3.8/threading.py\", line 890 in _bootstrap\r\n\r\nThread 0x00007f56809df700 (most recent call first):\r\n File \"/usr/lib/python3.8/concurrent/futures/thread.py\", line 78 in _worker\r\n File \"/usr/lib/python3.8/threading.py\", line 870 in run\r\n File \"/usr/lib/python3.8/threading.py\", line 932 in _bootstrap_inner\r\n File \"/usr/lib/python3.8/threading.py\", line 890 in _bootstrap\r\n\r\n Thread 0x00007f56c7aad700 (most recent call first):\r\n File \"/usr/lib/python3.8/concurrent/futures/thread.py\", line 78 in _worker\r\n File \"/usr/lib/python3.8/threading.py\", line 870 in run\r\n File \"/usr/lib/python3.8/threading.py\", line 932 in _bootstrap_inner\r\n File \"/usr/lib/python3.8/threading.py\", line 890 in _bootstrap\r\nThread 0x00007fdde29efc40 (most recent call first):\r\n File \"/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoints.py\", line 693 in _write_array\r\nhttps://symbolize.stripped_domain/r/?trace=7fdde2e4203b,7fdde2e420bf,e,5ef27540f,e,26f7c5aff,f,b15f59df&map= \r\nE0317 18:14:57.770066 341059 process_state.cc:1062] RAW: Signal 11 raised at PC: 0x7fdde2e4203b while already in FailureSignalHandler!\r\nE0317 18:14:57.770096 341059 process_state.cc:1065] RAW: tid: 341059 raised new signal\r\n @ 0xf 1440 (unknown)\r\n @ 0x25ed159b0 (unknown) (unknown)\r\n @ 0x10 76231216 (unknown)\r\n @ 0x261cdc840 (unknown) (unknown)\r\n @ 0x2dfdd4780 (unknown) (unknown)\r\n @ 0x5f1f8a120 (unknown) (unknown)\r\nhttps://symbolize.stripped_domain/r/?trace=7fdde301ffd3,7fddd98d57f9,7fdde2e420bf,7,e,25ed159af,f,261cdc83f,2dfdd477f,5f1f8a11f&map=7a511a57244151c993b16b37978e7ed7:7fddcaefd000-7fddd9c3fd50 \r\nE0317 18:14:57.818885 341068 coredump_hook.cc:365] RAW: Remote crash 
data gathering hook invoked.\r\nE0317 18:14:57.818900 341068 coredump_hook.cc:411] RAW: Skipping coredump since rlimit was 0 at process start.\r\nE0317 18:14:57.818919 341068 client.cc:221] RAW: Coroner client retries enabled (b/136286901), will retry for up to 30 sec.\r\nE0317 18:14:57.818922 341068 coredump_hook.cc:473] RAW: Sending fingerprint to remote end.\r\nE0317 18:14:57.818928 341068 coredump_socket.cc:124] RAW: Stat failed errno=2 on socket /var/google/services/logmanagerd/remote_coredump.socket\r\nE0317 18:14:57.818933 341068 coredump_hook.cc:477] RAW: Cannot send fingerprint to Coroner: [NOT_FOUND] Missing crash reporting socket. Is the listener running?\r\nE0317 18:14:57.818938 341068 coredump_hook.cc:550] RAW: Discarding core.\r\nprompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 337643 Segmentation fault (core dumped) python3 -m t5x.train --gin_search_paths=\"${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}\" --gin_file=\"prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin\" --gin_file=\"prompt_tuning/configs/prompts/from_class_labels.gin\" --gin_file=\"prompt_tuning/configs/runs/prompt_finetune.gin\" --gin.CLASS_LABELS=\"['positive', 'negative']\" --gin.MODEL_DIR=\"'${MODEL_DIR}'\" --gin.MIXTURE_OR_TASK_NAME=\"'taskless_glue_sst2_v200_examples'\" --gin.MIXTURE_OR_TASK_MODULE=\"'prompt_tuning.data.glue'\" --gin.TASK_FEATURE_LENGTHS=\"{'inputs': 512, 'targets': 8}\" --gin.INITIAL_CHECKPOINT_PATH=\"'${PRETRAINED_MODEL}'\" --gin.TRAIN_STEPS=\"1_212_000\" --gin.USE_CACHED_TASKS=\"False\" --gin.BATCH_SIZE=\"16\" --gin.partitioning.PjitPartitioner.model_parallel_submesh=\"(4, 4, 1, 2)\" --tfds_data_dir=${TFDS_DATA_DIR}\r\n##### Command execution on worker 3 failed with return code 139. Continuing.\r\nprompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 334750 Aborted (core dumped) python3 -m t5x.train --gin_search_paths=\"${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}\" --gin_file=\"prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin\" --gin_file=\"prompt_tuning/configs/prompts/from_class_labels.gin\" --gin_file=\"prompt_tuning/configs/runs/prompt_finetune.gin\" --gin.CLASS_LABELS=\"['positive', 'negative']\" --gin.MODEL_DIR=\"'${MODEL_DIR}'\" --gin.MIXTURE_OR_TASK_NAME=\"'taskless_glue_sst2_v200_examples'\" --gin.MIXTURE_OR_TASK_MODULE=\"'prompt_tuning.data.glue'\" --gin.TASK_FEATURE_LENGTHS=\"{'inputs': 512, 'targets': 8}\" --gin.INITIAL_CHECKPOINT_PATH=\"'${PRETRAINED_MODEL}'\" --gin.TRAIN_STEPS=\"1_212_000\" --gin.USE_CACHED_TASKS=\"False\" --gin.BATCH_SIZE=\"16\" --gin.partitioning.PjitPartitioner.model_parallel_submesh=\"(4, 4, 1, 2)\" --tfds_data_dir=${TFDS_DATA_DIR}\r\n##### Command execution on worker 1 failed with return code 134. 
Continuing.\r\nprompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 335504 Aborted (core dumped) python3 -m t5x.train --gin_search_paths=\"${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}\" --gin_file=\"prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin\" --gin_file=\"prompt_tuning/configs/prompts/from_class_labels.gin\" --gin_file=\"prompt_tuning/configs/runs/prompt_finetune.gin\" --gin.CLASS_LABELS=\"['positive', 'negative']\" --gin.MODEL_DIR=\"'${MODEL_DIR}'\" --gin.MIXTURE_OR_TASK_NAME=\"'taskless_glue_sst2_v200_examples'\" --gin.MIXTURE_OR_TASK_MODULE=\"'prompt_tuning.data.glue'\" --gin.TASK_FEATURE_LENGTHS=\"{'inputs': 512, 'targets': 8}\" --gin.INITIAL_CHECKPOINT_PATH=\"'${PRETRAINED_MODEL}'\" --gin.TRAIN_STEPS=\"1_212_000\" --gin.USE_CACHED_TASKS=\"False\" --gin.BATCH_SIZE=\"16\" --gin.partitioning.PjitPartitioner.model_parallel_submesh=\"(4, 4, 1, 2)\" --tfds_data_dir=${TFDS_DATA_DIR}\r\n##### Command execution on worker 0 failed with return code 134. Continuing.\r\n```\r\n\r\nThanks\n"},"before_files":{"kind":"list like","value":[{"content":"# Copyright 2022 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5X.\"\"\"\n\nimport os\nimport sys\nimport setuptools\n\n# To enable importing version.py directly, we add its path to sys.path.\nversion_path = os.path.join(os.path.dirname(__file__), 't5x')\nsys.path.append(version_path)\nfrom version import __version__ # pylint: disable=g-import-not-at-top\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\n_jax_version = '0.2.27'\n_jaxlib_version = '0.1.76'\n\nsetuptools.setup(\n name='t5x',\n version=__version__,\n description='T5-eXtended in JAX',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='no-reply@google.com',\n url='http://github.com/google-research/t5x',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['**/*.gin'], # not all subdirectories may have __init__.py.\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'cached_property',\n # TODO(adarob): Replace with 'clu' once >0.0.6 is released.\n 'clu @ git+https://github.com/google/CommonLoopUtils#egg=clu',\n 'flax @ git+https://github.com/google/flax#egg=flax',\n 'gin-config',\n f'jax >= {_jax_version}',\n f'jaxlib >= {_jaxlib_version}',\n 'numpy',\n 'seqio-nightly',\n 't5',\n 'tensorflow',\n 'tensorstore',\n ],\n extras_require={\n 'gcp': [\n 'gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'\n ],\n 'test': ['pytest'],\n\n # Cloud TPU requirements.\n 'tpu': [f'jax[tpu] >= {_jax_version}'],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp 
machinelearning',\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"# Copyright 2022 The T5X Authors.\\n#\\n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n#\\n# http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\\n\\n\\\"\\\"\\\"Install T5X.\\\"\\\"\\\"\\n\\nimport os\\nimport sys\\nimport setuptools\\n\\n# To enable importing version.py directly, we add its path to sys.path.\\nversion_path = os.path.join(os.path.dirname(__file__), 't5x')\\nsys.path.append(version_path)\\nfrom version import __version__ # pylint: disable=g-import-not-at-top\\n\\n# Get the long description from the README file.\\nwith open('README.md') as fp:\\n _LONG_DESCRIPTION = fp.read()\\n\\n_jax_version = '0.2.27'\\n_jaxlib_version = '0.1.76'\\n\\nsetuptools.setup(\\n name='t5x',\\n version=__version__,\\n description='T5-eXtended in JAX',\\n long_description=_LONG_DESCRIPTION,\\n long_description_content_type='text/markdown',\\n author='Google Inc.',\\n author_email='no-reply@google.com',\\n url='http://github.com/google-research/t5x',\\n license='Apache 2.0',\\n packages=setuptools.find_packages(),\\n package_data={\\n '': ['**/*.gin'], # not all subdirectories may have __init__.py.\\n },\\n scripts=[],\\n install_requires=[\\n 'absl-py',\\n 'cached_property',\\n # TODO(adarob): Replace with 'clu' once >0.0.6 is released.\\n 'clu @ git+https://github.com/google/CommonLoopUtils#egg=clu',\\n 'flax @ git+https://github.com/google/flax#egg=flax',\\n 'gin-config',\\n f'jax >= {_jax_version}',\\n f'jaxlib >= {_jaxlib_version}',\\n 'numpy',\\n 'seqio-nightly',\\n 't5',\\n 'tensorflow',\\n 'tensorstore',\\n ],\\n extras_require={\\n 'gcp': [\\n 'gevent', 'google-api-python-client', 'google-compute-engine',\\n 'google-cloud-storage', 'oauth2client'\\n ],\\n 'test': ['pytest'],\\n\\n # Cloud TPU requirements.\\n 'tpu': [f'jax[tpu] >= {_jax_version}'],\\n },\\n classifiers=[\\n 'Development Status :: 4 - Beta',\\n 'Intended Audience :: Developers',\\n 'Intended Audience :: Science/Research',\\n 'License :: OSI Approved :: Apache Software License',\\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\\n ],\\n keywords='text nlp machinelearning',\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# Copyright 2022 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5X.\"\"\"\n\nimport os\nimport sys\nimport setuptools\n\n# To enable importing version.py directly, we add its path to sys.path.\nversion_path = os.path.join(os.path.dirname(__file__), 
't5x')\nsys.path.append(version_path)\nfrom version import __version__ # pylint: disable=g-import-not-at-top\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\n_jax_version = '0.2.27'\n_jaxlib_version = '0.1.76'\n\nsetuptools.setup(\n name='t5x',\n version=__version__,\n description='T5-eXtended in JAX',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='no-reply@google.com',\n url='http://github.com/google-research/t5x',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['**/*.gin'], # not all subdirectories may have __init__.py.\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'cached_property',\n # TODO(adarob): Replace with 'clu' once >0.0.6 is released.\n 'clu @ git+https://github.com/google/CommonLoopUtils#egg=clu',\n 'flax @ git+https://github.com/google/flax#egg=flax',\n 'gin-config',\n f'jax >= {_jax_version}',\n f'jaxlib >= {_jaxlib_version}',\n 'numpy',\n 'seqio-nightly',\n 't5',\n 'tensorflow',\n 'tensorstore >= 0.1.20',\n ],\n extras_require={\n 'gcp': [\n 'gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'\n ],\n 'test': ['pytest'],\n\n # Cloud TPU requirements.\n 'tpu': [f'jax[tpu] >= {_jax_version}'],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp machinelearning',\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"# Copyright 2022 The T5X Authors.\\n#\\n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n#\\n# http://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\\n\\n\\\"\\\"\\\"Install T5X.\\\"\\\"\\\"\\n\\nimport os\\nimport sys\\nimport setuptools\\n\\n# To enable importing version.py directly, we add its path to sys.path.\\nversion_path = os.path.join(os.path.dirname(__file__), 't5x')\\nsys.path.append(version_path)\\nfrom version import __version__ # pylint: disable=g-import-not-at-top\\n\\n# Get the long description from the README file.\\nwith open('README.md') as fp:\\n _LONG_DESCRIPTION = fp.read()\\n\\n_jax_version = '0.2.27'\\n_jaxlib_version = '0.1.76'\\n\\nsetuptools.setup(\\n name='t5x',\\n version=__version__,\\n description='T5-eXtended in JAX',\\n long_description=_LONG_DESCRIPTION,\\n long_description_content_type='text/markdown',\\n author='Google Inc.',\\n author_email='no-reply@google.com',\\n url='http://github.com/google-research/t5x',\\n license='Apache 2.0',\\n packages=setuptools.find_packages(),\\n package_data={\\n '': ['**/*.gin'], # not all subdirectories may have __init__.py.\\n },\\n scripts=[],\\n install_requires=[\\n 'absl-py',\\n 'cached_property',\\n # TODO(adarob): Replace with 'clu' once >0.0.6 is released.\\n 'clu @ git+https://github.com/google/CommonLoopUtils#egg=clu',\\n 'flax @ 
git+https://github.com/google/flax#egg=flax',\\n 'gin-config',\\n f'jax >= {_jax_version}',\\n f'jaxlib >= {_jaxlib_version}',\\n 'numpy',\\n 'seqio-nightly',\\n 't5',\\n 'tensorflow',\\n 'tensorstore >= 0.1.20',\\n ],\\n extras_require={\\n 'gcp': [\\n 'gevent', 'google-api-python-client', 'google-compute-engine',\\n 'google-cloud-storage', 'oauth2client'\\n ],\\n 'test': ['pytest'],\\n\\n # Cloud TPU requirements.\\n 'tpu': [f'jax[tpu] >= {_jax_version}'],\\n },\\n classifiers=[\\n 'Development Status :: 4 - Beta',\\n 'Intended Audience :: Developers',\\n 'Intended Audience :: Science/Research',\\n 'License :: OSI Approved :: Apache Software License',\\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\\n ],\\n keywords='text nlp machinelearning',\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/setup.py b/setup.py\nindex d9dbe74ea..02f838b6a 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -58,7 +58,7 @@\n 'seqio-nightly',\n 't5',\n 'tensorflow',\n- 'tensorstore',\n+ 'tensorstore >= 0.1.20',\n ],\n extras_require={\n 'gcp': [\n"}}},{"rowIdx":426,"cells":{"in_source_id":{"kind":"string","value":"pallets__werkzeug-2320"},"issue":{"kind":"string","value":"ProxyMiddleware encodes characters allowed in path\nWe have a dollar sign in URL and proxy middleware converts it to `%24`. Unfortunately, for backend server url with `$` and url with `%24` are two different urls. It could be fixed by extending safe characters in middleware as it done in [url_fix](https://github.com/pallets/werkzeug/blob/9efe8c00dcb2b6fc086961ba304729db01912652/src/werkzeug/urls.py#L691) function, but it seems to me that the right way is to make the middleware to use `REQUEST_URI`/`RAW_URI` when possible. This way also can fix middleware for cases like the one described in https://github.com/pallets/werkzeug/issues/766.\r\n\r\nI can send PR, but since it will require some efforts on making `remove_prefix` option working correctly and to ensure backward compatibility for public method `proxy_to`, I'd like to discuss this solution first. If it can be accepted, then I proceed.\n"},"before_files":{"kind":"list like","value":[{"content":"\"\"\"Functions for working with URLs.\n\nContains implementations of functions from :mod:`urllib.parse` that\nhandle bytes and strings.\n\"\"\"\nimport codecs\nimport os\nimport re\nimport typing as t\n\nfrom ._internal import _check_str_tuple\nfrom ._internal import _decode_idna\nfrom ._internal import _encode_idna\nfrom ._internal import _make_encode_wrapper\nfrom ._internal import _to_str\n\nif t.TYPE_CHECKING:\n from . 
import datastructures as ds\n\n# A regular expression for what a valid schema looks like\n_scheme_re = re.compile(r\"^[a-zA-Z0-9+-.]+$\")\n\n# Characters that are safe in any part of an URL.\n_always_safe = frozenset(\n bytearray(\n b\"abcdefghijklmnopqrstuvwxyz\"\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n b\"0123456789\"\n b\"-._~\"\n )\n)\n\n_hexdigits = \"0123456789ABCDEFabcdef\"\n_hextobyte = {\n f\"{a}{b}\".encode(\"ascii\"): int(f\"{a}{b}\", 16)\n for a in _hexdigits\n for b in _hexdigits\n}\n_bytetohex = [f\"%{char:02X}\".encode(\"ascii\") for char in range(256)]\n\n\nclass _URLTuple(t.NamedTuple):\n scheme: str\n netloc: str\n path: str\n query: str\n fragment: str\n\n\nclass BaseURL(_URLTuple):\n \"\"\"Superclass of :py:class:`URL` and :py:class:`BytesURL`.\"\"\"\n\n __slots__ = ()\n _at: str\n _colon: str\n _lbracket: str\n _rbracket: str\n\n def __str__(self) -> str:\n return self.to_url()\n\n def replace(self, **kwargs: t.Any) -> \"BaseURL\":\n \"\"\"Return an URL with the same values, except for those parameters\n given new values by whichever keyword arguments are specified.\"\"\"\n return self._replace(**kwargs)\n\n @property\n def host(self) -> t.Optional[str]:\n \"\"\"The host part of the URL if available, otherwise `None`. The\n host is either the hostname or the IP address mentioned in the\n URL. It will not contain the port.\n \"\"\"\n return self._split_host()[0]\n\n @property\n def ascii_host(self) -> t.Optional[str]:\n \"\"\"Works exactly like :attr:`host` but will return a result that\n is restricted to ASCII. If it finds a netloc that is not ASCII\n it will attempt to idna decode it. This is useful for socket\n operations when the URL might include internationalized characters.\n \"\"\"\n rv = self.host\n if rv is not None and isinstance(rv, str):\n try:\n rv = _encode_idna(rv) # type: ignore\n except UnicodeError:\n rv = rv.encode(\"ascii\", \"ignore\") # type: ignore\n return _to_str(rv, \"ascii\", \"ignore\")\n\n @property\n def port(self) -> t.Optional[int]:\n \"\"\"The port in the URL as an integer if it was present, `None`\n otherwise. 
This does not fill in default ports.\n \"\"\"\n try:\n rv = int(_to_str(self._split_host()[1]))\n if 0 <= rv <= 65535:\n return rv\n except (ValueError, TypeError):\n pass\n return None\n\n @property\n def auth(self) -> t.Optional[str]:\n \"\"\"The authentication part in the URL if available, `None`\n otherwise.\n \"\"\"\n return self._split_netloc()[0]\n\n @property\n def username(self) -> t.Optional[str]:\n \"\"\"The username if it was part of the URL, `None` otherwise.\n This undergoes URL decoding and will always be a string.\n \"\"\"\n rv = self._split_auth()[0]\n if rv is not None:\n return _url_unquote_legacy(rv)\n return None\n\n @property\n def raw_username(self) -> t.Optional[str]:\n \"\"\"The username if it was part of the URL, `None` otherwise.\n Unlike :attr:`username` this one is not being decoded.\n \"\"\"\n return self._split_auth()[0]\n\n @property\n def password(self) -> t.Optional[str]:\n \"\"\"The password if it was part of the URL, `None` otherwise.\n This undergoes URL decoding and will always be a string.\n \"\"\"\n rv = self._split_auth()[1]\n if rv is not None:\n return _url_unquote_legacy(rv)\n return None\n\n @property\n def raw_password(self) -> t.Optional[str]:\n \"\"\"The password if it was part of the URL, `None` otherwise.\n Unlike :attr:`password` this one is not being decoded.\n \"\"\"\n return self._split_auth()[1]\n\n def decode_query(self, *args: t.Any, **kwargs: t.Any) -> \"ds.MultiDict[str, str]\":\n \"\"\"Decodes the query part of the URL. Ths is a shortcut for\n calling :func:`url_decode` on the query argument. The arguments and\n keyword arguments are forwarded to :func:`url_decode` unchanged.\n \"\"\"\n return url_decode(self.query, *args, **kwargs)\n\n def join(self, *args: t.Any, **kwargs: t.Any) -> \"BaseURL\":\n \"\"\"Joins this URL with another one. This is just a convenience\n function for calling into :meth:`url_join` and then parsing the\n return value again.\n \"\"\"\n return url_parse(url_join(self, *args, **kwargs))\n\n def to_url(self) -> str:\n \"\"\"Returns a URL string or bytes depending on the type of the\n information stored. This is just a convenience function\n for calling :meth:`url_unparse` for this URL.\n \"\"\"\n return url_unparse(self)\n\n def encode_netloc(self) -> str:\n \"\"\"Encodes the netloc part to an ASCII safe URL as bytes.\"\"\"\n rv = self.ascii_host or \"\"\n if \":\" in rv:\n rv = f\"[{rv}]\"\n port = self.port\n if port is not None:\n rv = f\"{rv}:{port}\"\n auth = \":\".join(\n filter(\n None,\n [\n url_quote(self.raw_username or \"\", \"utf-8\", \"strict\", \"/:%\"),\n url_quote(self.raw_password or \"\", \"utf-8\", \"strict\", \"/:%\"),\n ],\n )\n )\n if auth:\n rv = f\"{auth}@{rv}\"\n return rv\n\n def decode_netloc(self) -> str:\n \"\"\"Decodes the netloc part into a string.\"\"\"\n rv = _decode_idna(self.host or \"\")\n\n if \":\" in rv:\n rv = f\"[{rv}]\"\n port = self.port\n if port is not None:\n rv = f\"{rv}:{port}\"\n auth = \":\".join(\n filter(\n None,\n [\n _url_unquote_legacy(self.raw_username or \"\", \"/:%@\"),\n _url_unquote_legacy(self.raw_password or \"\", \"/:%@\"),\n ],\n )\n )\n if auth:\n rv = f\"{auth}@{rv}\"\n return rv\n\n def to_uri_tuple(self) -> \"BaseURL\":\n \"\"\"Returns a :class:`BytesURL` tuple that holds a URI. 
This will\n encode all the information in the URL properly to ASCII using the\n rules a web browser would follow.\n\n It's usually more interesting to directly call :meth:`iri_to_uri` which\n will return a string.\n \"\"\"\n return url_parse(iri_to_uri(self))\n\n def to_iri_tuple(self) -> \"BaseURL\":\n \"\"\"Returns a :class:`URL` tuple that holds a IRI. This will try\n to decode as much information as possible in the URL without\n losing information similar to how a web browser does it for the\n URL bar.\n\n It's usually more interesting to directly call :meth:`uri_to_iri` which\n will return a string.\n \"\"\"\n return url_parse(uri_to_iri(self))\n\n def get_file_location(\n self, pathformat: t.Optional[str] = None\n ) -> t.Tuple[t.Optional[str], t.Optional[str]]:\n \"\"\"Returns a tuple with the location of the file in the form\n ``(server, location)``. If the netloc is empty in the URL or\n points to localhost, it's represented as ``None``.\n\n The `pathformat` by default is autodetection but needs to be set\n when working with URLs of a specific system. The supported values\n are ``'windows'`` when working with Windows or DOS paths and\n ``'posix'`` when working with posix paths.\n\n If the URL does not point to a local file, the server and location\n are both represented as ``None``.\n\n :param pathformat: The expected format of the path component.\n Currently ``'windows'`` and ``'posix'`` are\n supported. Defaults to ``None`` which is\n autodetect.\n \"\"\"\n if self.scheme != \"file\":\n return None, None\n\n path = url_unquote(self.path)\n host = self.netloc or None\n\n if pathformat is None:\n if os.name == \"nt\":\n pathformat = \"windows\"\n else:\n pathformat = \"posix\"\n\n if pathformat == \"windows\":\n if path[:1] == \"/\" and path[1:2].isalpha() and path[2:3] in \"|:\":\n path = f\"{path[1:2]}:{path[3:]}\"\n windows_share = path[:3] in (\"\\\\\" * 3, \"/\" * 3)\n import ntpath\n\n path = ntpath.normpath(path)\n # Windows shared drives are represented as ``\\\\host\\\\directory``.\n # That results in a URL like ``file://///host/directory``, and a\n # path like ``///host/directory``. 
We need to special-case this\n # because the path contains the hostname.\n if windows_share and host is None:\n parts = path.lstrip(\"\\\\\").split(\"\\\\\", 1)\n if len(parts) == 2:\n host, path = parts\n else:\n host = parts[0]\n path = \"\"\n elif pathformat == \"posix\":\n import posixpath\n\n path = posixpath.normpath(path)\n else:\n raise TypeError(f\"Invalid path format {pathformat!r}\")\n\n if host in (\"127.0.0.1\", \"::1\", \"localhost\"):\n host = None\n\n return host, path\n\n def _split_netloc(self) -> t.Tuple[t.Optional[str], str]:\n if self._at in self.netloc:\n auth, _, netloc = self.netloc.partition(self._at)\n return auth, netloc\n return None, self.netloc\n\n def _split_auth(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:\n auth = self._split_netloc()[0]\n if not auth:\n return None, None\n if self._colon not in auth:\n return auth, None\n\n username, _, password = auth.partition(self._colon)\n return username, password\n\n def _split_host(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:\n rv = self._split_netloc()[1]\n if not rv:\n return None, None\n\n if not rv.startswith(self._lbracket):\n if self._colon in rv:\n host, _, port = rv.partition(self._colon)\n return host, port\n return rv, None\n\n idx = rv.find(self._rbracket)\n if idx < 0:\n return rv, None\n\n host = rv[1:idx]\n rest = rv[idx + 1 :]\n if rest.startswith(self._colon):\n return host, rest[1:]\n return host, None\n\n\nclass URL(BaseURL):\n \"\"\"Represents a parsed URL. This behaves like a regular tuple but\n also has some extra attributes that give further insight into the\n URL.\n \"\"\"\n\n __slots__ = ()\n _at = \"@\"\n _colon = \":\"\n _lbracket = \"[\"\n _rbracket = \"]\"\n\n def encode(self, charset: str = \"utf-8\", errors: str = \"replace\") -> \"BytesURL\":\n \"\"\"Encodes the URL to a tuple made out of bytes. The charset is\n only being used for the path, query and fragment.\n \"\"\"\n return BytesURL(\n self.scheme.encode(\"ascii\"), # type: ignore\n self.encode_netloc(),\n self.path.encode(charset, errors), # type: ignore\n self.query.encode(charset, errors), # type: ignore\n self.fragment.encode(charset, errors), # type: ignore\n )\n\n\nclass BytesURL(BaseURL):\n \"\"\"Represents a parsed URL in bytes.\"\"\"\n\n __slots__ = ()\n _at = b\"@\" # type: ignore\n _colon = b\":\" # type: ignore\n _lbracket = b\"[\" # type: ignore\n _rbracket = b\"]\" # type: ignore\n\n def __str__(self) -> str:\n return self.to_url().decode(\"utf-8\", \"replace\") # type: ignore\n\n def encode_netloc(self) -> bytes: # type: ignore\n \"\"\"Returns the netloc unchanged as bytes.\"\"\"\n return self.netloc # type: ignore\n\n def decode(self, charset: str = \"utf-8\", errors: str = \"replace\") -> \"URL\":\n \"\"\"Decodes the URL to a tuple made out of strings. 
The charset is\n only being used for the path, query and fragment.\n \"\"\"\n return URL(\n self.scheme.decode(\"ascii\"), # type: ignore\n self.decode_netloc(),\n self.path.decode(charset, errors), # type: ignore\n self.query.decode(charset, errors), # type: ignore\n self.fragment.decode(charset, errors), # type: ignore\n )\n\n\n_unquote_maps: t.Dict[t.FrozenSet[int], t.Dict[bytes, int]] = {frozenset(): _hextobyte}\n\n\ndef _unquote_to_bytes(\n string: t.Union[str, bytes], unsafe: t.Union[str, bytes] = \"\"\n) -> bytes:\n if isinstance(string, str):\n string = string.encode(\"utf-8\")\n\n if isinstance(unsafe, str):\n unsafe = unsafe.encode(\"utf-8\")\n\n unsafe = frozenset(bytearray(unsafe))\n groups = iter(string.split(b\"%\"))\n result = bytearray(next(groups, b\"\"))\n\n try:\n hex_to_byte = _unquote_maps[unsafe]\n except KeyError:\n hex_to_byte = _unquote_maps[unsafe] = {\n h: b for h, b in _hextobyte.items() if b not in unsafe\n }\n\n for group in groups:\n code = group[:2]\n\n if code in hex_to_byte:\n result.append(hex_to_byte[code])\n result.extend(group[2:])\n else:\n result.append(37) # %\n result.extend(group)\n\n return bytes(result)\n\n\ndef _url_encode_impl(\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\n charset: str,\n sort: bool,\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]],\n) -> t.Iterator[str]:\n from .datastructures import iter_multi_items\n\n iterable: t.Iterable[t.Tuple[str, str]] = iter_multi_items(obj)\n\n if sort:\n iterable = sorted(iterable, key=key)\n\n for key_str, value_str in iterable:\n if value_str is None:\n continue\n\n if not isinstance(key_str, bytes):\n key_bytes = str(key_str).encode(charset)\n else:\n key_bytes = key_str\n\n if not isinstance(value_str, bytes):\n value_bytes = str(value_str).encode(charset)\n else:\n value_bytes = value_str\n\n yield f\"{_fast_url_quote_plus(key_bytes)}={_fast_url_quote_plus(value_bytes)}\"\n\n\ndef _url_unquote_legacy(value: str, unsafe: str = \"\") -> str:\n try:\n return url_unquote(value, charset=\"utf-8\", errors=\"strict\", unsafe=unsafe)\n except UnicodeError:\n return url_unquote(value, charset=\"latin1\", unsafe=unsafe)\n\n\ndef url_parse(\n url: str, scheme: t.Optional[str] = None, allow_fragments: bool = True\n) -> BaseURL:\n \"\"\"Parses a URL from a string into a :class:`URL` tuple. If the URL\n is lacking a scheme it can be provided as second argument. Otherwise,\n it is ignored. 
Optionally fragments can be stripped from the URL\n by setting `allow_fragments` to `False`.\n\n The inverse of this function is :func:`url_unparse`.\n\n :param url: the URL to parse.\n :param scheme: the default schema to use if the URL is schemaless.\n :param allow_fragments: if set to `False` a fragment will be removed\n from the URL.\n \"\"\"\n s = _make_encode_wrapper(url)\n is_text_based = isinstance(url, str)\n\n if scheme is None:\n scheme = s(\"\")\n netloc = query = fragment = s(\"\")\n i = url.find(s(\":\"))\n if i > 0 and _scheme_re.match(_to_str(url[:i], errors=\"replace\")):\n # make sure \"iri\" is not actually a port number (in which case\n # \"scheme\" is really part of the path)\n rest = url[i + 1 :]\n if not rest or any(c not in s(\"0123456789\") for c in rest):\n # not a port number\n scheme, url = url[:i].lower(), rest\n\n if url[:2] == s(\"//\"):\n delim = len(url)\n for c in s(\"/?#\"):\n wdelim = url.find(c, 2)\n if wdelim >= 0:\n delim = min(delim, wdelim)\n netloc, url = url[2:delim], url[delim:]\n if (s(\"[\") in netloc and s(\"]\") not in netloc) or (\n s(\"]\") in netloc and s(\"[\") not in netloc\n ):\n raise ValueError(\"Invalid IPv6 URL\")\n\n if allow_fragments and s(\"#\") in url:\n url, fragment = url.split(s(\"#\"), 1)\n if s(\"?\") in url:\n url, query = url.split(s(\"?\"), 1)\n\n result_type = URL if is_text_based else BytesURL\n return result_type(scheme, netloc, url, query, fragment)\n\n\ndef _make_fast_url_quote(\n charset: str = \"utf-8\",\n errors: str = \"strict\",\n safe: t.Union[str, bytes] = \"/:\",\n unsafe: t.Union[str, bytes] = \"\",\n) -> t.Callable[[bytes], str]:\n \"\"\"Precompile the translation table for a URL encoding function.\n\n Unlike :func:`url_quote`, the generated function only takes the\n string to quote.\n\n :param charset: The charset to encode the result with.\n :param errors: How to handle encoding errors.\n :param safe: An optional sequence of safe characters to never encode.\n :param unsafe: An optional sequence of unsafe characters to always encode.\n \"\"\"\n if isinstance(safe, str):\n safe = safe.encode(charset, errors)\n\n if isinstance(unsafe, str):\n unsafe = unsafe.encode(charset, errors)\n\n safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))\n table = [chr(c) if c in safe else f\"%{c:02X}\" for c in range(256)]\n\n def quote(string: bytes) -> str:\n return \"\".join([table[c] for c in string])\n\n return quote\n\n\n_fast_url_quote = _make_fast_url_quote()\n_fast_quote_plus = _make_fast_url_quote(safe=\" \", unsafe=\"+\")\n\n\ndef _fast_url_quote_plus(string: bytes) -> str:\n return _fast_quote_plus(string).replace(\" \", \"+\")\n\n\ndef url_quote(\n string: t.Union[str, bytes],\n charset: str = \"utf-8\",\n errors: str = \"strict\",\n safe: t.Union[str, bytes] = \"/:\",\n unsafe: t.Union[str, bytes] = \"\",\n) -> str:\n \"\"\"URL encode a single string with a given encoding.\n\n :param s: the string to quote.\n :param charset: the charset to be used.\n :param safe: an optional sequence of safe characters.\n :param unsafe: an optional sequence of unsafe characters.\n\n .. 
versionadded:: 0.9.2\n The `unsafe` parameter was added.\n \"\"\"\n if not isinstance(string, (str, bytes, bytearray)):\n string = str(string)\n if isinstance(string, str):\n string = string.encode(charset, errors)\n if isinstance(safe, str):\n safe = safe.encode(charset, errors)\n if isinstance(unsafe, str):\n unsafe = unsafe.encode(charset, errors)\n safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))\n rv = bytearray()\n for char in bytearray(string):\n if char in safe:\n rv.append(char)\n else:\n rv.extend(_bytetohex[char])\n return bytes(rv).decode(charset)\n\n\ndef url_quote_plus(\n string: str, charset: str = \"utf-8\", errors: str = \"strict\", safe: str = \"\"\n) -> str:\n \"\"\"URL encode a single string with the given encoding and convert\n whitespace to \"+\".\n\n :param s: The string to quote.\n :param charset: The charset to be used.\n :param safe: An optional sequence of safe characters.\n \"\"\"\n return url_quote(string, charset, errors, safe + \" \", \"+\").replace(\" \", \"+\")\n\n\ndef url_unparse(components: t.Tuple[str, str, str, str, str]) -> str:\n \"\"\"The reverse operation to :meth:`url_parse`. This accepts arbitrary\n as well as :class:`URL` tuples and returns a URL as a string.\n\n :param components: the parsed URL as tuple which should be converted\n into a URL string.\n \"\"\"\n _check_str_tuple(components)\n scheme, netloc, path, query, fragment = components\n s = _make_encode_wrapper(scheme)\n url = s(\"\")\n\n # We generally treat file:///x and file:/x the same which is also\n # what browsers seem to do. This also allows us to ignore a schema\n # register for netloc utilization or having to differentiate between\n # empty and missing netloc.\n if netloc or (scheme and path.startswith(s(\"/\"))):\n if path and path[:1] != s(\"/\"):\n path = s(\"/\") + path\n url = s(\"//\") + (netloc or s(\"\")) + path\n elif path:\n url += path\n if scheme:\n url = scheme + s(\":\") + url\n if query:\n url = url + s(\"?\") + query\n if fragment:\n url = url + s(\"#\") + fragment\n return url\n\n\ndef url_unquote(\n s: t.Union[str, bytes],\n charset: str = \"utf-8\",\n errors: str = \"replace\",\n unsafe: str = \"\",\n) -> str:\n \"\"\"URL decode a single string with a given encoding. If the charset\n is set to `None` no decoding is performed and raw bytes are\n returned.\n\n :param s: the string to unquote.\n :param charset: the charset of the query string. If set to `None`\n no decoding will take place.\n :param errors: the error handling for the charset decoding.\n \"\"\"\n rv = _unquote_to_bytes(s, unsafe)\n if charset is None:\n return rv\n return rv.decode(charset, errors)\n\n\ndef url_unquote_plus(\n s: t.Union[str, bytes], charset: str = \"utf-8\", errors: str = \"replace\"\n) -> str:\n \"\"\"URL decode a single string with the given `charset` and decode \"+\" to\n whitespace.\n\n Per default encoding errors are ignored. If you want a different behavior\n you can set `errors` to ``'replace'`` or ``'strict'``.\n\n :param s: The string to unquote.\n :param charset: the charset of the query string. If set to `None`\n no decoding will take place.\n :param errors: The error handling for the `charset` decoding.\n \"\"\"\n if isinstance(s, str):\n s = s.replace(\"+\", \" \")\n else:\n s = s.replace(b\"+\", b\" \")\n return url_unquote(s, charset, errors)\n\n\ndef url_fix(s: str, charset: str = \"utf-8\") -> str:\n r\"\"\"Sometimes you get an URL by a user that just isn't a real URL because\n it contains unsafe characters like ' ' and so on. 
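A small sketch of the quote/unquote helpers defined above; the sample strings are hypothetical, and the expected values follow from the code as written:

```python
# Sketch: percent-encoding round trips (assumes werkzeug.urls is importable).
from werkzeug.urls import url_quote, url_quote_plus, url_unquote_plus

assert url_quote("a/b c", safe="/") == "a/b%20c"   # "/" kept safe, space encoded
assert url_quote_plus("a b&c") == "a+b%26c"        # space becomes "+", "&" encoded
assert url_unquote_plus("a+b%26c") == "a b&c"      # the inverse operation
```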
This function can fix\n some of the problems in a similar way browsers handle data entered by the\n user:\n\n >>> url_fix('http://de.wikipedia.org/wiki/Elf (Begriffskl\\xe4rung)')\n 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'\n\n :param s: the string with the URL to fix.\n :param charset: The target charset for the URL if the url was given\n as a string.\n \"\"\"\n # First step is to switch to text processing and to convert\n # backslashes (which are invalid in URLs anyways) to slashes. This is\n # consistent with what Chrome does.\n s = _to_str(s, charset, \"replace\").replace(\"\\\\\", \"/\")\n\n # For the specific case that we look like a malformed windows URL\n # we want to fix this up manually:\n if s.startswith(\"file://\") and s[7:8].isalpha() and s[8:10] in (\":/\", \"|/\"):\n s = f\"file:///{s[7:]}\"\n\n url = url_parse(s)\n path = url_quote(url.path, charset, safe=\"/%+$!*'(),\")\n qs = url_quote_plus(url.query, charset, safe=\":&%=+$!*'(),\")\n anchor = url_quote_plus(url.fragment, charset, safe=\":&%=+$!*'(),\")\n return url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))\n\n\n# not-unreserved characters remain quoted when unquoting to IRI\n_to_iri_unsafe = \"\".join([chr(c) for c in range(128) if c not in _always_safe])\n\n\ndef _codec_error_url_quote(e: UnicodeError) -> t.Tuple[str, int]:\n \"\"\"Used in :func:`uri_to_iri` after unquoting to re-quote any\n invalid bytes.\n \"\"\"\n # the docs state that UnicodeError does have these attributes,\n # but mypy isn't picking them up\n out = _fast_url_quote(e.object[e.start : e.end]) # type: ignore\n return out, e.end # type: ignore\n\n\ncodecs.register_error(\"werkzeug.url_quote\", _codec_error_url_quote)\n\n\ndef uri_to_iri(\n uri: t.Union[str, t.Tuple[str, str, str, str, str]],\n charset: str = \"utf-8\",\n errors: str = \"werkzeug.url_quote\",\n) -> str:\n \"\"\"Convert a URI to an IRI. All valid UTF-8 characters are unquoted,\n leaving all reserved and invalid characters quoted. If the URL has\n a domain, it is decoded from Punycode.\n\n >>> uri_to_iri(\"http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF\")\n 'http://\\\\u2603.net/p\\\\xe5th?q=\\\\xe8ry%DF'\n\n :param uri: The URI to convert.\n :param charset: The encoding to encode unquoted bytes with.\n :param errors: Error handler to use during ``bytes.encode``. By\n default, invalid bytes are left quoted.\n\n .. versionchanged:: 0.15\n All reserved and invalid characters remain quoted. Previously,\n only some reserved characters were preserved, and invalid bytes\n were replaced instead of left quoted.\n\n .. versionadded:: 0.6\n \"\"\"\n if isinstance(uri, tuple):\n uri = url_unparse(uri)\n\n uri = url_parse(_to_str(uri, charset))\n path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)\n query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)\n fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)\n return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))\n\n\n# reserved characters remain unquoted when quoting to URI\n_to_uri_safe = \":/?#[]@!$&'()*+,;=%\"\n\n\ndef iri_to_uri(\n iri: t.Union[str, t.Tuple[str, str, str, str, str]],\n charset: str = \"utf-8\",\n errors: str = \"strict\",\n safe_conversion: bool = False,\n) -> str:\n \"\"\"Convert an IRI to a URI. All non-ASCII and unsafe characters are\n quoted. 
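The doctest values already given in the docstrings above can be restated as a runnable check; this sketch adds nothing beyond them except the import:

```python
# Runnable restatement of the doctests shown in the docstrings above.
from werkzeug.urls import url_fix, uri_to_iri

assert url_fix("http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)") == (
    "http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)"
)
assert uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF") == (
    "http://\u2603.net/p\xe5th?q=\xe8ry%DF"  # snowman domain decoded from Punycode
)
```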
If the URL has a domain, it is encoded to Punycode.\n\n >>> iri_to_uri('http://\\\\u2603.net/p\\\\xe5th?q=\\\\xe8ry%DF')\n 'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'\n\n :param iri: The IRI to convert.\n :param charset: The encoding of the IRI.\n :param errors: Error handler to use during ``bytes.encode``.\n :param safe_conversion: Return the URL unchanged if it only contains\n ASCII characters and no whitespace. See the explanation below.\n\n There is a general problem with IRI conversion with some protocols\n that are in violation of the URI specification. Consider the\n following two IRIs::\n\n magnet:?xt=uri:whatever\n itms-services://?action=download-manifest\n\n After parsing, we don't know if the scheme requires the ``//``,\n which is dropped if empty, but conveys different meanings in the\n final URL if it's present or not. In this case, you can use\n ``safe_conversion``, which will return the URL unchanged if it only\n contains ASCII characters and no whitespace. This can result in a\n URI with unquoted characters if it was not already quoted correctly,\n but preserves the URL's semantics. Werkzeug uses this for the\n ``Location`` header for redirects.\n\n .. versionchanged:: 0.15\n All reserved characters remain unquoted. Previously, only some\n reserved characters were left unquoted.\n\n .. versionchanged:: 0.9.6\n The ``safe_conversion`` parameter was added.\n\n .. versionadded:: 0.6\n \"\"\"\n if isinstance(iri, tuple):\n iri = url_unparse(iri)\n\n if safe_conversion:\n # If we're not sure if it's safe to convert the URL, and it only\n # contains ASCII characters, return it unconverted.\n try:\n native_iri = _to_str(iri)\n ascii_iri = native_iri.encode(\"ascii\")\n\n # Only return if it doesn't have whitespace. (Why?)\n if len(ascii_iri.split()) == 1:\n return native_iri\n except UnicodeError:\n pass\n\n iri = url_parse(_to_str(iri, charset, errors))\n path = url_quote(iri.path, charset, errors, _to_uri_safe)\n query = url_quote(iri.query, charset, errors, _to_uri_safe)\n fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)\n return url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))\n\n\ndef url_decode(\n s: t.AnyStr,\n charset: str = \"utf-8\",\n include_empty: bool = True,\n errors: str = \"replace\",\n separator: str = \"&\",\n cls: t.Optional[t.Type[\"ds.MultiDict\"]] = None,\n) -> \"ds.MultiDict[str, str]\":\n \"\"\"Parse a query string and return it as a :class:`MultiDict`.\n\n :param s: The query string to parse.\n :param charset: Decode bytes to string with this charset. If not\n given, bytes are returned as-is.\n :param include_empty: Include keys with empty values in the dict.\n :param errors: Error handling behavior when decoding bytes.\n :param separator: Separator character between pairs.\n :param cls: Container to hold result instead of :class:`MultiDict`.\n\n .. versionchanged:: 2.0\n The ``decode_keys`` parameter is deprecated and will be removed\n in Werkzeug 2.1.\n\n .. versionchanged:: 0.5\n In previous versions \";\" and \"&\" could be used for url decoding.\n Now only \"&\" is supported. If you want to use \";\", a different\n ``separator`` can be provided.\n\n .. 
versionchanged:: 0.5\n The ``cls`` parameter was added.\n \"\"\"\n if cls is None:\n from .datastructures import MultiDict # noqa: F811\n\n cls = MultiDict\n if isinstance(s, str) and not isinstance(separator, str):\n separator = separator.decode(charset or \"ascii\")\n elif isinstance(s, bytes) and not isinstance(separator, bytes):\n separator = separator.encode(charset or \"ascii\") # type: ignore\n return cls(\n _url_decode_impl(\n s.split(separator), charset, include_empty, errors # type: ignore\n )\n )\n\n\ndef url_decode_stream(\n stream: t.IO[bytes],\n charset: str = \"utf-8\",\n include_empty: bool = True,\n errors: str = \"replace\",\n separator: bytes = b\"&\",\n cls: t.Optional[t.Type[\"ds.MultiDict\"]] = None,\n limit: t.Optional[int] = None,\n) -> \"ds.MultiDict[str, str]\":\n \"\"\"Works like :func:`url_decode` but decodes a stream. The behavior\n of stream and limit follows functions like\n :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is\n directly fed to the `cls` so you can consume the data while it's\n parsed.\n\n :param stream: a stream with the encoded querystring\n :param charset: the charset of the query string. If set to `None`\n no decoding will take place.\n :param include_empty: Set to `False` if you don't want empty values to\n appear in the dict.\n :param errors: the decoding error behavior.\n :param separator: the pair separator to be used, defaults to ``&``\n :param cls: an optional dict class to use. If this is not specified\n or `None` the default :class:`MultiDict` is used.\n :param limit: the content length of the URL data. Not necessary if\n a limited stream is provided.\n\n .. versionchanged:: 2.0\n The ``decode_keys`` and ``return_iterator`` parameters are\n deprecated and will be removed in Werkzeug 2.1.\n\n .. versionadded:: 0.8\n \"\"\"\n from .wsgi import make_chunk_iter\n\n pair_iter = make_chunk_iter(stream, separator, limit)\n decoder = _url_decode_impl(pair_iter, charset, include_empty, errors)\n\n if cls is None:\n from .datastructures import MultiDict # noqa: F811\n\n cls = MultiDict\n\n return cls(decoder)\n\n\ndef _url_decode_impl(\n pair_iter: t.Iterable[t.AnyStr], charset: str, include_empty: bool, errors: str\n) -> t.Iterator[t.Tuple[str, str]]:\n for pair in pair_iter:\n if not pair:\n continue\n s = _make_encode_wrapper(pair)\n equal = s(\"=\")\n if equal in pair:\n key, value = pair.split(equal, 1)\n else:\n if not include_empty:\n continue\n key = pair\n value = s(\"\")\n yield (\n url_unquote_plus(key, charset, errors),\n url_unquote_plus(value, charset, errors),\n )\n\n\ndef url_encode(\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\n charset: str = \"utf-8\",\n sort: bool = False,\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,\n separator: str = \"&\",\n) -> str:\n \"\"\"URL encode a dict/`MultiDict`. If a value is `None` it will not appear\n in the result string. Per default only values are encoded into the target\n charset strings.\n\n :param obj: the object to encode into a query string.\n :param charset: the charset of the query string.\n :param sort: set to `True` if you want parameters to be sorted by `key`.\n :param separator: the separator to be used for the pairs.\n :param key: an optional function to be used for sorting. For more details\n check out the :func:`sorted` documentation.\n\n .. versionchanged:: 2.0\n The ``encode_keys`` parameter is deprecated and will be removed\n in Werkzeug 2.1.\n\n .. 
versionchanged:: 0.5\n        Added the ``sort``, ``key``, and ``separator`` parameters.\n    \"\"\"\n    separator = _to_str(separator, \"ascii\")\n    return separator.join(_url_encode_impl(obj, charset, sort, key))\n\n\ndef url_encode_stream(\n    obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\n    stream: t.Optional[t.IO[str]] = None,\n    charset: str = \"utf-8\",\n    sort: bool = False,\n    key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,\n    separator: str = \"&\",\n) -> None:\n    \"\"\"Like :meth:`url_encode` but writes the results to a stream\n    object.  If the stream is `None` a generator over all encoded\n    pairs is returned.\n\n    :param obj: the object to encode into a query string.\n    :param stream: a stream to write the encoded object into or `None` if\n                   an iterator over the encoded pairs should be returned.  In\n                   that case the separator argument is ignored.\n    :param charset: the charset of the query string.\n    :param sort: set to `True` if you want parameters to be sorted by `key`.\n    :param separator: the separator to be used for the pairs.\n    :param key: an optional function to be used for sorting.  For more details\n                check out the :func:`sorted` documentation.\n\n    .. versionchanged:: 2.0\n        The ``encode_keys`` parameter is deprecated and will be removed\n        in Werkzeug 2.1.\n\n    .. versionadded:: 0.8\n    \"\"\"\n    separator = _to_str(separator, \"ascii\")\n    gen = _url_encode_impl(obj, charset, sort, key)\n    if stream is None:\n        return gen  # type: ignore\n    for idx, chunk in enumerate(gen):\n        if idx:\n            stream.write(separator)\n        stream.write(chunk)\n    return None\n\n\ndef url_join(\n    base: t.Union[str, t.Tuple[str, str, str, str, str]],\n    url: t.Union[str, t.Tuple[str, str, str, str, str]],\n    allow_fragments: bool = True,\n) -> str:\n    \"\"\"Join a base URL and a possibly relative URL to form an absolute\n    interpretation of the latter.\n\n    :param base: the base URL for the join operation.\n    :param url: the URL to join.\n    :param allow_fragments: indicates whether fragments should be allowed.\n    \"\"\"\n    if isinstance(base, tuple):\n        base = url_unparse(base)\n    if isinstance(url, tuple):\n        url = url_unparse(url)\n\n    _check_str_tuple((base, url))\n    s = _make_encode_wrapper(base)\n\n    if not base:\n        return url\n    if not url:\n        return base\n\n    bscheme, bnetloc, bpath, bquery, bfragment = url_parse(\n        base, allow_fragments=allow_fragments\n    )\n    scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)\n    if scheme != bscheme:\n        return url\n    if netloc:\n        return url_unparse((scheme, netloc, path, query, fragment))\n    netloc = bnetloc\n\n    if path[:1] == s(\"/\"):\n        segments = path.split(s(\"/\"))\n    elif not path:\n        segments = bpath.split(s(\"/\"))\n        if not query:\n            query = bquery\n    else:\n        segments = bpath.split(s(\"/\"))[:-1] + path.split(s(\"/\"))\n\n    # If the rightmost part is \"./\" we want to keep the slash but\n    # remove the dot.\n    if segments[-1] == s(\".\"):\n        segments[-1] = s(\"\")\n\n    # Resolve \"..\" and \".\"\n    segments = [segment for segment in segments if segment != s(\".\")]\n    while True:\n        i = 1\n        n = len(segments) - 1\n        while i < n:\n            if segments[i] == s(\"..\") and segments[i - 1] not in (s(\"\"), s(\"..\")):\n                del segments[i - 1 : i + 1]\n                break\n            i += 1\n        else:\n            break\n\n    # Remove trailing \"..\" if the URL is absolute\n    unwanted_marker = [s(\"\"), s(\"..\")]\n    while segments[:2] == unwanted_marker:\n        del segments[1]\n\n    path = s(\"/\").join(segments)\n    return url_unparse((scheme, netloc, path, query, fragment))\n","path":"src/werkzeug/urls.py"}],
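Before the revised version of the file below, a short sketch of the query-string helpers and `url_join` defined in this half of the module; the sample data is hypothetical:

```python
# Sketch of url_encode / url_decode / url_join (assumes werkzeug.urls).
from werkzeug.urls import url_encode, url_decode, url_join

qs = url_encode({"q": "snow man", "page": 2})  # non-string values are stringified
assert qs == "q=snow+man&page=2"               # dict insertion order is kept
assert url_decode(qs)["q"] == "snow man"       # returns a MultiDict
assert url_join("http://example.com/a/b", "../c") == "http://example.com/c"
```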
\"content\": \"\\\"\\\"\\\"Functions for working with URLs.\\n\\nContains implementations of functions from :mod:`urllib.parse` that\\nhandle bytes and strings.\\n\\\"\\\"\\\"\\nimport codecs\\nimport os\\nimport re\\nimport typing as t\\n\\nfrom ._internal import _check_str_tuple\\nfrom ._internal import _decode_idna\\nfrom ._internal import _encode_idna\\nfrom ._internal import _make_encode_wrapper\\nfrom ._internal import _to_str\\n\\nif t.TYPE_CHECKING:\\n from . import datastructures as ds\\n\\n# A regular expression for what a valid schema looks like\\n_scheme_re = re.compile(r\\\"^[a-zA-Z0-9+-.]+$\\\")\\n\\n# Characters that are safe in any part of an URL.\\n_always_safe = frozenset(\\n bytearray(\\n b\\\"abcdefghijklmnopqrstuvwxyz\\\"\\n b\\\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\\\"\\n b\\\"0123456789\\\"\\n b\\\"-._~\\\"\\n )\\n)\\n\\n_hexdigits = \\\"0123456789ABCDEFabcdef\\\"\\n_hextobyte = {\\n f\\\"{a}{b}\\\".encode(\\\"ascii\\\"): int(f\\\"{a}{b}\\\", 16)\\n for a in _hexdigits\\n for b in _hexdigits\\n}\\n_bytetohex = [f\\\"%{char:02X}\\\".encode(\\\"ascii\\\") for char in range(256)]\\n\\n\\nclass _URLTuple(t.NamedTuple):\\n scheme: str\\n netloc: str\\n path: str\\n query: str\\n fragment: str\\n\\n\\nclass BaseURL(_URLTuple):\\n \\\"\\\"\\\"Superclass of :py:class:`URL` and :py:class:`BytesURL`.\\\"\\\"\\\"\\n\\n __slots__ = ()\\n _at: str\\n _colon: str\\n _lbracket: str\\n _rbracket: str\\n\\n def __str__(self) -> str:\\n return self.to_url()\\n\\n def replace(self, **kwargs: t.Any) -> \\\"BaseURL\\\":\\n \\\"\\\"\\\"Return an URL with the same values, except for those parameters\\n given new values by whichever keyword arguments are specified.\\\"\\\"\\\"\\n return self._replace(**kwargs)\\n\\n @property\\n def host(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The host part of the URL if available, otherwise `None`. The\\n host is either the hostname or the IP address mentioned in the\\n URL. It will not contain the port.\\n \\\"\\\"\\\"\\n return self._split_host()[0]\\n\\n @property\\n def ascii_host(self) -> t.Optional[str]:\\n \\\"\\\"\\\"Works exactly like :attr:`host` but will return a result that\\n is restricted to ASCII. If it finds a netloc that is not ASCII\\n it will attempt to idna decode it. This is useful for socket\\n operations when the URL might include internationalized characters.\\n \\\"\\\"\\\"\\n rv = self.host\\n if rv is not None and isinstance(rv, str):\\n try:\\n rv = _encode_idna(rv) # type: ignore\\n except UnicodeError:\\n rv = rv.encode(\\\"ascii\\\", \\\"ignore\\\") # type: ignore\\n return _to_str(rv, \\\"ascii\\\", \\\"ignore\\\")\\n\\n @property\\n def port(self) -> t.Optional[int]:\\n \\\"\\\"\\\"The port in the URL as an integer if it was present, `None`\\n otherwise. 
This does not fill in default ports.\\n \\\"\\\"\\\"\\n try:\\n rv = int(_to_str(self._split_host()[1]))\\n if 0 <= rv <= 65535:\\n return rv\\n except (ValueError, TypeError):\\n pass\\n return None\\n\\n @property\\n def auth(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The authentication part in the URL if available, `None`\\n otherwise.\\n \\\"\\\"\\\"\\n return self._split_netloc()[0]\\n\\n @property\\n def username(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The username if it was part of the URL, `None` otherwise.\\n This undergoes URL decoding and will always be a string.\\n \\\"\\\"\\\"\\n rv = self._split_auth()[0]\\n if rv is not None:\\n return _url_unquote_legacy(rv)\\n return None\\n\\n @property\\n def raw_username(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The username if it was part of the URL, `None` otherwise.\\n Unlike :attr:`username` this one is not being decoded.\\n \\\"\\\"\\\"\\n return self._split_auth()[0]\\n\\n @property\\n def password(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The password if it was part of the URL, `None` otherwise.\\n This undergoes URL decoding and will always be a string.\\n \\\"\\\"\\\"\\n rv = self._split_auth()[1]\\n if rv is not None:\\n return _url_unquote_legacy(rv)\\n return None\\n\\n @property\\n def raw_password(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The password if it was part of the URL, `None` otherwise.\\n Unlike :attr:`password` this one is not being decoded.\\n \\\"\\\"\\\"\\n return self._split_auth()[1]\\n\\n def decode_query(self, *args: t.Any, **kwargs: t.Any) -> \\\"ds.MultiDict[str, str]\\\":\\n \\\"\\\"\\\"Decodes the query part of the URL. Ths is a shortcut for\\n calling :func:`url_decode` on the query argument. The arguments and\\n keyword arguments are forwarded to :func:`url_decode` unchanged.\\n \\\"\\\"\\\"\\n return url_decode(self.query, *args, **kwargs)\\n\\n def join(self, *args: t.Any, **kwargs: t.Any) -> \\\"BaseURL\\\":\\n \\\"\\\"\\\"Joins this URL with another one. This is just a convenience\\n function for calling into :meth:`url_join` and then parsing the\\n return value again.\\n \\\"\\\"\\\"\\n return url_parse(url_join(self, *args, **kwargs))\\n\\n def to_url(self) -> str:\\n \\\"\\\"\\\"Returns a URL string or bytes depending on the type of the\\n information stored. 
This is just a convenience function\\n for calling :meth:`url_unparse` for this URL.\\n \\\"\\\"\\\"\\n return url_unparse(self)\\n\\n def encode_netloc(self) -> str:\\n \\\"\\\"\\\"Encodes the netloc part to an ASCII safe URL as bytes.\\\"\\\"\\\"\\n rv = self.ascii_host or \\\"\\\"\\n if \\\":\\\" in rv:\\n rv = f\\\"[{rv}]\\\"\\n port = self.port\\n if port is not None:\\n rv = f\\\"{rv}:{port}\\\"\\n auth = \\\":\\\".join(\\n filter(\\n None,\\n [\\n url_quote(self.raw_username or \\\"\\\", \\\"utf-8\\\", \\\"strict\\\", \\\"/:%\\\"),\\n url_quote(self.raw_password or \\\"\\\", \\\"utf-8\\\", \\\"strict\\\", \\\"/:%\\\"),\\n ],\\n )\\n )\\n if auth:\\n rv = f\\\"{auth}@{rv}\\\"\\n return rv\\n\\n def decode_netloc(self) -> str:\\n \\\"\\\"\\\"Decodes the netloc part into a string.\\\"\\\"\\\"\\n rv = _decode_idna(self.host or \\\"\\\")\\n\\n if \\\":\\\" in rv:\\n rv = f\\\"[{rv}]\\\"\\n port = self.port\\n if port is not None:\\n rv = f\\\"{rv}:{port}\\\"\\n auth = \\\":\\\".join(\\n filter(\\n None,\\n [\\n _url_unquote_legacy(self.raw_username or \\\"\\\", \\\"/:%@\\\"),\\n _url_unquote_legacy(self.raw_password or \\\"\\\", \\\"/:%@\\\"),\\n ],\\n )\\n )\\n if auth:\\n rv = f\\\"{auth}@{rv}\\\"\\n return rv\\n\\n def to_uri_tuple(self) -> \\\"BaseURL\\\":\\n \\\"\\\"\\\"Returns a :class:`BytesURL` tuple that holds a URI. This will\\n encode all the information in the URL properly to ASCII using the\\n rules a web browser would follow.\\n\\n It's usually more interesting to directly call :meth:`iri_to_uri` which\\n will return a string.\\n \\\"\\\"\\\"\\n return url_parse(iri_to_uri(self))\\n\\n def to_iri_tuple(self) -> \\\"BaseURL\\\":\\n \\\"\\\"\\\"Returns a :class:`URL` tuple that holds a IRI. This will try\\n to decode as much information as possible in the URL without\\n losing information similar to how a web browser does it for the\\n URL bar.\\n\\n It's usually more interesting to directly call :meth:`uri_to_iri` which\\n will return a string.\\n \\\"\\\"\\\"\\n return url_parse(uri_to_iri(self))\\n\\n def get_file_location(\\n self, pathformat: t.Optional[str] = None\\n ) -> t.Tuple[t.Optional[str], t.Optional[str]]:\\n \\\"\\\"\\\"Returns a tuple with the location of the file in the form\\n ``(server, location)``. If the netloc is empty in the URL or\\n points to localhost, it's represented as ``None``.\\n\\n The `pathformat` by default is autodetection but needs to be set\\n when working with URLs of a specific system. The supported values\\n are ``'windows'`` when working with Windows or DOS paths and\\n ``'posix'`` when working with posix paths.\\n\\n If the URL does not point to a local file, the server and location\\n are both represented as ``None``.\\n\\n :param pathformat: The expected format of the path component.\\n Currently ``'windows'`` and ``'posix'`` are\\n supported. 
Defaults to ``None`` which is\\n autodetect.\\n \\\"\\\"\\\"\\n if self.scheme != \\\"file\\\":\\n return None, None\\n\\n path = url_unquote(self.path)\\n host = self.netloc or None\\n\\n if pathformat is None:\\n if os.name == \\\"nt\\\":\\n pathformat = \\\"windows\\\"\\n else:\\n pathformat = \\\"posix\\\"\\n\\n if pathformat == \\\"windows\\\":\\n if path[:1] == \\\"/\\\" and path[1:2].isalpha() and path[2:3] in \\\"|:\\\":\\n path = f\\\"{path[1:2]}:{path[3:]}\\\"\\n windows_share = path[:3] in (\\\"\\\\\\\\\\\" * 3, \\\"/\\\" * 3)\\n import ntpath\\n\\n path = ntpath.normpath(path)\\n # Windows shared drives are represented as ``\\\\\\\\host\\\\\\\\directory``.\\n # That results in a URL like ``file://///host/directory``, and a\\n # path like ``///host/directory``. We need to special-case this\\n # because the path contains the hostname.\\n if windows_share and host is None:\\n parts = path.lstrip(\\\"\\\\\\\\\\\").split(\\\"\\\\\\\\\\\", 1)\\n if len(parts) == 2:\\n host, path = parts\\n else:\\n host = parts[0]\\n path = \\\"\\\"\\n elif pathformat == \\\"posix\\\":\\n import posixpath\\n\\n path = posixpath.normpath(path)\\n else:\\n raise TypeError(f\\\"Invalid path format {pathformat!r}\\\")\\n\\n if host in (\\\"127.0.0.1\\\", \\\"::1\\\", \\\"localhost\\\"):\\n host = None\\n\\n return host, path\\n\\n def _split_netloc(self) -> t.Tuple[t.Optional[str], str]:\\n if self._at in self.netloc:\\n auth, _, netloc = self.netloc.partition(self._at)\\n return auth, netloc\\n return None, self.netloc\\n\\n def _split_auth(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:\\n auth = self._split_netloc()[0]\\n if not auth:\\n return None, None\\n if self._colon not in auth:\\n return auth, None\\n\\n username, _, password = auth.partition(self._colon)\\n return username, password\\n\\n def _split_host(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:\\n rv = self._split_netloc()[1]\\n if not rv:\\n return None, None\\n\\n if not rv.startswith(self._lbracket):\\n if self._colon in rv:\\n host, _, port = rv.partition(self._colon)\\n return host, port\\n return rv, None\\n\\n idx = rv.find(self._rbracket)\\n if idx < 0:\\n return rv, None\\n\\n host = rv[1:idx]\\n rest = rv[idx + 1 :]\\n if rest.startswith(self._colon):\\n return host, rest[1:]\\n return host, None\\n\\n\\nclass URL(BaseURL):\\n \\\"\\\"\\\"Represents a parsed URL. This behaves like a regular tuple but\\n also has some extra attributes that give further insight into the\\n URL.\\n \\\"\\\"\\\"\\n\\n __slots__ = ()\\n _at = \\\"@\\\"\\n _colon = \\\":\\\"\\n _lbracket = \\\"[\\\"\\n _rbracket = \\\"]\\\"\\n\\n def encode(self, charset: str = \\\"utf-8\\\", errors: str = \\\"replace\\\") -> \\\"BytesURL\\\":\\n \\\"\\\"\\\"Encodes the URL to a tuple made out of bytes. 
The charset is\\n only being used for the path, query and fragment.\\n \\\"\\\"\\\"\\n return BytesURL(\\n self.scheme.encode(\\\"ascii\\\"), # type: ignore\\n self.encode_netloc(),\\n self.path.encode(charset, errors), # type: ignore\\n self.query.encode(charset, errors), # type: ignore\\n self.fragment.encode(charset, errors), # type: ignore\\n )\\n\\n\\nclass BytesURL(BaseURL):\\n \\\"\\\"\\\"Represents a parsed URL in bytes.\\\"\\\"\\\"\\n\\n __slots__ = ()\\n _at = b\\\"@\\\" # type: ignore\\n _colon = b\\\":\\\" # type: ignore\\n _lbracket = b\\\"[\\\" # type: ignore\\n _rbracket = b\\\"]\\\" # type: ignore\\n\\n def __str__(self) -> str:\\n return self.to_url().decode(\\\"utf-8\\\", \\\"replace\\\") # type: ignore\\n\\n def encode_netloc(self) -> bytes: # type: ignore\\n \\\"\\\"\\\"Returns the netloc unchanged as bytes.\\\"\\\"\\\"\\n return self.netloc # type: ignore\\n\\n def decode(self, charset: str = \\\"utf-8\\\", errors: str = \\\"replace\\\") -> \\\"URL\\\":\\n \\\"\\\"\\\"Decodes the URL to a tuple made out of strings. The charset is\\n only being used for the path, query and fragment.\\n \\\"\\\"\\\"\\n return URL(\\n self.scheme.decode(\\\"ascii\\\"), # type: ignore\\n self.decode_netloc(),\\n self.path.decode(charset, errors), # type: ignore\\n self.query.decode(charset, errors), # type: ignore\\n self.fragment.decode(charset, errors), # type: ignore\\n )\\n\\n\\n_unquote_maps: t.Dict[t.FrozenSet[int], t.Dict[bytes, int]] = {frozenset(): _hextobyte}\\n\\n\\ndef _unquote_to_bytes(\\n string: t.Union[str, bytes], unsafe: t.Union[str, bytes] = \\\"\\\"\\n) -> bytes:\\n if isinstance(string, str):\\n string = string.encode(\\\"utf-8\\\")\\n\\n if isinstance(unsafe, str):\\n unsafe = unsafe.encode(\\\"utf-8\\\")\\n\\n unsafe = frozenset(bytearray(unsafe))\\n groups = iter(string.split(b\\\"%\\\"))\\n result = bytearray(next(groups, b\\\"\\\"))\\n\\n try:\\n hex_to_byte = _unquote_maps[unsafe]\\n except KeyError:\\n hex_to_byte = _unquote_maps[unsafe] = {\\n h: b for h, b in _hextobyte.items() if b not in unsafe\\n }\\n\\n for group in groups:\\n code = group[:2]\\n\\n if code in hex_to_byte:\\n result.append(hex_to_byte[code])\\n result.extend(group[2:])\\n else:\\n result.append(37) # %\\n result.extend(group)\\n\\n return bytes(result)\\n\\n\\ndef _url_encode_impl(\\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\\n charset: str,\\n sort: bool,\\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]],\\n) -> t.Iterator[str]:\\n from .datastructures import iter_multi_items\\n\\n iterable: t.Iterable[t.Tuple[str, str]] = iter_multi_items(obj)\\n\\n if sort:\\n iterable = sorted(iterable, key=key)\\n\\n for key_str, value_str in iterable:\\n if value_str is None:\\n continue\\n\\n if not isinstance(key_str, bytes):\\n key_bytes = str(key_str).encode(charset)\\n else:\\n key_bytes = key_str\\n\\n if not isinstance(value_str, bytes):\\n value_bytes = str(value_str).encode(charset)\\n else:\\n value_bytes = value_str\\n\\n yield f\\\"{_fast_url_quote_plus(key_bytes)}={_fast_url_quote_plus(value_bytes)}\\\"\\n\\n\\ndef _url_unquote_legacy(value: str, unsafe: str = \\\"\\\") -> str:\\n try:\\n return url_unquote(value, charset=\\\"utf-8\\\", errors=\\\"strict\\\", unsafe=unsafe)\\n except UnicodeError:\\n return url_unquote(value, charset=\\\"latin1\\\", unsafe=unsafe)\\n\\n\\ndef url_parse(\\n url: str, scheme: t.Optional[str] = None, allow_fragments: bool = True\\n) -> BaseURL:\\n \\\"\\\"\\\"Parses a URL from a string into a :class:`URL` tuple. 
If the URL\\n is lacking a scheme it can be provided as second argument. Otherwise,\\n it is ignored. Optionally fragments can be stripped from the URL\\n by setting `allow_fragments` to `False`.\\n\\n The inverse of this function is :func:`url_unparse`.\\n\\n :param url: the URL to parse.\\n :param scheme: the default schema to use if the URL is schemaless.\\n :param allow_fragments: if set to `False` a fragment will be removed\\n from the URL.\\n \\\"\\\"\\\"\\n s = _make_encode_wrapper(url)\\n is_text_based = isinstance(url, str)\\n\\n if scheme is None:\\n scheme = s(\\\"\\\")\\n netloc = query = fragment = s(\\\"\\\")\\n i = url.find(s(\\\":\\\"))\\n if i > 0 and _scheme_re.match(_to_str(url[:i], errors=\\\"replace\\\")):\\n # make sure \\\"iri\\\" is not actually a port number (in which case\\n # \\\"scheme\\\" is really part of the path)\\n rest = url[i + 1 :]\\n if not rest or any(c not in s(\\\"0123456789\\\") for c in rest):\\n # not a port number\\n scheme, url = url[:i].lower(), rest\\n\\n if url[:2] == s(\\\"//\\\"):\\n delim = len(url)\\n for c in s(\\\"/?#\\\"):\\n wdelim = url.find(c, 2)\\n if wdelim >= 0:\\n delim = min(delim, wdelim)\\n netloc, url = url[2:delim], url[delim:]\\n if (s(\\\"[\\\") in netloc and s(\\\"]\\\") not in netloc) or (\\n s(\\\"]\\\") in netloc and s(\\\"[\\\") not in netloc\\n ):\\n raise ValueError(\\\"Invalid IPv6 URL\\\")\\n\\n if allow_fragments and s(\\\"#\\\") in url:\\n url, fragment = url.split(s(\\\"#\\\"), 1)\\n if s(\\\"?\\\") in url:\\n url, query = url.split(s(\\\"?\\\"), 1)\\n\\n result_type = URL if is_text_based else BytesURL\\n return result_type(scheme, netloc, url, query, fragment)\\n\\n\\ndef _make_fast_url_quote(\\n charset: str = \\\"utf-8\\\",\\n errors: str = \\\"strict\\\",\\n safe: t.Union[str, bytes] = \\\"/:\\\",\\n unsafe: t.Union[str, bytes] = \\\"\\\",\\n) -> t.Callable[[bytes], str]:\\n \\\"\\\"\\\"Precompile the translation table for a URL encoding function.\\n\\n Unlike :func:`url_quote`, the generated function only takes the\\n string to quote.\\n\\n :param charset: The charset to encode the result with.\\n :param errors: How to handle encoding errors.\\n :param safe: An optional sequence of safe characters to never encode.\\n :param unsafe: An optional sequence of unsafe characters to always encode.\\n \\\"\\\"\\\"\\n if isinstance(safe, str):\\n safe = safe.encode(charset, errors)\\n\\n if isinstance(unsafe, str):\\n unsafe = unsafe.encode(charset, errors)\\n\\n safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))\\n table = [chr(c) if c in safe else f\\\"%{c:02X}\\\" for c in range(256)]\\n\\n def quote(string: bytes) -> str:\\n return \\\"\\\".join([table[c] for c in string])\\n\\n return quote\\n\\n\\n_fast_url_quote = _make_fast_url_quote()\\n_fast_quote_plus = _make_fast_url_quote(safe=\\\" \\\", unsafe=\\\"+\\\")\\n\\n\\ndef _fast_url_quote_plus(string: bytes) -> str:\\n return _fast_quote_plus(string).replace(\\\" \\\", \\\"+\\\")\\n\\n\\ndef url_quote(\\n string: t.Union[str, bytes],\\n charset: str = \\\"utf-8\\\",\\n errors: str = \\\"strict\\\",\\n safe: t.Union[str, bytes] = \\\"/:\\\",\\n unsafe: t.Union[str, bytes] = \\\"\\\",\\n) -> str:\\n \\\"\\\"\\\"URL encode a single string with a given encoding.\\n\\n :param s: the string to quote.\\n :param charset: the charset to be used.\\n :param safe: an optional sequence of safe characters.\\n :param unsafe: an optional sequence of unsafe characters.\\n\\n .. 
versionadded:: 0.9.2\\n The `unsafe` parameter was added.\\n \\\"\\\"\\\"\\n if not isinstance(string, (str, bytes, bytearray)):\\n string = str(string)\\n if isinstance(string, str):\\n string = string.encode(charset, errors)\\n if isinstance(safe, str):\\n safe = safe.encode(charset, errors)\\n if isinstance(unsafe, str):\\n unsafe = unsafe.encode(charset, errors)\\n safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))\\n rv = bytearray()\\n for char in bytearray(string):\\n if char in safe:\\n rv.append(char)\\n else:\\n rv.extend(_bytetohex[char])\\n return bytes(rv).decode(charset)\\n\\n\\ndef url_quote_plus(\\n string: str, charset: str = \\\"utf-8\\\", errors: str = \\\"strict\\\", safe: str = \\\"\\\"\\n) -> str:\\n \\\"\\\"\\\"URL encode a single string with the given encoding and convert\\n whitespace to \\\"+\\\".\\n\\n :param s: The string to quote.\\n :param charset: The charset to be used.\\n :param safe: An optional sequence of safe characters.\\n \\\"\\\"\\\"\\n return url_quote(string, charset, errors, safe + \\\" \\\", \\\"+\\\").replace(\\\" \\\", \\\"+\\\")\\n\\n\\ndef url_unparse(components: t.Tuple[str, str, str, str, str]) -> str:\\n \\\"\\\"\\\"The reverse operation to :meth:`url_parse`. This accepts arbitrary\\n as well as :class:`URL` tuples and returns a URL as a string.\\n\\n :param components: the parsed URL as tuple which should be converted\\n into a URL string.\\n \\\"\\\"\\\"\\n _check_str_tuple(components)\\n scheme, netloc, path, query, fragment = components\\n s = _make_encode_wrapper(scheme)\\n url = s(\\\"\\\")\\n\\n # We generally treat file:///x and file:/x the same which is also\\n # what browsers seem to do. This also allows us to ignore a schema\\n # register for netloc utilization or having to differentiate between\\n # empty and missing netloc.\\n if netloc or (scheme and path.startswith(s(\\\"/\\\"))):\\n if path and path[:1] != s(\\\"/\\\"):\\n path = s(\\\"/\\\") + path\\n url = s(\\\"//\\\") + (netloc or s(\\\"\\\")) + path\\n elif path:\\n url += path\\n if scheme:\\n url = scheme + s(\\\":\\\") + url\\n if query:\\n url = url + s(\\\"?\\\") + query\\n if fragment:\\n url = url + s(\\\"#\\\") + fragment\\n return url\\n\\n\\ndef url_unquote(\\n s: t.Union[str, bytes],\\n charset: str = \\\"utf-8\\\",\\n errors: str = \\\"replace\\\",\\n unsafe: str = \\\"\\\",\\n) -> str:\\n \\\"\\\"\\\"URL decode a single string with a given encoding. If the charset\\n is set to `None` no decoding is performed and raw bytes are\\n returned.\\n\\n :param s: the string to unquote.\\n :param charset: the charset of the query string. If set to `None`\\n no decoding will take place.\\n :param errors: the error handling for the charset decoding.\\n \\\"\\\"\\\"\\n rv = _unquote_to_bytes(s, unsafe)\\n if charset is None:\\n return rv\\n return rv.decode(charset, errors)\\n\\n\\ndef url_unquote_plus(\\n s: t.Union[str, bytes], charset: str = \\\"utf-8\\\", errors: str = \\\"replace\\\"\\n) -> str:\\n \\\"\\\"\\\"URL decode a single string with the given `charset` and decode \\\"+\\\" to\\n whitespace.\\n\\n Per default encoding errors are ignored. If you want a different behavior\\n you can set `errors` to ``'replace'`` or ``'strict'``.\\n\\n :param s: The string to unquote.\\n :param charset: the charset of the query string. 
If set to `None`\\n no decoding will take place.\\n :param errors: The error handling for the `charset` decoding.\\n \\\"\\\"\\\"\\n if isinstance(s, str):\\n s = s.replace(\\\"+\\\", \\\" \\\")\\n else:\\n s = s.replace(b\\\"+\\\", b\\\" \\\")\\n return url_unquote(s, charset, errors)\\n\\n\\ndef url_fix(s: str, charset: str = \\\"utf-8\\\") -> str:\\n r\\\"\\\"\\\"Sometimes you get an URL by a user that just isn't a real URL because\\n it contains unsafe characters like ' ' and so on. This function can fix\\n some of the problems in a similar way browsers handle data entered by the\\n user:\\n\\n >>> url_fix('http://de.wikipedia.org/wiki/Elf (Begriffskl\\\\xe4rung)')\\n 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'\\n\\n :param s: the string with the URL to fix.\\n :param charset: The target charset for the URL if the url was given\\n as a string.\\n \\\"\\\"\\\"\\n # First step is to switch to text processing and to convert\\n # backslashes (which are invalid in URLs anyways) to slashes. This is\\n # consistent with what Chrome does.\\n s = _to_str(s, charset, \\\"replace\\\").replace(\\\"\\\\\\\\\\\", \\\"/\\\")\\n\\n # For the specific case that we look like a malformed windows URL\\n # we want to fix this up manually:\\n if s.startswith(\\\"file://\\\") and s[7:8].isalpha() and s[8:10] in (\\\":/\\\", \\\"|/\\\"):\\n s = f\\\"file:///{s[7:]}\\\"\\n\\n url = url_parse(s)\\n path = url_quote(url.path, charset, safe=\\\"/%+$!*'(),\\\")\\n qs = url_quote_plus(url.query, charset, safe=\\\":&%=+$!*'(),\\\")\\n anchor = url_quote_plus(url.fragment, charset, safe=\\\":&%=+$!*'(),\\\")\\n return url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))\\n\\n\\n# not-unreserved characters remain quoted when unquoting to IRI\\n_to_iri_unsafe = \\\"\\\".join([chr(c) for c in range(128) if c not in _always_safe])\\n\\n\\ndef _codec_error_url_quote(e: UnicodeError) -> t.Tuple[str, int]:\\n \\\"\\\"\\\"Used in :func:`uri_to_iri` after unquoting to re-quote any\\n invalid bytes.\\n \\\"\\\"\\\"\\n # the docs state that UnicodeError does have these attributes,\\n # but mypy isn't picking them up\\n out = _fast_url_quote(e.object[e.start : e.end]) # type: ignore\\n return out, e.end # type: ignore\\n\\n\\ncodecs.register_error(\\\"werkzeug.url_quote\\\", _codec_error_url_quote)\\n\\n\\ndef uri_to_iri(\\n uri: t.Union[str, t.Tuple[str, str, str, str, str]],\\n charset: str = \\\"utf-8\\\",\\n errors: str = \\\"werkzeug.url_quote\\\",\\n) -> str:\\n \\\"\\\"\\\"Convert a URI to an IRI. All valid UTF-8 characters are unquoted,\\n leaving all reserved and invalid characters quoted. If the URL has\\n a domain, it is decoded from Punycode.\\n\\n >>> uri_to_iri(\\\"http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF\\\")\\n 'http://\\\\\\\\u2603.net/p\\\\\\\\xe5th?q=\\\\\\\\xe8ry%DF'\\n\\n :param uri: The URI to convert.\\n :param charset: The encoding to encode unquoted bytes with.\\n :param errors: Error handler to use during ``bytes.encode``. By\\n default, invalid bytes are left quoted.\\n\\n .. versionchanged:: 0.15\\n All reserved and invalid characters remain quoted. Previously,\\n only some reserved characters were preserved, and invalid bytes\\n were replaced instead of left quoted.\\n\\n .. 
versionadded:: 0.6\\n \\\"\\\"\\\"\\n if isinstance(uri, tuple):\\n uri = url_unparse(uri)\\n\\n uri = url_parse(_to_str(uri, charset))\\n path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)\\n query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)\\n fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)\\n return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))\\n\\n\\n# reserved characters remain unquoted when quoting to URI\\n_to_uri_safe = \\\":/?#[]@!$&'()*+,;=%\\\"\\n\\n\\ndef iri_to_uri(\\n iri: t.Union[str, t.Tuple[str, str, str, str, str]],\\n charset: str = \\\"utf-8\\\",\\n errors: str = \\\"strict\\\",\\n safe_conversion: bool = False,\\n) -> str:\\n \\\"\\\"\\\"Convert an IRI to a URI. All non-ASCII and unsafe characters are\\n quoted. If the URL has a domain, it is encoded to Punycode.\\n\\n >>> iri_to_uri('http://\\\\\\\\u2603.net/p\\\\\\\\xe5th?q=\\\\\\\\xe8ry%DF')\\n 'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'\\n\\n :param iri: The IRI to convert.\\n :param charset: The encoding of the IRI.\\n :param errors: Error handler to use during ``bytes.encode``.\\n :param safe_conversion: Return the URL unchanged if it only contains\\n ASCII characters and no whitespace. See the explanation below.\\n\\n There is a general problem with IRI conversion with some protocols\\n that are in violation of the URI specification. Consider the\\n following two IRIs::\\n\\n magnet:?xt=uri:whatever\\n itms-services://?action=download-manifest\\n\\n After parsing, we don't know if the scheme requires the ``//``,\\n which is dropped if empty, but conveys different meanings in the\\n final URL if it's present or not. In this case, you can use\\n ``safe_conversion``, which will return the URL unchanged if it only\\n contains ASCII characters and no whitespace. This can result in a\\n URI with unquoted characters if it was not already quoted correctly,\\n but preserves the URL's semantics. Werkzeug uses this for the\\n ``Location`` header for redirects.\\n\\n .. versionchanged:: 0.15\\n All reserved characters remain unquoted. Previously, only some\\n reserved characters were left unquoted.\\n\\n .. versionchanged:: 0.9.6\\n The ``safe_conversion`` parameter was added.\\n\\n .. versionadded:: 0.6\\n \\\"\\\"\\\"\\n if isinstance(iri, tuple):\\n iri = url_unparse(iri)\\n\\n if safe_conversion:\\n # If we're not sure if it's safe to convert the URL, and it only\\n # contains ASCII characters, return it unconverted.\\n try:\\n native_iri = _to_str(iri)\\n ascii_iri = native_iri.encode(\\\"ascii\\\")\\n\\n # Only return if it doesn't have whitespace. (Why?)\\n if len(ascii_iri.split()) == 1:\\n return native_iri\\n except UnicodeError:\\n pass\\n\\n iri = url_parse(_to_str(iri, charset, errors))\\n path = url_quote(iri.path, charset, errors, _to_uri_safe)\\n query = url_quote(iri.query, charset, errors, _to_uri_safe)\\n fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)\\n return url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))\\n\\n\\ndef url_decode(\\n s: t.AnyStr,\\n charset: str = \\\"utf-8\\\",\\n include_empty: bool = True,\\n errors: str = \\\"replace\\\",\\n separator: str = \\\"&\\\",\\n cls: t.Optional[t.Type[\\\"ds.MultiDict\\\"]] = None,\\n) -> \\\"ds.MultiDict[str, str]\\\":\\n \\\"\\\"\\\"Parse a query string and return it as a :class:`MultiDict`.\\n\\n :param s: The query string to parse.\\n :param charset: Decode bytes to string with this charset. 
If not\\n given, bytes are returned as-is.\\n :param include_empty: Include keys with empty values in the dict.\\n :param errors: Error handling behavior when decoding bytes.\\n :param separator: Separator character between pairs.\\n :param cls: Container to hold result instead of :class:`MultiDict`.\\n\\n .. versionchanged:: 2.0\\n The ``decode_keys`` parameter is deprecated and will be removed\\n in Werkzeug 2.1.\\n\\n .. versionchanged:: 0.5\\n In previous versions \\\";\\\" and \\\"&\\\" could be used for url decoding.\\n Now only \\\"&\\\" is supported. If you want to use \\\";\\\", a different\\n ``separator`` can be provided.\\n\\n .. versionchanged:: 0.5\\n The ``cls`` parameter was added.\\n \\\"\\\"\\\"\\n if cls is None:\\n from .datastructures import MultiDict # noqa: F811\\n\\n cls = MultiDict\\n if isinstance(s, str) and not isinstance(separator, str):\\n separator = separator.decode(charset or \\\"ascii\\\")\\n elif isinstance(s, bytes) and not isinstance(separator, bytes):\\n separator = separator.encode(charset or \\\"ascii\\\") # type: ignore\\n return cls(\\n _url_decode_impl(\\n s.split(separator), charset, include_empty, errors # type: ignore\\n )\\n )\\n\\n\\ndef url_decode_stream(\\n stream: t.IO[bytes],\\n charset: str = \\\"utf-8\\\",\\n include_empty: bool = True,\\n errors: str = \\\"replace\\\",\\n separator: bytes = b\\\"&\\\",\\n cls: t.Optional[t.Type[\\\"ds.MultiDict\\\"]] = None,\\n limit: t.Optional[int] = None,\\n) -> \\\"ds.MultiDict[str, str]\\\":\\n \\\"\\\"\\\"Works like :func:`url_decode` but decodes a stream. The behavior\\n of stream and limit follows functions like\\n :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is\\n directly fed to the `cls` so you can consume the data while it's\\n parsed.\\n\\n :param stream: a stream with the encoded querystring\\n :param charset: the charset of the query string. If set to `None`\\n no decoding will take place.\\n :param include_empty: Set to `False` if you don't want empty values to\\n appear in the dict.\\n :param errors: the decoding error behavior.\\n :param separator: the pair separator to be used, defaults to ``&``\\n :param cls: an optional dict class to use. If this is not specified\\n or `None` the default :class:`MultiDict` is used.\\n :param limit: the content length of the URL data. Not necessary if\\n a limited stream is provided.\\n\\n .. versionchanged:: 2.0\\n The ``decode_keys`` and ``return_iterator`` parameters are\\n deprecated and will be removed in Werkzeug 2.1.\\n\\n .. 
versionadded:: 0.8\\n \\\"\\\"\\\"\\n from .wsgi import make_chunk_iter\\n\\n pair_iter = make_chunk_iter(stream, separator, limit)\\n decoder = _url_decode_impl(pair_iter, charset, include_empty, errors)\\n\\n if cls is None:\\n from .datastructures import MultiDict # noqa: F811\\n\\n cls = MultiDict\\n\\n return cls(decoder)\\n\\n\\ndef _url_decode_impl(\\n pair_iter: t.Iterable[t.AnyStr], charset: str, include_empty: bool, errors: str\\n) -> t.Iterator[t.Tuple[str, str]]:\\n for pair in pair_iter:\\n if not pair:\\n continue\\n s = _make_encode_wrapper(pair)\\n equal = s(\\\"=\\\")\\n if equal in pair:\\n key, value = pair.split(equal, 1)\\n else:\\n if not include_empty:\\n continue\\n key = pair\\n value = s(\\\"\\\")\\n yield (\\n url_unquote_plus(key, charset, errors),\\n url_unquote_plus(value, charset, errors),\\n )\\n\\n\\ndef url_encode(\\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\\n charset: str = \\\"utf-8\\\",\\n sort: bool = False,\\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,\\n separator: str = \\\"&\\\",\\n) -> str:\\n \\\"\\\"\\\"URL encode a dict/`MultiDict`. If a value is `None` it will not appear\\n in the result string. Per default only values are encoded into the target\\n charset strings.\\n\\n :param obj: the object to encode into a query string.\\n :param charset: the charset of the query string.\\n :param sort: set to `True` if you want parameters to be sorted by `key`.\\n :param separator: the separator to be used for the pairs.\\n :param key: an optional function to be used for sorting. For more details\\n check out the :func:`sorted` documentation.\\n\\n .. versionchanged:: 2.0\\n The ``encode_keys`` parameter is deprecated and will be removed\\n in Werkzeug 2.1.\\n\\n .. versionchanged:: 0.5\\n Added the ``sort``, ``key``, and ``separator`` parameters.\\n \\\"\\\"\\\"\\n separator = _to_str(separator, \\\"ascii\\\")\\n return separator.join(_url_encode_impl(obj, charset, sort, key))\\n\\n\\ndef url_encode_stream(\\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\\n stream: t.Optional[t.IO[str]] = None,\\n charset: str = \\\"utf-8\\\",\\n sort: bool = False,\\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,\\n separator: str = \\\"&\\\",\\n) -> None:\\n \\\"\\\"\\\"Like :meth:`url_encode` but writes the results to a stream\\n object. If the stream is `None` a generator over all encoded\\n pairs is returned.\\n\\n :param obj: the object to encode into a query string.\\n :param stream: a stream to write the encoded object into or `None` if\\n an iterator over the encoded pairs should be returned. In\\n that case the separator argument is ignored.\\n :param charset: the charset of the query string.\\n :param sort: set to `True` if you want parameters to be sorted by `key`.\\n :param separator: the separator to be used for the pairs.\\n :param key: an optional function to be used for sorting. For more details\\n check out the :func:`sorted` documentation.\\n\\n .. versionchanged:: 2.0\\n The ``encode_keys`` parameter is deprecated and will be removed\\n in Werkzeug 2.1.\\n\\n .. 
"after_files":[{"content":"\"\"\"Functions for working with URLs.\n\nContains implementations of functions from :mod:`urllib.parse` that\nhandle bytes and strings.\n\"\"\"\nimport codecs\nimport os\nimport re\nimport typing as t\n\nfrom ._internal import _check_str_tuple\nfrom ._internal import _decode_idna\nfrom ._internal import _encode_idna\nfrom ._internal import _make_encode_wrapper\nfrom ._internal import _to_str\n\nif t.TYPE_CHECKING:\n    from . 
import datastructures as ds\n\n# A regular expression for what a valid schema looks like\n_scheme_re = re.compile(r\"^[a-zA-Z0-9+-.]+$\")\n\n# Characters that are safe in any part of an URL.\n_always_safe = frozenset(\n bytearray(\n b\"abcdefghijklmnopqrstuvwxyz\"\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n b\"0123456789\"\n b\"-._~\"\n b\"$!'()*+,;\" # RFC3986 sub-delims set, not including query string delimiters &=\n )\n)\n\n_hexdigits = \"0123456789ABCDEFabcdef\"\n_hextobyte = {\n f\"{a}{b}\".encode(\"ascii\"): int(f\"{a}{b}\", 16)\n for a in _hexdigits\n for b in _hexdigits\n}\n_bytetohex = [f\"%{char:02X}\".encode(\"ascii\") for char in range(256)]\n\n\nclass _URLTuple(t.NamedTuple):\n scheme: str\n netloc: str\n path: str\n query: str\n fragment: str\n\n\nclass BaseURL(_URLTuple):\n \"\"\"Superclass of :py:class:`URL` and :py:class:`BytesURL`.\"\"\"\n\n __slots__ = ()\n _at: str\n _colon: str\n _lbracket: str\n _rbracket: str\n\n def __str__(self) -> str:\n return self.to_url()\n\n def replace(self, **kwargs: t.Any) -> \"BaseURL\":\n \"\"\"Return an URL with the same values, except for those parameters\n given new values by whichever keyword arguments are specified.\"\"\"\n return self._replace(**kwargs)\n\n @property\n def host(self) -> t.Optional[str]:\n \"\"\"The host part of the URL if available, otherwise `None`. The\n host is either the hostname or the IP address mentioned in the\n URL. It will not contain the port.\n \"\"\"\n return self._split_host()[0]\n\n @property\n def ascii_host(self) -> t.Optional[str]:\n \"\"\"Works exactly like :attr:`host` but will return a result that\n is restricted to ASCII. If it finds a netloc that is not ASCII\n it will attempt to idna decode it. This is useful for socket\n operations when the URL might include internationalized characters.\n \"\"\"\n rv = self.host\n if rv is not None and isinstance(rv, str):\n try:\n rv = _encode_idna(rv) # type: ignore\n except UnicodeError:\n rv = rv.encode(\"ascii\", \"ignore\") # type: ignore\n return _to_str(rv, \"ascii\", \"ignore\")\n\n @property\n def port(self) -> t.Optional[int]:\n \"\"\"The port in the URL as an integer if it was present, `None`\n otherwise. 
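One visible effect of the extended `_always_safe` table above; this sketch assumes the revised module shown here, and the strings are made up:

```python
# With RFC 3986 sub-delims in _always_safe, url_quote leaves them unescaped.
from werkzeug.urls import url_quote

assert url_quote("it's!") == "it's!"  # "'" and "!" are now always safe
assert url_quote("a&b") == "a%26b"    # "&" and "=" (query delimiters) still encoded
```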
This does not fill in default ports.\n        \"\"\"\n        try:\n            rv = int(_to_str(self._split_host()[1]))\n            if 0 <= rv <= 65535:\n                return rv\n        except (ValueError, TypeError):\n            pass\n        return None\n\n    @property\n    def auth(self) -> t.Optional[str]:\n        \"\"\"The authentication part in the URL if available, `None`\n        otherwise.\n        \"\"\"\n        return self._split_netloc()[0]\n\n    @property\n    def username(self) -> t.Optional[str]:\n        \"\"\"The username if it was part of the URL, `None` otherwise.\n        This undergoes URL decoding and will always be a string.\n        \"\"\"\n        rv = self._split_auth()[0]\n        if rv is not None:\n            return _url_unquote_legacy(rv)\n        return None\n\n    @property\n    def raw_username(self) -> t.Optional[str]:\n        \"\"\"The username if it was part of the URL, `None` otherwise.\n        Unlike :attr:`username` this one is not being decoded.\n        \"\"\"\n        return self._split_auth()[0]\n\n    @property\n    def password(self) -> t.Optional[str]:\n        \"\"\"The password if it was part of the URL, `None` otherwise.\n        This undergoes URL decoding and will always be a string.\n        \"\"\"\n        rv = self._split_auth()[1]\n        if rv is not None:\n            return _url_unquote_legacy(rv)\n        return None\n\n    @property\n    def raw_password(self) -> t.Optional[str]:\n        \"\"\"The password if it was part of the URL, `None` otherwise.\n        Unlike :attr:`password` this one is not being decoded.\n        \"\"\"\n        return self._split_auth()[1]\n\n    def decode_query(self, *args: t.Any, **kwargs: t.Any) -> \"ds.MultiDict[str, str]\":\n        \"\"\"Decodes the query part of the URL. This is a shortcut for\n        calling :func:`url_decode` on the query argument. The arguments and\n        keyword arguments are forwarded to :func:`url_decode` unchanged.\n        \"\"\"\n        return url_decode(self.query, *args, **kwargs)\n\n    def join(self, *args: t.Any, **kwargs: t.Any) -> \"BaseURL\":\n        \"\"\"Joins this URL with another one.  This is just a convenience\n        function for calling into :meth:`url_join` and then parsing the\n        return value again.\n        \"\"\"\n        return url_parse(url_join(self, *args, **kwargs))\n\n    def to_url(self) -> str:\n        \"\"\"Returns a URL string or bytes depending on the type of the\n        information stored.  This is just a convenience function\n        for calling :meth:`url_unparse` for this URL.\n        \"\"\"\n        return url_unparse(self)\n\n    def encode_netloc(self) -> str:\n        \"\"\"Encodes the netloc part to an ASCII safe URL as bytes.\"\"\"\n        rv = self.ascii_host or \"\"\n        if \":\" in rv:\n            rv = f\"[{rv}]\"\n        port = self.port\n        if port is not None:\n            rv = f\"{rv}:{port}\"\n        auth = \":\".join(\n            filter(\n                None,\n                [\n                    url_quote(self.raw_username or \"\", \"utf-8\", \"strict\", \"/:%\"),\n                    url_quote(self.raw_password or \"\", \"utf-8\", \"strict\", \"/:%\"),\n                ],\n            )\n        )\n        if auth:\n            rv = f\"{auth}@{rv}\"\n        return rv\n\n    def decode_netloc(self) -> str:\n        \"\"\"Decodes the netloc part into a string.\"\"\"\n        rv = _decode_idna(self.host or \"\")\n\n        if \":\" in rv:\n            rv = f\"[{rv}]\"\n        port = self.port\n        if port is not None:\n            rv = f\"{rv}:{port}\"\n        auth = \":\".join(\n            filter(\n                None,\n                [\n                    _url_unquote_legacy(self.raw_username or \"\", \"/:%@\"),\n                    _url_unquote_legacy(self.raw_password or \"\", \"/:%@\"),\n                ],\n            )\n        )\n        if auth:\n            rv = f\"{auth}@{rv}\"\n        return rv\n\n    def to_uri_tuple(self) -> \"BaseURL\":\n        \"\"\"Returns a :class:`BytesURL` tuple that holds a URI. 
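A sketch of the netloc and auth accessors defined above; the credentials and URL are hypothetical:

```python
# Sketch of the netloc/auth accessors (hypothetical credentials).
from werkzeug.urls import url_parse

u = url_parse("http://user:p%40ss@example.com:8080/")
assert u.auth == "user:p%40ss"     # raw auth portion of the netloc
assert u.username == "user"
assert u.password == "p@ss"        # decoded via _url_unquote_legacy
assert u.raw_password == "p%40ss"  # undecoded counterpart
```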
    def decode_query(self, *args: t.Any, **kwargs: t.Any) -> "ds.MultiDict[str, str]":
        """Decodes the query part of the URL. This is a shortcut for
        calling :func:`url_decode` on the query argument. The arguments and
        keyword arguments are forwarded to :func:`url_decode` unchanged.
        """
        return url_decode(self.query, *args, **kwargs)

    def join(self, *args: t.Any, **kwargs: t.Any) -> "BaseURL":
        """Joins this URL with another one. This is just a convenience
        function for calling into :meth:`url_join` and then parsing the
        return value again.
        """
        return url_parse(url_join(self, *args, **kwargs))

    def to_url(self) -> str:
        """Returns a URL string or bytes depending on the type of the
        information stored. This is just a convenience function
        for calling :meth:`url_unparse` for this URL.
        """
        return url_unparse(self)

    def encode_netloc(self) -> str:
        """Encodes the netloc part to an ASCII safe URL."""
        rv = self.ascii_host or ""
        if ":" in rv:
            rv = f"[{rv}]"
        port = self.port
        if port is not None:
            rv = f"{rv}:{port}"
        auth = ":".join(
            filter(
                None,
                [
                    url_quote(self.raw_username or "", "utf-8", "strict", "/:%"),
                    url_quote(self.raw_password or "", "utf-8", "strict", "/:%"),
                ],
            )
        )
        if auth:
            rv = f"{auth}@{rv}"
        return rv

    def decode_netloc(self) -> str:
        """Decodes the netloc part into a string."""
        rv = _decode_idna(self.host or "")

        if ":" in rv:
            rv = f"[{rv}]"
        port = self.port
        if port is not None:
            rv = f"{rv}:{port}"
        auth = ":".join(
            filter(
                None,
                [
                    _url_unquote_legacy(self.raw_username or "", "/:%@"),
                    _url_unquote_legacy(self.raw_password or "", "/:%@"),
                ],
            )
        )
        if auth:
            rv = f"{auth}@{rv}"
        return rv

    def to_uri_tuple(self) -> "BaseURL":
        """Returns a :class:`BytesURL` tuple that holds a URI. This will
        encode all the information in the URL properly to ASCII using the
        rules a web browser would follow.

        It's usually more interesting to directly call :meth:`iri_to_uri` which
        will return a string.
        """
        return url_parse(iri_to_uri(self))

    def to_iri_tuple(self) -> "BaseURL":
        """Returns a :class:`URL` tuple that holds an IRI. This will try
        to decode as much information as possible in the URL without
        losing information similar to how a web browser does it for the
        URL bar.

        It's usually more interesting to directly call :meth:`uri_to_iri` which
        will return a string.
        """
        return url_parse(uri_to_iri(self))

    def get_file_location(
        self, pathformat: t.Optional[str] = None
    ) -> t.Tuple[t.Optional[str], t.Optional[str]]:
        """Returns a tuple with the location of the file in the form
        ``(server, location)``. If the netloc is empty in the URL or
        points to localhost, it's represented as ``None``.

        The `pathformat` by default is autodetection but needs to be set
        when working with URLs of a specific system. The supported values
        are ``'windows'`` when working with Windows or DOS paths and
        ``'posix'`` when working with posix paths.

        If the URL does not point to a local file, the server and location
        are both represented as ``None``.

        :param pathformat: The expected format of the path component.
                           Currently ``'windows'`` and ``'posix'`` are
                           supported. Defaults to ``None`` which is
                           autodetect.
        """
        if self.scheme != "file":
            return None, None

        path = url_unquote(self.path)
        host = self.netloc or None

        if pathformat is None:
            if os.name == "nt":
                pathformat = "windows"
            else:
                pathformat = "posix"

        if pathformat == "windows":
            if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:":
                path = f"{path[1:2]}:{path[3:]}"
            windows_share = path[:3] in ("\\" * 3, "/" * 3)
            import ntpath

            path = ntpath.normpath(path)
            # Windows shared drives are represented as ``\\host\\directory``.
            # That results in a URL like ``file://///host/directory``, and a
            # path like ``///host/directory``. We need to special-case this
            # because the path contains the hostname.
            if windows_share and host is None:
                parts = path.lstrip("\\").split("\\", 1)
                if len(parts) == 2:
                    host, path = parts
                else:
                    host = parts[0]
                    path = ""
        elif pathformat == "posix":
            import posixpath

            path = posixpath.normpath(path)
        else:
            raise TypeError(f"Invalid path format {pathformat!r}")

        if host in ("127.0.0.1", "::1", "localhost"):
            host = None

        return host, path
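
    # Illustrative sketch, not part of the original module: mapping ``file://``
    # URLs to ``(server, location)`` pairs. The paths below are hypothetical,
    # and the Windows result depends on ``ntpath.normpath``:
    #
    #     >>> url_parse("file:///etc/hosts").get_file_location("posix")
    #     (None, '/etc/hosts')
    #     >>> url_parse("file://///server/share/f.txt").get_file_location("windows")
    #     ('server', 'share\\f.txt')
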
    def _split_netloc(self) -> t.Tuple[t.Optional[str], str]:
        if self._at in self.netloc:
            auth, _, netloc = self.netloc.partition(self._at)
            return auth, netloc
        return None, self.netloc

    def _split_auth(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:
        auth = self._split_netloc()[0]
        if not auth:
            return None, None
        if self._colon not in auth:
            return auth, None

        username, _, password = auth.partition(self._colon)
        return username, password

    def _split_host(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:
        rv = self._split_netloc()[1]
        if not rv:
            return None, None

        if not rv.startswith(self._lbracket):
            if self._colon in rv:
                host, _, port = rv.partition(self._colon)
                return host, port
            return rv, None

        idx = rv.find(self._rbracket)
        if idx < 0:
            return rv, None

        host = rv[1:idx]
        rest = rv[idx + 1 :]
        if rest.startswith(self._colon):
            return host, rest[1:]
        return host, None


class URL(BaseURL):
    """Represents a parsed URL. This behaves like a regular tuple but
    also has some extra attributes that give further insight into the
    URL.
    """

    __slots__ = ()
    _at = "@"
    _colon = ":"
    _lbracket = "["
    _rbracket = "]"

    def encode(self, charset: str = "utf-8", errors: str = "replace") -> "BytesURL":
        """Encodes the URL to a tuple made out of bytes. The charset is
        only being used for the path, query and fragment.
        """
        return BytesURL(
            self.scheme.encode("ascii"),  # type: ignore
            self.encode_netloc(),
            self.path.encode(charset, errors),  # type: ignore
            self.query.encode(charset, errors),  # type: ignore
            self.fragment.encode(charset, errors),  # type: ignore
        )


class BytesURL(BaseURL):
    """Represents a parsed URL in bytes."""

    __slots__ = ()
    _at = b"@"  # type: ignore
    _colon = b":"  # type: ignore
    _lbracket = b"["  # type: ignore
    _rbracket = b"]"  # type: ignore

    def __str__(self) -> str:
        return self.to_url().decode("utf-8", "replace")  # type: ignore

    def encode_netloc(self) -> bytes:  # type: ignore
        """Returns the netloc unchanged as bytes."""
        return self.netloc  # type: ignore

    def decode(self, charset: str = "utf-8", errors: str = "replace") -> "URL":
        """Decodes the URL to a tuple made out of strings. The charset is
        only being used for the path, query and fragment.
        """
        return URL(
            self.scheme.decode("ascii"),  # type: ignore
            self.decode_netloc(),
            self.path.decode(charset, errors),  # type: ignore
            self.query.decode(charset, errors),  # type: ignore
            self.fragment.decode(charset, errors),  # type: ignore
        )
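
# Illustrative sketch, not part of the original module: ``_split_host``
# understands bracketed IPv6 hosts, and ``URL.encode``/``BytesURL.decode``
# convert between the text and bytes tuples:
#
#     >>> u = url_parse("http://[::1]:5000/")
#     >>> u.host, u.port
#     ('::1', 5000)
#     >>> isinstance(url_parse("http://example.com/").encode(), BytesURL)
#     True
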
_unquote_maps: t.Dict[t.FrozenSet[int], t.Dict[bytes, int]] = {frozenset(): _hextobyte}


def _unquote_to_bytes(
    string: t.Union[str, bytes], unsafe: t.Union[str, bytes] = ""
) -> bytes:
    if isinstance(string, str):
        string = string.encode("utf-8")

    if isinstance(unsafe, str):
        unsafe = unsafe.encode("utf-8")

    unsafe = frozenset(bytearray(unsafe))
    groups = iter(string.split(b"%"))
    result = bytearray(next(groups, b""))

    try:
        hex_to_byte = _unquote_maps[unsafe]
    except KeyError:
        hex_to_byte = _unquote_maps[unsafe] = {
            h: b for h, b in _hextobyte.items() if b not in unsafe
        }

    for group in groups:
        code = group[:2]

        if code in hex_to_byte:
            result.append(hex_to_byte[code])
            result.extend(group[2:])
        else:
            result.append(37)  # %
            result.extend(group)

    return bytes(result)


def _url_encode_impl(
    obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],
    charset: str,
    sort: bool,
    key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]],
) -> t.Iterator[str]:
    from .datastructures import iter_multi_items

    iterable: t.Iterable[t.Tuple[str, str]] = iter_multi_items(obj)

    if sort:
        iterable = sorted(iterable, key=key)

    for key_str, value_str in iterable:
        if value_str is None:
            continue

        if not isinstance(key_str, bytes):
            key_bytes = str(key_str).encode(charset)
        else:
            key_bytes = key_str

        if not isinstance(value_str, bytes):
            value_bytes = str(value_str).encode(charset)
        else:
            value_bytes = value_str

        yield f"{_fast_url_quote_plus(key_bytes)}={_fast_url_quote_plus(value_bytes)}"


def _url_unquote_legacy(value: str, unsafe: str = "") -> str:
    try:
        return url_unquote(value, charset="utf-8", errors="strict", unsafe=unsafe)
    except UnicodeError:
        return url_unquote(value, charset="latin1", unsafe=unsafe)


def url_parse(
    url: str, scheme: t.Optional[str] = None, allow_fragments: bool = True
) -> BaseURL:
    """Parses a URL from a string into a :class:`URL` tuple. If the URL
    is lacking a scheme it can be provided as second argument. Otherwise,
    it is ignored. Optionally fragments can be stripped from the URL
    by setting `allow_fragments` to `False`.

    The inverse of this function is :func:`url_unparse`.

    :param url: the URL to parse.
    :param scheme: the default scheme to use if the URL has no scheme.
    :param allow_fragments: if set to `False` a fragment will be removed
                            from the URL.
    """
    s = _make_encode_wrapper(url)
    is_text_based = isinstance(url, str)

    if scheme is None:
        scheme = s("")
    netloc = query = fragment = s("")
    i = url.find(s(":"))
    if i > 0 and _scheme_re.match(_to_str(url[:i], errors="replace")):
        # make sure "iri" is not actually a port number (in which case
        # "scheme" is really part of the path)
        rest = url[i + 1 :]
        if not rest or any(c not in s("0123456789") for c in rest):
            # not a port number
            scheme, url = url[:i].lower(), rest

    if url[:2] == s("//"):
        delim = len(url)
        for c in s("/?#"):
            wdelim = url.find(c, 2)
            if wdelim >= 0:
                delim = min(delim, wdelim)
        netloc, url = url[2:delim], url[delim:]
        if (s("[") in netloc and s("]") not in netloc) or (
            s("]") in netloc and s("[") not in netloc
        ):
            raise ValueError("Invalid IPv6 URL")

    if allow_fragments and s("#") in url:
        url, fragment = url.split(s("#"), 1)
    if s("?") in url:
        url, query = url.split(s("?"), 1)

    result_type = URL if is_text_based else BytesURL
    return result_type(scheme, netloc, url, query, fragment)
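
# Illustrative sketch, not part of the original module: ``_unquote_to_bytes``
# leaves a percent escape intact when its decoded byte is listed as unsafe,
# which is how reserved characters survive a decode/encode round trip:
#
#     >>> _unquote_to_bytes("a%2Fb")
#     b'a/b'
#     >>> _unquote_to_bytes("a%2Fb", unsafe="/")
#     b'a%2Fb'
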
def _make_fast_url_quote(
    charset: str = "utf-8",
    errors: str = "strict",
    safe: t.Union[str, bytes] = "/:",
    unsafe: t.Union[str, bytes] = "",
) -> t.Callable[[bytes], str]:
    """Precompile the translation table for a URL encoding function.

    Unlike :func:`url_quote`, the generated function only takes the
    string to quote.

    :param charset: The charset to encode the result with.
    :param errors: How to handle encoding errors.
    :param safe: An optional sequence of safe characters to never encode.
    :param unsafe: An optional sequence of unsafe characters to always encode.
    """
    if isinstance(safe, str):
        safe = safe.encode(charset, errors)

    if isinstance(unsafe, str):
        unsafe = unsafe.encode(charset, errors)

    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
    table = [chr(c) if c in safe else f"%{c:02X}" for c in range(256)]

    def quote(string: bytes) -> str:
        return "".join([table[c] for c in string])

    return quote


_fast_url_quote = _make_fast_url_quote()
_fast_quote_plus = _make_fast_url_quote(safe=" ", unsafe="+")


def _fast_url_quote_plus(string: bytes) -> str:
    return _fast_quote_plus(string).replace(" ", "+")
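
# Illustrative sketch, not part of the original module: the precompiled
# 256-entry table turns quoting into one list lookup per input byte, which
# is why the query-string encoder above uses it on hot paths:
#
#     >>> quote_path = _make_fast_url_quote(safe="/")
#     >>> quote_path(b"/a b")
#     '/a%20b'
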
def url_quote(
    string: t.Union[str, bytes],
    charset: str = "utf-8",
    errors: str = "strict",
    safe: t.Union[str, bytes] = "/:",
    unsafe: t.Union[str, bytes] = "",
) -> str:
    """URL encode a single string with a given encoding.

    :param string: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    if not isinstance(string, (str, bytes, bytearray)):
        string = str(string)
    if isinstance(string, str):
        string = string.encode(charset, errors)
    if isinstance(safe, str):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, str):
        unsafe = unsafe.encode(charset, errors)
    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
    rv = bytearray()
    for char in bytearray(string):
        if char in safe:
            rv.append(char)
        else:
            rv.extend(_bytetohex[char])
    return bytes(rv).decode(charset)


def url_quote_plus(
    string: str, charset: str = "utf-8", errors: str = "strict", safe: str = ""
) -> str:
    """URL encode a single string with the given encoding and convert
    whitespace to "+".

    :param string: The string to quote.
    :param charset: The charset to be used.
    :param safe: An optional sequence of safe characters.
    """
    return url_quote(string, charset, errors, safe + " ", "+").replace(" ", "+")


def url_unparse(components: t.Tuple[str, str, str, str, str]) -> str:
    """The reverse operation to :meth:`url_parse`. This accepts arbitrary
    tuples as well as :class:`URL` tuples and returns a URL as a string.

    :param components: the parsed URL as tuple which should be converted
                       into a URL string.
    """
    _check_str_tuple(components)
    scheme, netloc, path, query, fragment = components
    s = _make_encode_wrapper(scheme)
    url = s("")

    # We generally treat file:///x and file:/x the same which is also
    # what browsers seem to do. This also allows us to ignore a schema
    # register for netloc utilization or having to differentiate between
    # empty and missing netloc.
    if netloc or (scheme and path.startswith(s("/"))):
        if path and path[:1] != s("/"):
            path = s("/") + path
        url = s("//") + (netloc or s("")) + path
    elif path:
        url += path
    if scheme:
        url = scheme + s(":") + url
    if query:
        url = url + s("?") + query
    if fragment:
        url = url + s("#") + fragment
    return url
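
# Illustrative sketch, not part of the original module: the difference
# between plain quoting and form-style ("+") quoting, and unparsing a tuple:
#
#     >>> url_quote("hello world/x")
#     'hello%20world/x'
#     >>> url_quote_plus("a b&c")
#     'a+b%26c'
#     >>> url_unparse(("http", "example.com", "/p", "q=1", ""))
#     'http://example.com/p?q=1'
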
def url_unquote(
    s: t.Union[str, bytes],
    charset: str = "utf-8",
    errors: str = "replace",
    unsafe: str = "",
) -> str:
    """URL decode a single string with a given encoding. If the charset
    is set to `None` no decoding is performed and raw bytes are
    returned.

    :param s: the string to unquote.
    :param charset: the charset of the query string. If set to `None`
                    no decoding will take place.
    :param errors: the error handling for the charset decoding.
    """
    rv = _unquote_to_bytes(s, unsafe)
    if charset is None:
        return rv
    return rv.decode(charset, errors)


def url_unquote_plus(
    s: t.Union[str, bytes], charset: str = "utf-8", errors: str = "replace"
) -> str:
    """URL decode a single string with the given `charset` and decode "+" to
    whitespace.

    Per default encoding errors are replaced. If you want a different
    behavior you can set `errors` to ``'ignore'`` or ``'strict'``.

    :param s: The string to unquote.
    :param charset: the charset of the query string. If set to `None`
                    no decoding will take place.
    :param errors: The error handling for the `charset` decoding.
    """
    if isinstance(s, str):
        s = s.replace("+", " ")
    else:
        s = s.replace(b"+", b" ")
    return url_unquote(s, charset, errors)


def url_fix(s: str, charset: str = "utf-8") -> str:
    r"""Sometimes you get a URL from a user that just isn't a real URL
    because it contains unsafe characters like ' ' and so on. This function
    can fix some of the problems in a similar way browsers handle data
    entered by the user:

    >>> url_fix('http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given
                    as a string.
    """
    # First step is to switch to text processing and to convert
    # backslashes (which are invalid in URLs anyways) to slashes. This is
    # consistent with what Chrome does.
    s = _to_str(s, charset, "replace").replace("\\", "/")

    # For the specific case that we look like a malformed windows URL
    # we want to fix this up manually:
    if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"):
        s = f"file:///{s[7:]}"

    url = url_parse(s)
    path = url_quote(url.path, charset, safe="/%+$!*'(),")
    qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),")
    anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),")
    return url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))
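
# Illustrative sketch, not part of the original module: decoding reverses
# the quoting above, with "+" only treated as a space by the plus variant:
#
#     >>> url_unquote("a%20b+c")
#     'a b+c'
#     >>> url_unquote_plus("a%20b+c")
#     'a b c'
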
# not-unreserved characters remain quoted when unquoting to IRI
_to_iri_unsafe = "".join([chr(c) for c in range(128) if c not in _always_safe])


def _codec_error_url_quote(e: UnicodeError) -> t.Tuple[str, int]:
    """Used in :func:`uri_to_iri` after unquoting to re-quote any
    invalid bytes.
    """
    # the docs state that UnicodeError does have these attributes,
    # but mypy isn't picking them up
    out = _fast_url_quote(e.object[e.start : e.end])  # type: ignore
    return out, e.end  # type: ignore


codecs.register_error("werkzeug.url_quote", _codec_error_url_quote)


def uri_to_iri(
    uri: t.Union[str, t.Tuple[str, str, str, str, str]],
    charset: str = "utf-8",
    errors: str = "werkzeug.url_quote",
) -> str:
    """Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
    leaving all reserved and invalid characters quoted. If the URL has
    a domain, it is decoded from Punycode.

    >>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
    'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'

    :param uri: The URI to convert.
    :param charset: The encoding to encode unquoted bytes with.
    :param errors: Error handler to use during ``bytes.encode``. By
        default, invalid bytes are left quoted.

    .. versionchanged:: 0.15
        All reserved and invalid characters remain quoted. Previously,
        only some reserved characters were preserved, and invalid bytes
        were replaced instead of left quoted.

    .. versionadded:: 0.6
    """
    if isinstance(uri, tuple):
        uri = url_unparse(uri)

    uri = url_parse(_to_str(uri, charset))
    path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)
    query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)
    fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)
    return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))


# reserved characters remain unquoted when quoting to URI
_to_uri_safe = ":/?#[]@!$&'()*+,;=%"


def iri_to_uri(
    iri: t.Union[str, t.Tuple[str, str, str, str, str]],
    charset: str = "utf-8",
    errors: str = "strict",
    safe_conversion: bool = False,
) -> str:
    """Convert an IRI to a URI. All non-ASCII and unsafe characters are
    quoted. If the URL has a domain, it is encoded to Punycode.

    >>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
    'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'

    :param iri: The IRI to convert.
    :param charset: The encoding of the IRI.
    :param errors: Error handler to use during ``bytes.encode``.
    :param safe_conversion: Return the URL unchanged if it only contains
        ASCII characters and no whitespace. See the explanation below.

    There is a general problem with IRI conversion with some protocols
    that are in violation of the URI specification. Consider the
    following two IRIs::

        magnet:?xt=uri:whatever
        itms-services://?action=download-manifest

    After parsing, we don't know if the scheme requires the ``//``,
    which is dropped if empty, but conveys different meanings in the
    final URL if it's present or not. In this case, you can use
    ``safe_conversion``, which will return the URL unchanged if it only
    contains ASCII characters and no whitespace. This can result in a
    URI with unquoted characters if it was not already quoted correctly,
    but preserves the URL's semantics. Werkzeug uses this for the
    ``Location`` header for redirects.

    .. versionchanged:: 0.15
        All reserved characters remain unquoted. Previously, only some
        reserved characters were left unquoted.

    .. versionchanged:: 0.9.6
       The ``safe_conversion`` parameter was added.

    .. versionadded:: 0.6
    """
    if isinstance(iri, tuple):
        iri = url_unparse(iri)

    if safe_conversion:
        # If we're not sure if it's safe to convert the URL, and it only
        # contains ASCII characters, return it unconverted.
        try:
            native_iri = _to_str(iri)
            ascii_iri = native_iri.encode("ascii")

            # Only return if it doesn't have whitespace. (Why?)
            if len(ascii_iri.split()) == 1:
                return native_iri
        except UnicodeError:
            pass

    iri = url_parse(_to_str(iri, charset, errors))
    path = url_quote(iri.path, charset, errors, _to_uri_safe)
    query = url_quote(iri.query, charset, errors, _to_uri_safe)
    fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)
    return url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))
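
# Illustrative sketch, not part of the original module: without
# ``safe_conversion`` the empty netloc of scheme-violating URLs is dropped,
# which is exactly the failure mode the docstring above describes:
#
#     >>> iri_to_uri("itms-services://?action=download-manifest")
#     'itms-services:?action=download-manifest'
#     >>> iri_to_uri("itms-services://?action=download-manifest", safe_conversion=True)
#     'itms-services://?action=download-manifest'
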
def url_decode(
    s: t.AnyStr,
    charset: str = "utf-8",
    include_empty: bool = True,
    errors: str = "replace",
    separator: str = "&",
    cls: t.Optional[t.Type["ds.MultiDict"]] = None,
) -> "ds.MultiDict[str, str]":
    """Parse a query string and return it as a :class:`MultiDict`.

    :param s: The query string to parse.
    :param charset: Decode bytes to string with this charset. If not
        given, bytes are returned as-is.
    :param include_empty: Include keys with empty values in the dict.
    :param errors: Error handling behavior when decoding bytes.
    :param separator: Separator character between pairs.
    :param cls: Container to hold result instead of :class:`MultiDict`.

    .. versionchanged:: 2.0
        The ``decode_keys`` parameter is deprecated and will be removed
        in Werkzeug 2.1.

    .. versionchanged:: 0.5
       In previous versions ";" and "&" could be used for url decoding.
       Now only "&" is supported. If you want to use ";", a different
       ``separator`` can be provided.

    .. versionchanged:: 0.5
       The ``cls`` parameter was added.
    """
    if cls is None:
        from .datastructures import MultiDict  # noqa: F811

        cls = MultiDict
    if isinstance(s, str) and not isinstance(separator, str):
        separator = separator.decode(charset or "ascii")
    elif isinstance(s, bytes) and not isinstance(separator, bytes):
        separator = separator.encode(charset or "ascii")  # type: ignore
    return cls(
        _url_decode_impl(
            s.split(separator), charset, include_empty, errors  # type: ignore
        )
    )


def url_decode_stream(
    stream: t.IO[bytes],
    charset: str = "utf-8",
    include_empty: bool = True,
    errors: str = "replace",
    separator: bytes = b"&",
    cls: t.Optional[t.Type["ds.MultiDict"]] = None,
    limit: t.Optional[int] = None,
) -> "ds.MultiDict[str, str]":
    """Works like :func:`url_decode` but decodes a stream. The behavior
    of stream and limit follows functions like
    :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
    directly fed to the `cls` so you can consume the data while it's
    parsed.

    :param stream: a stream with the encoded querystring
    :param charset: the charset of the query string. If set to `None`
                    no decoding will take place.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param limit: the content length of the URL data. Not necessary if
                  a limited stream is provided.

    .. versionchanged:: 2.0
        The ``decode_keys`` and ``return_iterator`` parameters are
        deprecated and will be removed in Werkzeug 2.1.

    .. versionadded:: 0.8
    """
    from .wsgi import make_chunk_iter

    pair_iter = make_chunk_iter(stream, separator, limit)
    decoder = _url_decode_impl(pair_iter, charset, include_empty, errors)

    if cls is None:
        from .datastructures import MultiDict  # noqa: F811

        cls = MultiDict

    return cls(decoder)


def _url_decode_impl(
    pair_iter: t.Iterable[t.AnyStr], charset: str, include_empty: bool, errors: str
) -> t.Iterator[t.Tuple[str, str]]:
    for pair in pair_iter:
        if not pair:
            continue
        s = _make_encode_wrapper(pair)
        equal = s("=")
        if equal in pair:
            key, value = pair.split(equal, 1)
        else:
            if not include_empty:
                continue
            key = pair
            value = s("")
        yield (
            url_unquote_plus(key, charset, errors),
            url_unquote_plus(value, charset, errors),
        )
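
# Illustrative sketch, not part of the original module: repeated keys are
# preserved by the MultiDict rather than overwritten:
#
#     >>> d = url_decode("a=1&a=2&b=hello+world")
#     >>> d.getlist("a"), d["b"]
#     (['1', '2'], 'hello world')
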
def url_encode(
    obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],
    charset: str = "utf-8",
    sort: bool = False,
    key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,
    separator: str = "&",
) -> str:
    """URL encode a dict/`MultiDict`. If a value is `None` it will not appear
    in the result string. Per default only values are encoded into the target
    charset strings.

    :param obj: the object to encode into a query string.
    :param charset: the charset of the query string.
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting. For more details
                check out the :func:`sorted` documentation.

    .. versionchanged:: 2.0
        The ``encode_keys`` parameter is deprecated and will be removed
        in Werkzeug 2.1.

    .. versionchanged:: 0.5
        Added the ``sort``, ``key``, and ``separator`` parameters.
    """
    separator = _to_str(separator, "ascii")
    return separator.join(_url_encode_impl(obj, charset, sort, key))


def url_encode_stream(
    obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],
    stream: t.Optional[t.IO[str]] = None,
    charset: str = "utf-8",
    sort: bool = False,
    key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,
    separator: str = "&",
) -> None:
    """Like :meth:`url_encode` but writes the results to a stream
    object. If the stream is `None` a generator over all encoded
    pairs is returned.

    :param obj: the object to encode into a query string.
    :param stream: a stream to write the encoded object into or `None` if
                   an iterator over the encoded pairs should be returned. In
                   that case the separator argument is ignored.
    :param charset: the charset of the query string.
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting. For more details
                check out the :func:`sorted` documentation.

    .. versionchanged:: 2.0
        The ``encode_keys`` parameter is deprecated and will be removed
        in Werkzeug 2.1.

    .. versionadded:: 0.8
    """
    separator = _to_str(separator, "ascii")
    gen = _url_encode_impl(obj, charset, sort, key)
    if stream is None:
        return gen  # type: ignore
    for idx, chunk in enumerate(gen):
        if idx:
            stream.write(separator)
        stream.write(chunk)
    return None


def url_join(
    base: t.Union[str, t.Tuple[str, str, str, str, str]],
    url: t.Union[str, t.Tuple[str, str, str, str, str]],
    allow_fragments: bool = True,
) -> str:
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter.

    :param base: the base URL for the join operation.
    :param url: the URL to join.
    :param allow_fragments: indicates whether fragments should be allowed.
    """
    if isinstance(base, tuple):
        base = url_unparse(base)
    if isinstance(url, tuple):
        url = url_unparse(url)

    _check_str_tuple((base, url))
    s = _make_encode_wrapper(base)

    if not base:
        return url
    if not url:
        return base

    bscheme, bnetloc, bpath, bquery, bfragment = url_parse(
        base, allow_fragments=allow_fragments
    )
    scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)
    if scheme != bscheme:
        return url
    if netloc:
        return url_unparse((scheme, netloc, path, query, fragment))
    netloc = bnetloc

    if path[:1] == s("/"):
        segments = path.split(s("/"))
    elif not path:
        segments = bpath.split(s("/"))
        if not query:
            query = bquery
    else:
        segments = bpath.split(s("/"))[:-1] + path.split(s("/"))

    # If the rightmost part is "./" we want to keep the slash but
    # remove the dot.
    if segments[-1] == s("."):
        segments[-1] = s("")

    # Resolve ".." and "."
    segments = [segment for segment in segments if segment != s(".")]
    while True:
        i = 1
        n = len(segments) - 1
        while i < n:
            if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")):
                del segments[i - 1 : i + 1]
                break
            i += 1
        else:
            break

    # Remove trailing ".." if the URL is absolute
    unwanted_marker = [s(""), s("..")]
    while segments[:2] == unwanted_marker:
        del segments[1]

    path = s("/").join(segments)
    return url_unparse((scheme, netloc, path, query, fragment))
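
# Illustrative sketch, not part of the original module: relative references
# are resolved against the base, including ".." segments:
#
#     >>> url_join("http://example.com/a/b", "../c")
#     'http://example.com/c'
#     >>> url_join("http://example.com/a/", "b?q=1")
#     'http://example.com/a/b?q=1'
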
\"content\": \"\\\"\\\"\\\"Functions for working with URLs.\\n\\nContains implementations of functions from :mod:`urllib.parse` that\\nhandle bytes and strings.\\n\\\"\\\"\\\"\\nimport codecs\\nimport os\\nimport re\\nimport typing as t\\n\\nfrom ._internal import _check_str_tuple\\nfrom ._internal import _decode_idna\\nfrom ._internal import _encode_idna\\nfrom ._internal import _make_encode_wrapper\\nfrom ._internal import _to_str\\n\\nif t.TYPE_CHECKING:\\n from . import datastructures as ds\\n\\n# A regular expression for what a valid schema looks like\\n_scheme_re = re.compile(r\\\"^[a-zA-Z0-9+-.]+$\\\")\\n\\n# Characters that are safe in any part of an URL.\\n_always_safe = frozenset(\\n bytearray(\\n b\\\"abcdefghijklmnopqrstuvwxyz\\\"\\n b\\\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\\\"\\n b\\\"0123456789\\\"\\n b\\\"-._~\\\"\\n b\\\"$!'()*+,;\\\" # RFC3986 sub-delims set, not including query string delimiters &=\\n )\\n)\\n\\n_hexdigits = \\\"0123456789ABCDEFabcdef\\\"\\n_hextobyte = {\\n f\\\"{a}{b}\\\".encode(\\\"ascii\\\"): int(f\\\"{a}{b}\\\", 16)\\n for a in _hexdigits\\n for b in _hexdigits\\n}\\n_bytetohex = [f\\\"%{char:02X}\\\".encode(\\\"ascii\\\") for char in range(256)]\\n\\n\\nclass _URLTuple(t.NamedTuple):\\n scheme: str\\n netloc: str\\n path: str\\n query: str\\n fragment: str\\n\\n\\nclass BaseURL(_URLTuple):\\n \\\"\\\"\\\"Superclass of :py:class:`URL` and :py:class:`BytesURL`.\\\"\\\"\\\"\\n\\n __slots__ = ()\\n _at: str\\n _colon: str\\n _lbracket: str\\n _rbracket: str\\n\\n def __str__(self) -> str:\\n return self.to_url()\\n\\n def replace(self, **kwargs: t.Any) -> \\\"BaseURL\\\":\\n \\\"\\\"\\\"Return an URL with the same values, except for those parameters\\n given new values by whichever keyword arguments are specified.\\\"\\\"\\\"\\n return self._replace(**kwargs)\\n\\n @property\\n def host(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The host part of the URL if available, otherwise `None`. The\\n host is either the hostname or the IP address mentioned in the\\n URL. It will not contain the port.\\n \\\"\\\"\\\"\\n return self._split_host()[0]\\n\\n @property\\n def ascii_host(self) -> t.Optional[str]:\\n \\\"\\\"\\\"Works exactly like :attr:`host` but will return a result that\\n is restricted to ASCII. If it finds a netloc that is not ASCII\\n it will attempt to idna decode it. This is useful for socket\\n operations when the URL might include internationalized characters.\\n \\\"\\\"\\\"\\n rv = self.host\\n if rv is not None and isinstance(rv, str):\\n try:\\n rv = _encode_idna(rv) # type: ignore\\n except UnicodeError:\\n rv = rv.encode(\\\"ascii\\\", \\\"ignore\\\") # type: ignore\\n return _to_str(rv, \\\"ascii\\\", \\\"ignore\\\")\\n\\n @property\\n def port(self) -> t.Optional[int]:\\n \\\"\\\"\\\"The port in the URL as an integer if it was present, `None`\\n otherwise. 
This does not fill in default ports.\\n \\\"\\\"\\\"\\n try:\\n rv = int(_to_str(self._split_host()[1]))\\n if 0 <= rv <= 65535:\\n return rv\\n except (ValueError, TypeError):\\n pass\\n return None\\n\\n @property\\n def auth(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The authentication part in the URL if available, `None`\\n otherwise.\\n \\\"\\\"\\\"\\n return self._split_netloc()[0]\\n\\n @property\\n def username(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The username if it was part of the URL, `None` otherwise.\\n This undergoes URL decoding and will always be a string.\\n \\\"\\\"\\\"\\n rv = self._split_auth()[0]\\n if rv is not None:\\n return _url_unquote_legacy(rv)\\n return None\\n\\n @property\\n def raw_username(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The username if it was part of the URL, `None` otherwise.\\n Unlike :attr:`username` this one is not being decoded.\\n \\\"\\\"\\\"\\n return self._split_auth()[0]\\n\\n @property\\n def password(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The password if it was part of the URL, `None` otherwise.\\n This undergoes URL decoding and will always be a string.\\n \\\"\\\"\\\"\\n rv = self._split_auth()[1]\\n if rv is not None:\\n return _url_unquote_legacy(rv)\\n return None\\n\\n @property\\n def raw_password(self) -> t.Optional[str]:\\n \\\"\\\"\\\"The password if it was part of the URL, `None` otherwise.\\n Unlike :attr:`password` this one is not being decoded.\\n \\\"\\\"\\\"\\n return self._split_auth()[1]\\n\\n def decode_query(self, *args: t.Any, **kwargs: t.Any) -> \\\"ds.MultiDict[str, str]\\\":\\n \\\"\\\"\\\"Decodes the query part of the URL. Ths is a shortcut for\\n calling :func:`url_decode` on the query argument. The arguments and\\n keyword arguments are forwarded to :func:`url_decode` unchanged.\\n \\\"\\\"\\\"\\n return url_decode(self.query, *args, **kwargs)\\n\\n def join(self, *args: t.Any, **kwargs: t.Any) -> \\\"BaseURL\\\":\\n \\\"\\\"\\\"Joins this URL with another one. This is just a convenience\\n function for calling into :meth:`url_join` and then parsing the\\n return value again.\\n \\\"\\\"\\\"\\n return url_parse(url_join(self, *args, **kwargs))\\n\\n def to_url(self) -> str:\\n \\\"\\\"\\\"Returns a URL string or bytes depending on the type of the\\n information stored. 
This is just a convenience function\\n for calling :meth:`url_unparse` for this URL.\\n \\\"\\\"\\\"\\n return url_unparse(self)\\n\\n def encode_netloc(self) -> str:\\n \\\"\\\"\\\"Encodes the netloc part to an ASCII safe URL as bytes.\\\"\\\"\\\"\\n rv = self.ascii_host or \\\"\\\"\\n if \\\":\\\" in rv:\\n rv = f\\\"[{rv}]\\\"\\n port = self.port\\n if port is not None:\\n rv = f\\\"{rv}:{port}\\\"\\n auth = \\\":\\\".join(\\n filter(\\n None,\\n [\\n url_quote(self.raw_username or \\\"\\\", \\\"utf-8\\\", \\\"strict\\\", \\\"/:%\\\"),\\n url_quote(self.raw_password or \\\"\\\", \\\"utf-8\\\", \\\"strict\\\", \\\"/:%\\\"),\\n ],\\n )\\n )\\n if auth:\\n rv = f\\\"{auth}@{rv}\\\"\\n return rv\\n\\n def decode_netloc(self) -> str:\\n \\\"\\\"\\\"Decodes the netloc part into a string.\\\"\\\"\\\"\\n rv = _decode_idna(self.host or \\\"\\\")\\n\\n if \\\":\\\" in rv:\\n rv = f\\\"[{rv}]\\\"\\n port = self.port\\n if port is not None:\\n rv = f\\\"{rv}:{port}\\\"\\n auth = \\\":\\\".join(\\n filter(\\n None,\\n [\\n _url_unquote_legacy(self.raw_username or \\\"\\\", \\\"/:%@\\\"),\\n _url_unquote_legacy(self.raw_password or \\\"\\\", \\\"/:%@\\\"),\\n ],\\n )\\n )\\n if auth:\\n rv = f\\\"{auth}@{rv}\\\"\\n return rv\\n\\n def to_uri_tuple(self) -> \\\"BaseURL\\\":\\n \\\"\\\"\\\"Returns a :class:`BytesURL` tuple that holds a URI. This will\\n encode all the information in the URL properly to ASCII using the\\n rules a web browser would follow.\\n\\n It's usually more interesting to directly call :meth:`iri_to_uri` which\\n will return a string.\\n \\\"\\\"\\\"\\n return url_parse(iri_to_uri(self))\\n\\n def to_iri_tuple(self) -> \\\"BaseURL\\\":\\n \\\"\\\"\\\"Returns a :class:`URL` tuple that holds a IRI. This will try\\n to decode as much information as possible in the URL without\\n losing information similar to how a web browser does it for the\\n URL bar.\\n\\n It's usually more interesting to directly call :meth:`uri_to_iri` which\\n will return a string.\\n \\\"\\\"\\\"\\n return url_parse(uri_to_iri(self))\\n\\n def get_file_location(\\n self, pathformat: t.Optional[str] = None\\n ) -> t.Tuple[t.Optional[str], t.Optional[str]]:\\n \\\"\\\"\\\"Returns a tuple with the location of the file in the form\\n ``(server, location)``. If the netloc is empty in the URL or\\n points to localhost, it's represented as ``None``.\\n\\n The `pathformat` by default is autodetection but needs to be set\\n when working with URLs of a specific system. The supported values\\n are ``'windows'`` when working with Windows or DOS paths and\\n ``'posix'`` when working with posix paths.\\n\\n If the URL does not point to a local file, the server and location\\n are both represented as ``None``.\\n\\n :param pathformat: The expected format of the path component.\\n Currently ``'windows'`` and ``'posix'`` are\\n supported. 
Defaults to ``None`` which is\\n autodetect.\\n \\\"\\\"\\\"\\n if self.scheme != \\\"file\\\":\\n return None, None\\n\\n path = url_unquote(self.path)\\n host = self.netloc or None\\n\\n if pathformat is None:\\n if os.name == \\\"nt\\\":\\n pathformat = \\\"windows\\\"\\n else:\\n pathformat = \\\"posix\\\"\\n\\n if pathformat == \\\"windows\\\":\\n if path[:1] == \\\"/\\\" and path[1:2].isalpha() and path[2:3] in \\\"|:\\\":\\n path = f\\\"{path[1:2]}:{path[3:]}\\\"\\n windows_share = path[:3] in (\\\"\\\\\\\\\\\" * 3, \\\"/\\\" * 3)\\n import ntpath\\n\\n path = ntpath.normpath(path)\\n # Windows shared drives are represented as ``\\\\\\\\host\\\\\\\\directory``.\\n # That results in a URL like ``file://///host/directory``, and a\\n # path like ``///host/directory``. We need to special-case this\\n # because the path contains the hostname.\\n if windows_share and host is None:\\n parts = path.lstrip(\\\"\\\\\\\\\\\").split(\\\"\\\\\\\\\\\", 1)\\n if len(parts) == 2:\\n host, path = parts\\n else:\\n host = parts[0]\\n path = \\\"\\\"\\n elif pathformat == \\\"posix\\\":\\n import posixpath\\n\\n path = posixpath.normpath(path)\\n else:\\n raise TypeError(f\\\"Invalid path format {pathformat!r}\\\")\\n\\n if host in (\\\"127.0.0.1\\\", \\\"::1\\\", \\\"localhost\\\"):\\n host = None\\n\\n return host, path\\n\\n def _split_netloc(self) -> t.Tuple[t.Optional[str], str]:\\n if self._at in self.netloc:\\n auth, _, netloc = self.netloc.partition(self._at)\\n return auth, netloc\\n return None, self.netloc\\n\\n def _split_auth(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:\\n auth = self._split_netloc()[0]\\n if not auth:\\n return None, None\\n if self._colon not in auth:\\n return auth, None\\n\\n username, _, password = auth.partition(self._colon)\\n return username, password\\n\\n def _split_host(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:\\n rv = self._split_netloc()[1]\\n if not rv:\\n return None, None\\n\\n if not rv.startswith(self._lbracket):\\n if self._colon in rv:\\n host, _, port = rv.partition(self._colon)\\n return host, port\\n return rv, None\\n\\n idx = rv.find(self._rbracket)\\n if idx < 0:\\n return rv, None\\n\\n host = rv[1:idx]\\n rest = rv[idx + 1 :]\\n if rest.startswith(self._colon):\\n return host, rest[1:]\\n return host, None\\n\\n\\nclass URL(BaseURL):\\n \\\"\\\"\\\"Represents a parsed URL. This behaves like a regular tuple but\\n also has some extra attributes that give further insight into the\\n URL.\\n \\\"\\\"\\\"\\n\\n __slots__ = ()\\n _at = \\\"@\\\"\\n _colon = \\\":\\\"\\n _lbracket = \\\"[\\\"\\n _rbracket = \\\"]\\\"\\n\\n def encode(self, charset: str = \\\"utf-8\\\", errors: str = \\\"replace\\\") -> \\\"BytesURL\\\":\\n \\\"\\\"\\\"Encodes the URL to a tuple made out of bytes. 
The charset is\\n only being used for the path, query and fragment.\\n \\\"\\\"\\\"\\n return BytesURL(\\n self.scheme.encode(\\\"ascii\\\"), # type: ignore\\n self.encode_netloc(),\\n self.path.encode(charset, errors), # type: ignore\\n self.query.encode(charset, errors), # type: ignore\\n self.fragment.encode(charset, errors), # type: ignore\\n )\\n\\n\\nclass BytesURL(BaseURL):\\n \\\"\\\"\\\"Represents a parsed URL in bytes.\\\"\\\"\\\"\\n\\n __slots__ = ()\\n _at = b\\\"@\\\" # type: ignore\\n _colon = b\\\":\\\" # type: ignore\\n _lbracket = b\\\"[\\\" # type: ignore\\n _rbracket = b\\\"]\\\" # type: ignore\\n\\n def __str__(self) -> str:\\n return self.to_url().decode(\\\"utf-8\\\", \\\"replace\\\") # type: ignore\\n\\n def encode_netloc(self) -> bytes: # type: ignore\\n \\\"\\\"\\\"Returns the netloc unchanged as bytes.\\\"\\\"\\\"\\n return self.netloc # type: ignore\\n\\n def decode(self, charset: str = \\\"utf-8\\\", errors: str = \\\"replace\\\") -> \\\"URL\\\":\\n \\\"\\\"\\\"Decodes the URL to a tuple made out of strings. The charset is\\n only being used for the path, query and fragment.\\n \\\"\\\"\\\"\\n return URL(\\n self.scheme.decode(\\\"ascii\\\"), # type: ignore\\n self.decode_netloc(),\\n self.path.decode(charset, errors), # type: ignore\\n self.query.decode(charset, errors), # type: ignore\\n self.fragment.decode(charset, errors), # type: ignore\\n )\\n\\n\\n_unquote_maps: t.Dict[t.FrozenSet[int], t.Dict[bytes, int]] = {frozenset(): _hextobyte}\\n\\n\\ndef _unquote_to_bytes(\\n string: t.Union[str, bytes], unsafe: t.Union[str, bytes] = \\\"\\\"\\n) -> bytes:\\n if isinstance(string, str):\\n string = string.encode(\\\"utf-8\\\")\\n\\n if isinstance(unsafe, str):\\n unsafe = unsafe.encode(\\\"utf-8\\\")\\n\\n unsafe = frozenset(bytearray(unsafe))\\n groups = iter(string.split(b\\\"%\\\"))\\n result = bytearray(next(groups, b\\\"\\\"))\\n\\n try:\\n hex_to_byte = _unquote_maps[unsafe]\\n except KeyError:\\n hex_to_byte = _unquote_maps[unsafe] = {\\n h: b for h, b in _hextobyte.items() if b not in unsafe\\n }\\n\\n for group in groups:\\n code = group[:2]\\n\\n if code in hex_to_byte:\\n result.append(hex_to_byte[code])\\n result.extend(group[2:])\\n else:\\n result.append(37) # %\\n result.extend(group)\\n\\n return bytes(result)\\n\\n\\ndef _url_encode_impl(\\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\\n charset: str,\\n sort: bool,\\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]],\\n) -> t.Iterator[str]:\\n from .datastructures import iter_multi_items\\n\\n iterable: t.Iterable[t.Tuple[str, str]] = iter_multi_items(obj)\\n\\n if sort:\\n iterable = sorted(iterable, key=key)\\n\\n for key_str, value_str in iterable:\\n if value_str is None:\\n continue\\n\\n if not isinstance(key_str, bytes):\\n key_bytes = str(key_str).encode(charset)\\n else:\\n key_bytes = key_str\\n\\n if not isinstance(value_str, bytes):\\n value_bytes = str(value_str).encode(charset)\\n else:\\n value_bytes = value_str\\n\\n yield f\\\"{_fast_url_quote_plus(key_bytes)}={_fast_url_quote_plus(value_bytes)}\\\"\\n\\n\\ndef _url_unquote_legacy(value: str, unsafe: str = \\\"\\\") -> str:\\n try:\\n return url_unquote(value, charset=\\\"utf-8\\\", errors=\\\"strict\\\", unsafe=unsafe)\\n except UnicodeError:\\n return url_unquote(value, charset=\\\"latin1\\\", unsafe=unsafe)\\n\\n\\ndef url_parse(\\n url: str, scheme: t.Optional[str] = None, allow_fragments: bool = True\\n) -> BaseURL:\\n \\\"\\\"\\\"Parses a URL from a string into a :class:`URL` tuple. 
If the URL\\n is lacking a scheme it can be provided as second argument. Otherwise,\\n it is ignored. Optionally fragments can be stripped from the URL\\n by setting `allow_fragments` to `False`.\\n\\n The inverse of this function is :func:`url_unparse`.\\n\\n :param url: the URL to parse.\\n :param scheme: the default schema to use if the URL is schemaless.\\n :param allow_fragments: if set to `False` a fragment will be removed\\n from the URL.\\n \\\"\\\"\\\"\\n s = _make_encode_wrapper(url)\\n is_text_based = isinstance(url, str)\\n\\n if scheme is None:\\n scheme = s(\\\"\\\")\\n netloc = query = fragment = s(\\\"\\\")\\n i = url.find(s(\\\":\\\"))\\n if i > 0 and _scheme_re.match(_to_str(url[:i], errors=\\\"replace\\\")):\\n # make sure \\\"iri\\\" is not actually a port number (in which case\\n # \\\"scheme\\\" is really part of the path)\\n rest = url[i + 1 :]\\n if not rest or any(c not in s(\\\"0123456789\\\") for c in rest):\\n # not a port number\\n scheme, url = url[:i].lower(), rest\\n\\n if url[:2] == s(\\\"//\\\"):\\n delim = len(url)\\n for c in s(\\\"/?#\\\"):\\n wdelim = url.find(c, 2)\\n if wdelim >= 0:\\n delim = min(delim, wdelim)\\n netloc, url = url[2:delim], url[delim:]\\n if (s(\\\"[\\\") in netloc and s(\\\"]\\\") not in netloc) or (\\n s(\\\"]\\\") in netloc and s(\\\"[\\\") not in netloc\\n ):\\n raise ValueError(\\\"Invalid IPv6 URL\\\")\\n\\n if allow_fragments and s(\\\"#\\\") in url:\\n url, fragment = url.split(s(\\\"#\\\"), 1)\\n if s(\\\"?\\\") in url:\\n url, query = url.split(s(\\\"?\\\"), 1)\\n\\n result_type = URL if is_text_based else BytesURL\\n return result_type(scheme, netloc, url, query, fragment)\\n\\n\\ndef _make_fast_url_quote(\\n charset: str = \\\"utf-8\\\",\\n errors: str = \\\"strict\\\",\\n safe: t.Union[str, bytes] = \\\"/:\\\",\\n unsafe: t.Union[str, bytes] = \\\"\\\",\\n) -> t.Callable[[bytes], str]:\\n \\\"\\\"\\\"Precompile the translation table for a URL encoding function.\\n\\n Unlike :func:`url_quote`, the generated function only takes the\\n string to quote.\\n\\n :param charset: The charset to encode the result with.\\n :param errors: How to handle encoding errors.\\n :param safe: An optional sequence of safe characters to never encode.\\n :param unsafe: An optional sequence of unsafe characters to always encode.\\n \\\"\\\"\\\"\\n if isinstance(safe, str):\\n safe = safe.encode(charset, errors)\\n\\n if isinstance(unsafe, str):\\n unsafe = unsafe.encode(charset, errors)\\n\\n safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))\\n table = [chr(c) if c in safe else f\\\"%{c:02X}\\\" for c in range(256)]\\n\\n def quote(string: bytes) -> str:\\n return \\\"\\\".join([table[c] for c in string])\\n\\n return quote\\n\\n\\n_fast_url_quote = _make_fast_url_quote()\\n_fast_quote_plus = _make_fast_url_quote(safe=\\\" \\\", unsafe=\\\"+\\\")\\n\\n\\ndef _fast_url_quote_plus(string: bytes) -> str:\\n return _fast_quote_plus(string).replace(\\\" \\\", \\\"+\\\")\\n\\n\\ndef url_quote(\\n string: t.Union[str, bytes],\\n charset: str = \\\"utf-8\\\",\\n errors: str = \\\"strict\\\",\\n safe: t.Union[str, bytes] = \\\"/:\\\",\\n unsafe: t.Union[str, bytes] = \\\"\\\",\\n) -> str:\\n \\\"\\\"\\\"URL encode a single string with a given encoding.\\n\\n :param s: the string to quote.\\n :param charset: the charset to be used.\\n :param safe: an optional sequence of safe characters.\\n :param unsafe: an optional sequence of unsafe characters.\\n\\n .. 
versionadded:: 0.9.2\\n The `unsafe` parameter was added.\\n \\\"\\\"\\\"\\n if not isinstance(string, (str, bytes, bytearray)):\\n string = str(string)\\n if isinstance(string, str):\\n string = string.encode(charset, errors)\\n if isinstance(safe, str):\\n safe = safe.encode(charset, errors)\\n if isinstance(unsafe, str):\\n unsafe = unsafe.encode(charset, errors)\\n safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))\\n rv = bytearray()\\n for char in bytearray(string):\\n if char in safe:\\n rv.append(char)\\n else:\\n rv.extend(_bytetohex[char])\\n return bytes(rv).decode(charset)\\n\\n\\ndef url_quote_plus(\\n string: str, charset: str = \\\"utf-8\\\", errors: str = \\\"strict\\\", safe: str = \\\"\\\"\\n) -> str:\\n \\\"\\\"\\\"URL encode a single string with the given encoding and convert\\n whitespace to \\\"+\\\".\\n\\n :param s: The string to quote.\\n :param charset: The charset to be used.\\n :param safe: An optional sequence of safe characters.\\n \\\"\\\"\\\"\\n return url_quote(string, charset, errors, safe + \\\" \\\", \\\"+\\\").replace(\\\" \\\", \\\"+\\\")\\n\\n\\ndef url_unparse(components: t.Tuple[str, str, str, str, str]) -> str:\\n \\\"\\\"\\\"The reverse operation to :meth:`url_parse`. This accepts arbitrary\\n as well as :class:`URL` tuples and returns a URL as a string.\\n\\n :param components: the parsed URL as tuple which should be converted\\n into a URL string.\\n \\\"\\\"\\\"\\n _check_str_tuple(components)\\n scheme, netloc, path, query, fragment = components\\n s = _make_encode_wrapper(scheme)\\n url = s(\\\"\\\")\\n\\n # We generally treat file:///x and file:/x the same which is also\\n # what browsers seem to do. This also allows us to ignore a schema\\n # register for netloc utilization or having to differentiate between\\n # empty and missing netloc.\\n if netloc or (scheme and path.startswith(s(\\\"/\\\"))):\\n if path and path[:1] != s(\\\"/\\\"):\\n path = s(\\\"/\\\") + path\\n url = s(\\\"//\\\") + (netloc or s(\\\"\\\")) + path\\n elif path:\\n url += path\\n if scheme:\\n url = scheme + s(\\\":\\\") + url\\n if query:\\n url = url + s(\\\"?\\\") + query\\n if fragment:\\n url = url + s(\\\"#\\\") + fragment\\n return url\\n\\n\\ndef url_unquote(\\n s: t.Union[str, bytes],\\n charset: str = \\\"utf-8\\\",\\n errors: str = \\\"replace\\\",\\n unsafe: str = \\\"\\\",\\n) -> str:\\n \\\"\\\"\\\"URL decode a single string with a given encoding. If the charset\\n is set to `None` no decoding is performed and raw bytes are\\n returned.\\n\\n :param s: the string to unquote.\\n :param charset: the charset of the query string. If set to `None`\\n no decoding will take place.\\n :param errors: the error handling for the charset decoding.\\n \\\"\\\"\\\"\\n rv = _unquote_to_bytes(s, unsafe)\\n if charset is None:\\n return rv\\n return rv.decode(charset, errors)\\n\\n\\ndef url_unquote_plus(\\n s: t.Union[str, bytes], charset: str = \\\"utf-8\\\", errors: str = \\\"replace\\\"\\n) -> str:\\n \\\"\\\"\\\"URL decode a single string with the given `charset` and decode \\\"+\\\" to\\n whitespace.\\n\\n Per default encoding errors are ignored. If you want a different behavior\\n you can set `errors` to ``'replace'`` or ``'strict'``.\\n\\n :param s: The string to unquote.\\n :param charset: the charset of the query string. 
If set to `None`\\n no decoding will take place.\\n :param errors: The error handling for the `charset` decoding.\\n \\\"\\\"\\\"\\n if isinstance(s, str):\\n s = s.replace(\\\"+\\\", \\\" \\\")\\n else:\\n s = s.replace(b\\\"+\\\", b\\\" \\\")\\n return url_unquote(s, charset, errors)\\n\\n\\ndef url_fix(s: str, charset: str = \\\"utf-8\\\") -> str:\\n r\\\"\\\"\\\"Sometimes you get an URL by a user that just isn't a real URL because\\n it contains unsafe characters like ' ' and so on. This function can fix\\n some of the problems in a similar way browsers handle data entered by the\\n user:\\n\\n >>> url_fix('http://de.wikipedia.org/wiki/Elf (Begriffskl\\\\xe4rung)')\\n 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'\\n\\n :param s: the string with the URL to fix.\\n :param charset: The target charset for the URL if the url was given\\n as a string.\\n \\\"\\\"\\\"\\n # First step is to switch to text processing and to convert\\n # backslashes (which are invalid in URLs anyways) to slashes. This is\\n # consistent with what Chrome does.\\n s = _to_str(s, charset, \\\"replace\\\").replace(\\\"\\\\\\\\\\\", \\\"/\\\")\\n\\n # For the specific case that we look like a malformed windows URL\\n # we want to fix this up manually:\\n if s.startswith(\\\"file://\\\") and s[7:8].isalpha() and s[8:10] in (\\\":/\\\", \\\"|/\\\"):\\n s = f\\\"file:///{s[7:]}\\\"\\n\\n url = url_parse(s)\\n path = url_quote(url.path, charset, safe=\\\"/%+$!*'(),\\\")\\n qs = url_quote_plus(url.query, charset, safe=\\\":&%=+$!*'(),\\\")\\n anchor = url_quote_plus(url.fragment, charset, safe=\\\":&%=+$!*'(),\\\")\\n return url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))\\n\\n\\n# not-unreserved characters remain quoted when unquoting to IRI\\n_to_iri_unsafe = \\\"\\\".join([chr(c) for c in range(128) if c not in _always_safe])\\n\\n\\ndef _codec_error_url_quote(e: UnicodeError) -> t.Tuple[str, int]:\\n \\\"\\\"\\\"Used in :func:`uri_to_iri` after unquoting to re-quote any\\n invalid bytes.\\n \\\"\\\"\\\"\\n # the docs state that UnicodeError does have these attributes,\\n # but mypy isn't picking them up\\n out = _fast_url_quote(e.object[e.start : e.end]) # type: ignore\\n return out, e.end # type: ignore\\n\\n\\ncodecs.register_error(\\\"werkzeug.url_quote\\\", _codec_error_url_quote)\\n\\n\\ndef uri_to_iri(\\n uri: t.Union[str, t.Tuple[str, str, str, str, str]],\\n charset: str = \\\"utf-8\\\",\\n errors: str = \\\"werkzeug.url_quote\\\",\\n) -> str:\\n \\\"\\\"\\\"Convert a URI to an IRI. All valid UTF-8 characters are unquoted,\\n leaving all reserved and invalid characters quoted. If the URL has\\n a domain, it is decoded from Punycode.\\n\\n >>> uri_to_iri(\\\"http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF\\\")\\n 'http://\\\\\\\\u2603.net/p\\\\\\\\xe5th?q=\\\\\\\\xe8ry%DF'\\n\\n :param uri: The URI to convert.\\n :param charset: The encoding to encode unquoted bytes with.\\n :param errors: Error handler to use during ``bytes.encode``. By\\n default, invalid bytes are left quoted.\\n\\n .. versionchanged:: 0.15\\n All reserved and invalid characters remain quoted. Previously,\\n only some reserved characters were preserved, and invalid bytes\\n were replaced instead of left quoted.\\n\\n .. 
versionadded:: 0.6\\n \\\"\\\"\\\"\\n if isinstance(uri, tuple):\\n uri = url_unparse(uri)\\n\\n uri = url_parse(_to_str(uri, charset))\\n path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)\\n query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)\\n fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)\\n return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))\\n\\n\\n# reserved characters remain unquoted when quoting to URI\\n_to_uri_safe = \\\":/?#[]@!$&'()*+,;=%\\\"\\n\\n\\ndef iri_to_uri(\\n iri: t.Union[str, t.Tuple[str, str, str, str, str]],\\n charset: str = \\\"utf-8\\\",\\n errors: str = \\\"strict\\\",\\n safe_conversion: bool = False,\\n) -> str:\\n \\\"\\\"\\\"Convert an IRI to a URI. All non-ASCII and unsafe characters are\\n quoted. If the URL has a domain, it is encoded to Punycode.\\n\\n >>> iri_to_uri('http://\\\\\\\\u2603.net/p\\\\\\\\xe5th?q=\\\\\\\\xe8ry%DF')\\n 'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'\\n\\n :param iri: The IRI to convert.\\n :param charset: The encoding of the IRI.\\n :param errors: Error handler to use during ``bytes.encode``.\\n :param safe_conversion: Return the URL unchanged if it only contains\\n ASCII characters and no whitespace. See the explanation below.\\n\\n There is a general problem with IRI conversion with some protocols\\n that are in violation of the URI specification. Consider the\\n following two IRIs::\\n\\n magnet:?xt=uri:whatever\\n itms-services://?action=download-manifest\\n\\n After parsing, we don't know if the scheme requires the ``//``,\\n which is dropped if empty, but conveys different meanings in the\\n final URL if it's present or not. In this case, you can use\\n ``safe_conversion``, which will return the URL unchanged if it only\\n contains ASCII characters and no whitespace. This can result in a\\n URI with unquoted characters if it was not already quoted correctly,\\n but preserves the URL's semantics. Werkzeug uses this for the\\n ``Location`` header for redirects.\\n\\n .. versionchanged:: 0.15\\n All reserved characters remain unquoted. Previously, only some\\n reserved characters were left unquoted.\\n\\n .. versionchanged:: 0.9.6\\n The ``safe_conversion`` parameter was added.\\n\\n .. versionadded:: 0.6\\n \\\"\\\"\\\"\\n if isinstance(iri, tuple):\\n iri = url_unparse(iri)\\n\\n if safe_conversion:\\n # If we're not sure if it's safe to convert the URL, and it only\\n # contains ASCII characters, return it unconverted.\\n try:\\n native_iri = _to_str(iri)\\n ascii_iri = native_iri.encode(\\\"ascii\\\")\\n\\n # Only return if it doesn't have whitespace. (Why?)\\n if len(ascii_iri.split()) == 1:\\n return native_iri\\n except UnicodeError:\\n pass\\n\\n iri = url_parse(_to_str(iri, charset, errors))\\n path = url_quote(iri.path, charset, errors, _to_uri_safe)\\n query = url_quote(iri.query, charset, errors, _to_uri_safe)\\n fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)\\n return url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))\\n\\n\\ndef url_decode(\\n s: t.AnyStr,\\n charset: str = \\\"utf-8\\\",\\n include_empty: bool = True,\\n errors: str = \\\"replace\\\",\\n separator: str = \\\"&\\\",\\n cls: t.Optional[t.Type[\\\"ds.MultiDict\\\"]] = None,\\n) -> \\\"ds.MultiDict[str, str]\\\":\\n \\\"\\\"\\\"Parse a query string and return it as a :class:`MultiDict`.\\n\\n :param s: The query string to parse.\\n :param charset: Decode bytes to string with this charset. 
If not\\n given, bytes are returned as-is.\\n :param include_empty: Include keys with empty values in the dict.\\n :param errors: Error handling behavior when decoding bytes.\\n :param separator: Separator character between pairs.\\n :param cls: Container to hold result instead of :class:`MultiDict`.\\n\\n .. versionchanged:: 2.0\\n The ``decode_keys`` parameter is deprecated and will be removed\\n in Werkzeug 2.1.\\n\\n .. versionchanged:: 0.5\\n In previous versions \\\";\\\" and \\\"&\\\" could be used for url decoding.\\n Now only \\\"&\\\" is supported. If you want to use \\\";\\\", a different\\n ``separator`` can be provided.\\n\\n .. versionchanged:: 0.5\\n The ``cls`` parameter was added.\\n \\\"\\\"\\\"\\n if cls is None:\\n from .datastructures import MultiDict # noqa: F811\\n\\n cls = MultiDict\\n if isinstance(s, str) and not isinstance(separator, str):\\n separator = separator.decode(charset or \\\"ascii\\\")\\n elif isinstance(s, bytes) and not isinstance(separator, bytes):\\n separator = separator.encode(charset or \\\"ascii\\\") # type: ignore\\n return cls(\\n _url_decode_impl(\\n s.split(separator), charset, include_empty, errors # type: ignore\\n )\\n )\\n\\n\\ndef url_decode_stream(\\n stream: t.IO[bytes],\\n charset: str = \\\"utf-8\\\",\\n include_empty: bool = True,\\n errors: str = \\\"replace\\\",\\n separator: bytes = b\\\"&\\\",\\n cls: t.Optional[t.Type[\\\"ds.MultiDict\\\"]] = None,\\n limit: t.Optional[int] = None,\\n) -> \\\"ds.MultiDict[str, str]\\\":\\n \\\"\\\"\\\"Works like :func:`url_decode` but decodes a stream. The behavior\\n of stream and limit follows functions like\\n :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is\\n directly fed to the `cls` so you can consume the data while it's\\n parsed.\\n\\n :param stream: a stream with the encoded querystring\\n :param charset: the charset of the query string. If set to `None`\\n no decoding will take place.\\n :param include_empty: Set to `False` if you don't want empty values to\\n appear in the dict.\\n :param errors: the decoding error behavior.\\n :param separator: the pair separator to be used, defaults to ``&``\\n :param cls: an optional dict class to use. If this is not specified\\n or `None` the default :class:`MultiDict` is used.\\n :param limit: the content length of the URL data. Not necessary if\\n a limited stream is provided.\\n\\n .. versionchanged:: 2.0\\n The ``decode_keys`` and ``return_iterator`` parameters are\\n deprecated and will be removed in Werkzeug 2.1.\\n\\n .. 
versionadded:: 0.8\\n \\\"\\\"\\\"\\n from .wsgi import make_chunk_iter\\n\\n pair_iter = make_chunk_iter(stream, separator, limit)\\n decoder = _url_decode_impl(pair_iter, charset, include_empty, errors)\\n\\n if cls is None:\\n from .datastructures import MultiDict # noqa: F811\\n\\n cls = MultiDict\\n\\n return cls(decoder)\\n\\n\\ndef _url_decode_impl(\\n pair_iter: t.Iterable[t.AnyStr], charset: str, include_empty: bool, errors: str\\n) -> t.Iterator[t.Tuple[str, str]]:\\n for pair in pair_iter:\\n if not pair:\\n continue\\n s = _make_encode_wrapper(pair)\\n equal = s(\\\"=\\\")\\n if equal in pair:\\n key, value = pair.split(equal, 1)\\n else:\\n if not include_empty:\\n continue\\n key = pair\\n value = s(\\\"\\\")\\n yield (\\n url_unquote_plus(key, charset, errors),\\n url_unquote_plus(value, charset, errors),\\n )\\n\\n\\ndef url_encode(\\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\\n charset: str = \\\"utf-8\\\",\\n sort: bool = False,\\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,\\n separator: str = \\\"&\\\",\\n) -> str:\\n \\\"\\\"\\\"URL encode a dict/`MultiDict`. If a value is `None` it will not appear\\n in the result string. Per default only values are encoded into the target\\n charset strings.\\n\\n :param obj: the object to encode into a query string.\\n :param charset: the charset of the query string.\\n :param sort: set to `True` if you want parameters to be sorted by `key`.\\n :param separator: the separator to be used for the pairs.\\n :param key: an optional function to be used for sorting. For more details\\n check out the :func:`sorted` documentation.\\n\\n .. versionchanged:: 2.0\\n The ``encode_keys`` parameter is deprecated and will be removed\\n in Werkzeug 2.1.\\n\\n .. versionchanged:: 0.5\\n Added the ``sort``, ``key``, and ``separator`` parameters.\\n \\\"\\\"\\\"\\n separator = _to_str(separator, \\\"ascii\\\")\\n return separator.join(_url_encode_impl(obj, charset, sort, key))\\n\\n\\ndef url_encode_stream(\\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\\n stream: t.Optional[t.IO[str]] = None,\\n charset: str = \\\"utf-8\\\",\\n sort: bool = False,\\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,\\n separator: str = \\\"&\\\",\\n) -> None:\\n \\\"\\\"\\\"Like :meth:`url_encode` but writes the results to a stream\\n object. If the stream is `None` a generator over all encoded\\n pairs is returned.\\n\\n :param obj: the object to encode into a query string.\\n :param stream: a stream to write the encoded object into or `None` if\\n an iterator over the encoded pairs should be returned. In\\n that case the separator argument is ignored.\\n :param charset: the charset of the query string.\\n :param sort: set to `True` if you want parameters to be sorted by `key`.\\n :param separator: the separator to be used for the pairs.\\n :param key: an optional function to be used for sorting. For more details\\n check out the :func:`sorted` documentation.\\n\\n .. versionchanged:: 2.0\\n The ``encode_keys`` parameter is deprecated and will be removed\\n in Werkzeug 2.1.\\n\\n .. 
versionadded:: 0.8\\n \\\"\\\"\\\"\\n separator = _to_str(separator, \\\"ascii\\\")\\n gen = _url_encode_impl(obj, charset, sort, key)\\n if stream is None:\\n return gen # type: ignore\\n for idx, chunk in enumerate(gen):\\n if idx:\\n stream.write(separator)\\n stream.write(chunk)\\n return None\\n\\n\\ndef url_join(\\n base: t.Union[str, t.Tuple[str, str, str, str, str]],\\n url: t.Union[str, t.Tuple[str, str, str, str, str]],\\n allow_fragments: bool = True,\\n) -> str:\\n \\\"\\\"\\\"Join a base URL and a possibly relative URL to form an absolute\\n interpretation of the latter.\\n\\n :param base: the base URL for the join operation.\\n :param url: the URL to join.\\n :param allow_fragments: indicates whether fragments should be allowed.\\n \\\"\\\"\\\"\\n if isinstance(base, tuple):\\n base = url_unparse(base)\\n if isinstance(url, tuple):\\n url = url_unparse(url)\\n\\n _check_str_tuple((base, url))\\n s = _make_encode_wrapper(base)\\n\\n if not base:\\n return url\\n if not url:\\n return base\\n\\n bscheme, bnetloc, bpath, bquery, bfragment = url_parse(\\n base, allow_fragments=allow_fragments\\n )\\n scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)\\n if scheme != bscheme:\\n return url\\n if netloc:\\n return url_unparse((scheme, netloc, path, query, fragment))\\n netloc = bnetloc\\n\\n if path[:1] == s(\\\"/\\\"):\\n segments = path.split(s(\\\"/\\\"))\\n elif not path:\\n segments = bpath.split(s(\\\"/\\\"))\\n if not query:\\n query = bquery\\n else:\\n segments = bpath.split(s(\\\"/\\\"))[:-1] + path.split(s(\\\"/\\\"))\\n\\n # If the rightmost part is \\\"./\\\" we want to keep the slash but\\n # remove the dot.\\n if segments[-1] == s(\\\".\\\"):\\n segments[-1] = s(\\\"\\\")\\n\\n # Resolve \\\"..\\\" and \\\".\\\"\\n segments = [segment for segment in segments if segment != s(\\\".\\\")]\\n while True:\\n i = 1\\n n = len(segments) - 1\\n while i < n:\\n if segments[i] == s(\\\"..\\\") and segments[i - 1] not in (s(\\\"\\\"), s(\\\"..\\\")):\\n del segments[i - 1 : i + 1]\\n break\\n i += 1\\n else:\\n break\\n\\n # Remove trailing \\\"..\\\" if the URL is absolute\\n unwanted_marker = [s(\\\"\\\"), s(\\\"..\\\")]\\n while segments[:2] == unwanted_marker:\\n del segments[1]\\n\\n path = s(\\\"/\\\").join(segments)\\n return url_unparse((scheme, netloc, path, query, fragment))\\n\",\n \"path\": \"src/werkzeug/urls.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/CHANGES.rst b/CHANGES.rst\nindex 88bee32ea..661c3d38a 100644\n--- a/CHANGES.rst\n+++ b/CHANGES.rst\n@@ -63,6 +63,8 @@ Unreleased\n a list, to custom URL converters. :issue:`2249`\n - ``run_simple`` shows instructions for dealing with \"address already\n in use\" errors, including extra instructions for macOS. :pr:`2321`\n+- Extend list of characters considered always safe in URLs based on\n+ :rfc:`3986`. 
:issue:`2319`\n \n \n Version 2.0.3\ndiff --git a/src/werkzeug/urls.py b/src/werkzeug/urls.py\nindex 1cb9418d2..67c08b0bc 100644\n--- a/src/werkzeug/urls.py\n+++ b/src/werkzeug/urls.py\n@@ -27,6 +27,7 @@\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n b\"0123456789\"\n b\"-._~\"\n+ b\"$!'()*+,;\" # RFC3986 sub-delims set, not including query string delimiters &=\n )\n )\n \ndiff --git a/tests/middleware/test_http_proxy.py b/tests/middleware/test_http_proxy.py\nindex b39cd3517..2b3bc55bf 100644\n--- a/tests/middleware/test_http_proxy.py\n+++ b/tests/middleware/test_http_proxy.py\n@@ -45,3 +45,7 @@ def test_http_proxy(standard_app):\n assert \"HTTP_X_SPECIAL\" not in r.json\n assert r.json[\"HTTP_HOST\"] == \"127.0.0.1\"\n assert r.json[\"PATH_INFO\"] == \"/autohost/aha\"\n+\n+ # test if characters allowed in URL are not encoded by proxy\n+ r = client.get(\"/autohost/$\")\n+ assert r.json[\"REQUEST_URI\"] == \"/autohost/$\"\n"}}},{"rowIdx":427,"cells":{"in_source_id":{"kind":"string","value":"wright-group__WrightTools-640"},"issue":{"kind":"string","value":"CITATION text file not distributed\n\n"},"before_files":{"kind":"list like","value":[{"content":"#! /usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(fname):\n return open(os.path.join(here, fname)).read()\n\n\nextra_files = {\n \"WrightTools\": [\n \"datasets\",\n \"datasets/*\",\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n}\n\nwith open(os.path.join(here, \"WrightTools\", \"VERSION\")) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name=\"WrightTools\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data=extra_files,\n python_requires=\">=3.5\",\n setup_requires=[\"pytest-runner\"],\n tests_require=[\n \"pytest\",\n \"pytest-cov\",\n \"sphinx==1.6.5\",\n \"sphinx-gallery==0.1.12\",\n \"sphinx-rtd-theme\",\n ],\n install_requires=[\n \"h5py\",\n \"imageio\",\n \"matplotlib>=2.0\",\n \"numexpr\",\n \"numpy\",\n \"python-dateutil\",\n \"pytz\",\n \"scipy\",\n \"tidy_headers\",\n ],\n extras_require={\n \"docs\": [\"sphinx-gallery==0.1.12\"],\n \"dev\": [\"black\", \"pre-commit\", \"pydocstyle\"],\n },\n version=version,\n description=\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\",\n long_description=read(\"README.rst\"),\n author=\"WrightTools Developers\",\n license=\"MIT\",\n url=\"http://wright.tools\",\n keywords=\"spectroscopy science multidimensional visualization\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"#! 
/usr/bin/env python3\\n\\nimport os\\nfrom setuptools import setup, find_packages\\n\\n\\nhere = os.path.abspath(os.path.dirname(__file__))\\n\\n\\ndef read(fname):\\n return open(os.path.join(here, fname)).read()\\n\\n\\nextra_files = {\\n \\\"WrightTools\\\": [\\n \\\"datasets\\\",\\n \\\"datasets/*\\\",\\n \\\"datasets/*/*\\\",\\n \\\"datasets/*/*/*\\\",\\n \\\"datasets/*/*/*/*\\\",\\n \\\"VERSION\\\",\\n \\\"WT5_VERSION\\\",\\n ]\\n}\\n\\nwith open(os.path.join(here, \\\"WrightTools\\\", \\\"VERSION\\\")) as version_file:\\n version = version_file.read().strip()\\n\\nsetup(\\n name=\\\"WrightTools\\\",\\n packages=find_packages(exclude=(\\\"tests\\\", \\\"tests.*\\\")),\\n package_data=extra_files,\\n python_requires=\\\">=3.5\\\",\\n setup_requires=[\\\"pytest-runner\\\"],\\n tests_require=[\\n \\\"pytest\\\",\\n \\\"pytest-cov\\\",\\n \\\"sphinx==1.6.5\\\",\\n \\\"sphinx-gallery==0.1.12\\\",\\n \\\"sphinx-rtd-theme\\\",\\n ],\\n install_requires=[\\n \\\"h5py\\\",\\n \\\"imageio\\\",\\n \\\"matplotlib>=2.0\\\",\\n \\\"numexpr\\\",\\n \\\"numpy\\\",\\n \\\"python-dateutil\\\",\\n \\\"pytz\\\",\\n \\\"scipy\\\",\\n \\\"tidy_headers\\\",\\n ],\\n extras_require={\\n \\\"docs\\\": [\\\"sphinx-gallery==0.1.12\\\"],\\n \\\"dev\\\": [\\\"black\\\", \\\"pre-commit\\\", \\\"pydocstyle\\\"],\\n },\\n version=version,\\n description=\\\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\\\",\\n long_description=read(\\\"README.rst\\\"),\\n author=\\\"WrightTools Developers\\\",\\n license=\\\"MIT\\\",\\n url=\\\"http://wright.tools\\\",\\n keywords=\\\"spectroscopy science multidimensional visualization\\\",\\n classifiers=[\\n \\\"Development Status :: 5 - Production/Stable\\\",\\n \\\"Intended Audience :: Science/Research\\\",\\n \\\"License :: OSI Approved :: MIT License\\\",\\n \\\"Natural Language :: English\\\",\\n \\\"Programming Language :: Python :: 3\\\",\\n \\\"Programming Language :: Python :: 3.5\\\",\\n \\\"Programming Language :: Python :: 3.6\\\",\\n \\\"Topic :: Scientific/Engineering\\\",\\n ],\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"#! 
/usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(fname):\n return open(os.path.join(here, fname)).read()\n\n\nextra_files = {\n \"WrightTools\": [\n \"datasets\",\n \"datasets/*\",\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n \"CITATION\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n}\n\nwith open(os.path.join(here, \"WrightTools\", \"VERSION\")) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name=\"WrightTools\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data=extra_files,\n python_requires=\">=3.5\",\n setup_requires=[\"pytest-runner\"],\n tests_require=[\n \"pytest\",\n \"pytest-cov\",\n \"sphinx==1.6.5\",\n \"sphinx-gallery==0.1.12\",\n \"sphinx-rtd-theme\",\n ],\n install_requires=[\n \"h5py\",\n \"imageio\",\n \"matplotlib>=2.0\",\n \"numexpr\",\n \"numpy\",\n \"python-dateutil\",\n \"pytz\",\n \"scipy\",\n \"tidy_headers\",\n ],\n extras_require={\n \"docs\": [\"sphinx-gallery==0.1.12\"],\n \"dev\": [\"black\", \"pre-commit\", \"pydocstyle\"],\n },\n version=version,\n description=\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\",\n long_description=read(\"README.rst\"),\n author=\"WrightTools Developers\",\n license=\"MIT\",\n url=\"http://wright.tools\",\n keywords=\"spectroscopy science multidimensional visualization\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"#! 
/usr/bin/env python3\\n\\nimport os\\nfrom setuptools import setup, find_packages\\n\\n\\nhere = os.path.abspath(os.path.dirname(__file__))\\n\\n\\ndef read(fname):\\n return open(os.path.join(here, fname)).read()\\n\\n\\nextra_files = {\\n \\\"WrightTools\\\": [\\n \\\"datasets\\\",\\n \\\"datasets/*\\\",\\n \\\"datasets/*/*\\\",\\n \\\"datasets/*/*/*\\\",\\n \\\"datasets/*/*/*/*\\\",\\n \\\"CITATION\\\",\\n \\\"VERSION\\\",\\n \\\"WT5_VERSION\\\",\\n ]\\n}\\n\\nwith open(os.path.join(here, \\\"WrightTools\\\", \\\"VERSION\\\")) as version_file:\\n version = version_file.read().strip()\\n\\nsetup(\\n name=\\\"WrightTools\\\",\\n packages=find_packages(exclude=(\\\"tests\\\", \\\"tests.*\\\")),\\n package_data=extra_files,\\n python_requires=\\\">=3.5\\\",\\n setup_requires=[\\\"pytest-runner\\\"],\\n tests_require=[\\n \\\"pytest\\\",\\n \\\"pytest-cov\\\",\\n \\\"sphinx==1.6.5\\\",\\n \\\"sphinx-gallery==0.1.12\\\",\\n \\\"sphinx-rtd-theme\\\",\\n ],\\n install_requires=[\\n \\\"h5py\\\",\\n \\\"imageio\\\",\\n \\\"matplotlib>=2.0\\\",\\n \\\"numexpr\\\",\\n \\\"numpy\\\",\\n \\\"python-dateutil\\\",\\n \\\"pytz\\\",\\n \\\"scipy\\\",\\n \\\"tidy_headers\\\",\\n ],\\n extras_require={\\n \\\"docs\\\": [\\\"sphinx-gallery==0.1.12\\\"],\\n \\\"dev\\\": [\\\"black\\\", \\\"pre-commit\\\", \\\"pydocstyle\\\"],\\n },\\n version=version,\\n description=\\\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\\\",\\n long_description=read(\\\"README.rst\\\"),\\n author=\\\"WrightTools Developers\\\",\\n license=\\\"MIT\\\",\\n url=\\\"http://wright.tools\\\",\\n keywords=\\\"spectroscopy science multidimensional visualization\\\",\\n classifiers=[\\n \\\"Development Status :: 5 - Production/Stable\\\",\\n \\\"Intended Audience :: Science/Research\\\",\\n \\\"License :: OSI Approved :: MIT License\\\",\\n \\\"Natural Language :: English\\\",\\n \\\"Programming Language :: Python :: 3\\\",\\n \\\"Programming Language :: Python :: 3.5\\\",\\n \\\"Programming Language :: Python :: 3.6\\\",\\n \\\"Topic :: Scientific/Engineering\\\",\\n ],\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/setup.py b/setup.py\nindex d79eb6dfa..fa23eb776 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -18,6 +18,7 @@ def read(fname):\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n+ \"CITATION\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n"}}},{"rowIdx":428,"cells":{"in_source_id":{"kind":"string","value":"typeddjango__django-stubs-1782"},"issue":{"kind":"string","value":"Make mypy a soft dependency\nGiven that there are several popular alternatives to mypy (e.g. pyright and pytype), mypy should be an optional dependency, installable with e.g. `django-stubs[mypy]`. \r\n\r\nI haven't tested it myself yet, but if `django-stubs` doesn't work with these \"alternative\" typecheckers, then I'd suggest that it should explicitly mentioned that this is a mypy-only stubs package. 
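
A minimal sketch of what the requested packaging change looks like in setuptools terms (illustrative only; the dependency names and the `compatible-mypy` pin below follow the `after_files`/`pr_diff` fields of this record rather than any authoritative release): plain `pip install django-stubs` stops pulling in mypy, while an extra still offers a known-good pin.

```python
# Illustrative setup.py fragment: mypy is no longer a hard dependency;
# it is only installed when the "compatible-mypy" extra is requested,
# e.g.  pip install "django-stubs[compatible-mypy]"
from setuptools import setup

setup(
    name="django-stubs",
    install_requires=[
        "django",
        "django-stubs-ext>=4.2.5",  # note: no "mypy>=1.0.0" entry here
    ],
    extras_require={
        "compatible-mypy": ["mypy~=1.6.0"],  # opt-in, CI-tested mypy pin
    },
)
```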
\n"},"before_files":{"kind":"list like","value":[{"content":"#!/usr/bin/env python\nimport os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"mypy>=1.0.0\",\n \"django\",\n \"django-stubs-ext>=4.2.5\",\n \"tomli; python_version < '3.11'\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\n# Keep compatible-mypy major.minor version pinned to what we use in CI (requirements.txt)\nextras_require = {\n \"compatible-mypy\": [\"mypy~=1.6.0\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"4.2.5\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n license_files=[\"LICENSE.md\"],\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"maxim.kurnikov@gmail.com\",\n maintainer=\"Marti Raudsepp\",\n maintainer_email=\"marti@juffo.org\",\n py_modules=[],\n python_requires=\">=3.8\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.1\",\n \"Framework :: Django :: 4.2\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"#!/usr/bin/env python\\nimport os\\nfrom typing import List\\n\\nfrom setuptools import find_packages, setup\\n\\n\\ndef find_stub_files(name: str) -> List[str]:\\n result = []\\n for root, _dirs, files in os.walk(name):\\n for file in files:\\n if file.endswith(\\\".pyi\\\"):\\n if os.path.sep in root:\\n sub_root = root.split(os.path.sep, 1)[-1]\\n file = os.path.join(sub_root, file)\\n result.append(file)\\n return result\\n\\n\\nwith open(\\\"README.md\\\") as f:\\n readme = f.read()\\n\\ndependencies = [\\n \\\"mypy>=1.0.0\\\",\\n \\\"django\\\",\\n \\\"django-stubs-ext>=4.2.5\\\",\\n \\\"tomli; python_version < '3.11'\\\",\\n # Types:\\n \\\"typing-extensions\\\",\\n \\\"types-pytz\\\",\\n \\\"types-PyYAML\\\",\\n]\\n\\n# Keep compatible-mypy major.minor version pinned to what we use in CI (requirements.txt)\\nextras_require = {\\n \\\"compatible-mypy\\\": [\\\"mypy~=1.6.0\\\"],\\n}\\n\\nsetup(\\n name=\\\"django-stubs\\\",\\n version=\\\"4.2.5\\\",\\n description=\\\"Mypy stubs for Django\\\",\\n long_description=readme,\\n long_description_content_type=\\\"text/markdown\\\",\\n license=\\\"MIT\\\",\\n license_files=[\\\"LICENSE.md\\\"],\\n url=\\\"https://github.com/typeddjango/django-stubs\\\",\\n author=\\\"Maksim Kurnikov\\\",\\n 
author_email=\\\"maxim.kurnikov@gmail.com\\\",\\n maintainer=\\\"Marti Raudsepp\\\",\\n maintainer_email=\\\"marti@juffo.org\\\",\\n py_modules=[],\\n python_requires=\\\">=3.8\\\",\\n install_requires=dependencies,\\n extras_require=extras_require,\\n packages=[\\\"django-stubs\\\", *find_packages(exclude=[\\\"scripts\\\"])],\\n package_data={\\n \\\"django-stubs\\\": find_stub_files(\\\"django-stubs\\\"),\\n \\\"mypy_django_plugin\\\": [\\\"py.typed\\\"],\\n },\\n classifiers=[\\n \\\"License :: OSI Approved :: MIT License\\\",\\n \\\"Operating System :: OS Independent\\\",\\n \\\"Programming Language :: Python :: 3.8\\\",\\n \\\"Programming Language :: Python :: 3.9\\\",\\n \\\"Programming Language :: Python :: 3.10\\\",\\n \\\"Programming Language :: Python :: 3.11\\\",\\n \\\"Programming Language :: Python :: 3.12\\\",\\n \\\"Typing :: Typed\\\",\\n \\\"Framework :: Django\\\",\\n \\\"Framework :: Django :: 3.2\\\",\\n \\\"Framework :: Django :: 4.1\\\",\\n \\\"Framework :: Django :: 4.2\\\",\\n ],\\n project_urls={\\n \\\"Release notes\\\": \\\"https://github.com/typeddjango/django-stubs/releases\\\",\\n },\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"#!/usr/bin/env python\nimport os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"django\",\n \"django-stubs-ext>=4.2.5\",\n \"tomli; python_version < '3.11'\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\n# Keep compatible-mypy major.minor version pinned to what we use in CI (requirements.txt)\nextras_require = {\n \"compatible-mypy\": [\"mypy~=1.6.0\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"4.2.5\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n license_files=[\"LICENSE.md\"],\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"maxim.kurnikov@gmail.com\",\n maintainer=\"Marti Raudsepp\",\n maintainer_email=\"marti@juffo.org\",\n py_modules=[],\n python_requires=\">=3.8\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.1\",\n \"Framework :: Django :: 4.2\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n","path":"setup.py"}],"string":"[\n {\n \"content\": \"#!/usr/bin/env python\\nimport os\\nfrom typing import List\\n\\nfrom setuptools import find_packages, setup\\n\\n\\ndef 
find_stub_files(name: str) -> List[str]:\\n result = []\\n for root, _dirs, files in os.walk(name):\\n for file in files:\\n if file.endswith(\\\".pyi\\\"):\\n if os.path.sep in root:\\n sub_root = root.split(os.path.sep, 1)[-1]\\n file = os.path.join(sub_root, file)\\n result.append(file)\\n return result\\n\\n\\nwith open(\\\"README.md\\\") as f:\\n readme = f.read()\\n\\ndependencies = [\\n \\\"django\\\",\\n \\\"django-stubs-ext>=4.2.5\\\",\\n \\\"tomli; python_version < '3.11'\\\",\\n # Types:\\n \\\"typing-extensions\\\",\\n \\\"types-pytz\\\",\\n \\\"types-PyYAML\\\",\\n]\\n\\n# Keep compatible-mypy major.minor version pinned to what we use in CI (requirements.txt)\\nextras_require = {\\n \\\"compatible-mypy\\\": [\\\"mypy~=1.6.0\\\"],\\n}\\n\\nsetup(\\n name=\\\"django-stubs\\\",\\n version=\\\"4.2.5\\\",\\n description=\\\"Mypy stubs for Django\\\",\\n long_description=readme,\\n long_description_content_type=\\\"text/markdown\\\",\\n license=\\\"MIT\\\",\\n license_files=[\\\"LICENSE.md\\\"],\\n url=\\\"https://github.com/typeddjango/django-stubs\\\",\\n author=\\\"Maksim Kurnikov\\\",\\n author_email=\\\"maxim.kurnikov@gmail.com\\\",\\n maintainer=\\\"Marti Raudsepp\\\",\\n maintainer_email=\\\"marti@juffo.org\\\",\\n py_modules=[],\\n python_requires=\\\">=3.8\\\",\\n install_requires=dependencies,\\n extras_require=extras_require,\\n packages=[\\\"django-stubs\\\", *find_packages(exclude=[\\\"scripts\\\"])],\\n package_data={\\n \\\"django-stubs\\\": find_stub_files(\\\"django-stubs\\\"),\\n \\\"mypy_django_plugin\\\": [\\\"py.typed\\\"],\\n },\\n classifiers=[\\n \\\"License :: OSI Approved :: MIT License\\\",\\n \\\"Operating System :: OS Independent\\\",\\n \\\"Programming Language :: Python :: 3.8\\\",\\n \\\"Programming Language :: Python :: 3.9\\\",\\n \\\"Programming Language :: Python :: 3.10\\\",\\n \\\"Programming Language :: Python :: 3.11\\\",\\n \\\"Programming Language :: Python :: 3.12\\\",\\n \\\"Typing :: Typed\\\",\\n \\\"Framework :: Django\\\",\\n \\\"Framework :: Django :: 3.2\\\",\\n \\\"Framework :: Django :: 4.1\\\",\\n \\\"Framework :: Django :: 4.2\\\",\\n ],\\n project_urls={\\n \\\"Release notes\\\": \\\"https://github.com/typeddjango/django-stubs/releases\\\",\\n },\\n)\\n\",\n \"path\": \"setup.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/setup.py b/setup.py\nindex b5067b3fe..5973d2e63 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,6 @@ def find_stub_files(name: str) -> List[str]:\n readme = f.read()\n \n dependencies = [\n- \"mypy>=1.0.0\",\n \"django\",\n \"django-stubs-ext>=4.2.5\",\n \"tomli; python_version < '3.11'\",\n"}}},{"rowIdx":429,"cells":{"in_source_id":{"kind":"string","value":"optuna__optuna-4965"},"issue":{"kind":"string","value":"Use `__future__.annotations` everywhere in the Optuna code base\n### Motivation\r\n\r\nOptuna drops Python 3.6 from v3.1, so we can use `__future__.annotations`, which simplifies the code base. See [PEP 563](https://peps.python.org/pep-0563/), [PEP584](https://peps.python.org/pep-0584/), [PEP 585](https://peps.python.org/pep-0585/), and [PEP 604](https://peps.python.org/pep-0604/) for more details. This issue suggests to use the module and simplifies the code base.\r\n\r\n### Suggestion\r\n\r\nUse `__future__.annotations` for each file and simplify the type annotations. The list of classes whose type annotations can be simplified is [here](https://peps.python.org/pep-0585/#implementation). The list of files where the `__future__.annotations` can be used is as follows. 
In order to reduce review costs and to encourage more contributors to work on it, please, as a rule, fix one file per PR.\r\n\r\n- [x] optuna/_convert_positional_args.py\r\n- [x] optuna/visualization/_optimization_history.py\r\n- [x] optuna/visualization/_hypervolume_history.py\r\n- [x] optuna/visualization/_edf.py\r\n- [x] optuna/visualization/_pareto_front.py\r\n- [x] optuna/visualization/matplotlib/_optimization_history.py\r\n- [x] optuna/visualization/matplotlib/_hypervolume_history.py\r\n- [x] optuna/visualization/matplotlib/_edf.py\r\n- [x] optuna/visualization/matplotlib/_pareto_front.py\r\n- [x] optuna/visualization/matplotlib/_contour.py\r\n- [x] optuna/visualization/_utils.py\r\n- [x] optuna/logging.py\r\n- [ ] optuna/storages/_base.py\r\n- [ ] optuna/storages/_cached_storage.py\r\n- [ ] optuna/storages/__init__.py\r\n- [ ] optuna/storages/_heartbeat.py\r\n- [ ] optuna/storages/_in_memory.py\r\n- [ ] optuna/storages/_rdb/models.py\r\n- [ ] optuna/storages/_rdb/storage.py\r\n- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.c.py\r\n- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.d.py\r\n- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.a.py\r\n- [ ] optuna/storages/_journal/file.py\r\n- [ ] optuna/storages/_journal/redis.py\r\n- [ ] optuna/storages/_journal/storage.py\r\n- [ ] optuna/storages/_journal/base.py\r\n- [ ] optuna/study/_dataframe.py\r\n- [ ] optuna/study/_optimize.py\r\n- [ ] optuna/study/_tell.py\r\n- [ ] optuna/study/_multi_objective.py\r\n- [ ] optuna/study/_frozen.py\r\n- [ ] optuna/study/study.py\r\n- [ ] optuna/study/_study_summary.py\r\n- [ ] optuna/search_space/group_decomposed.py\r\n- [ ] optuna/search_space/intersection.py\r\n- [ ] optuna/_typing.py\r\n- [ ] optuna/_deprecated.py\r\n- [ ] optuna/pruners/_hyperband.py\r\n- [ ] optuna/pruners/_patient.py\r\n- [ ] optuna/pruners/_successive_halving.py\r\n- [ ] optuna/pruners/_percentile.py\r\n- [ ] optuna/pruners/_threshold.py\r\n- [ ] optuna/trial/_base.py\r\n- [ ] optuna/trial/_fixed.py\r\n- [ ] optuna/trial/_trial.py\r\n- [ ] optuna/trial/_frozen.py\r\n- [ ] optuna/integration/cma.py\r\n- [ ] optuna/integration/shap.py\r\n- [ ] optuna/integration/lightgbm.py\r\n- [ ] optuna/integration/pytorch_distributed.py\r\n- [ ] optuna/integration/_lightgbm_tuner/optimize.py\r\n- [ ] optuna/integration/_lightgbm_tuner/alias.py\r\n- [ ] optuna/integration/mlflow.py\r\n- [ ] optuna/integration/wandb.py\r\n- [ ] optuna/integration/catboost.py\r\n- [ ] optuna/integration/skopt.py\r\n- [ ] optuna/integration/botorch.py\r\n- [ ] optuna/integration/dask.py\r\n- [x] optuna/integration/sklearn.py\r\n- [ ] optuna/integration/tensorboard.py\r\n- [ ] optuna/terminator/callback.py\r\n- [ ] optuna/terminator/terminator.py\r\n- [ ] optuna/terminator/improvement/_preprocessing.py\r\n- [ ] optuna/terminator/improvement/gp/botorch.py\r\n- [ ] optuna/terminator/improvement/gp/base.py\r\n- [ ] optuna/terminator/improvement/evaluator.py\r\n- [ ] optuna/importance/_base.py\r\n- [ ] optuna/importance/_mean_decrease_impurity.py\r\n- [ ] optuna/importance/__init__.py\r\n- [ ] optuna/importance/_fanova/_fanova.py\r\n- [ ] optuna/importance/_fanova/_evaluator.py\r\n- [ ] optuna/importance/_fanova/_tree.py\r\n- [ ] optuna/_imports.py\r\n- [ ] optuna/testing/tempfile_pool.py\r\n- [ ] optuna/testing/threading.py\r\n- [ ] optuna/testing/distributions.py\r\n- [ ] optuna/testing/samplers.py\r\n- [ ] optuna/testing/storages.py\r\n- [ ] optuna/distributions.py\r\n- [ ] optuna/cli.py\r\n- [ ] optuna/multi_objective/visualization/_pareto_front.py\r\n- 
[ ] optuna/multi_objective/trial.py\r\n- [ ] optuna/multi_objective/samplers/_base.py\r\n- [ ] optuna/multi_objective/samplers/_nsga2.py\r\n- [ ] optuna/multi_objective/samplers/_adapter.py\r\n- [ ] optuna/multi_objective/samplers/_random.py\r\n- [ ] optuna/multi_objective/samplers/_motpe.py\r\n- [ ] optuna/multi_objective/study.py\r\n- [ ] optuna/_experimental.py\r\n- [ ] optuna/samplers/_base.py\r\n- [ ] optuna/samplers/nsgaii/_crossovers/_undx.py\r\n- [ ] optuna/samplers/nsgaii/_crossovers/_spx.py\r\n- [ ] optuna/samplers/nsgaii/_crossovers/_sbx.py\r\n- [ ] optuna/samplers/nsgaii/_crossovers/_vsbx.py\r\n- [ ] optuna/samplers/nsgaii/_sampler.py\r\n- [ ] optuna/samplers/nsgaii/_crossover.py\r\n- [ ] optuna/samplers/_search_space/intersection.py\r\n- [ ] optuna/samplers/_qmc.py\r\n- [ ] optuna/samplers/_tpe/probability_distributions.py\r\n- [ ] optuna/samplers/_tpe/_truncnorm.py\r\n- [ ] optuna/samplers/_tpe/multi_objective_sampler.py\r\n- [ ] optuna/samplers/_tpe/parzen_estimator.py\r\n- [ ] optuna/samplers/_tpe/sampler.py\r\n- [ ] optuna/samplers/_random.py\r\n- [ ] optuna/samplers/_cmaes.py\r\n- [ ] optuna/samplers/_partial_fixed.py\r\n- [ ] optuna/samplers/_brute_force.py\r\n- [ ] optuna/samplers/_nsgaiii.py\r\n- [ ] optuna/samplers/_grid.py\r\n- [ ] optuna/_hypervolume/wfg.py\r\n- [ ] optuna/_hypervolume/hssp.py\r\n- [ ] optuna/progress_bar.py\r\n- [ ] optuna/_transform.py\r\n- [ ] optuna/_callbacks.py\r\n- [ ] tests/multi_objective_tests/test_study.py\r\n- [ ] tests/multi_objective_tests/samplers_tests/test_motpe.py\r\n- [ ] tests/multi_objective_tests/samplers_tests/test_nsga2.py\r\n- [ ] tests/multi_objective_tests/test_trial.py\r\n- [ ] tests/multi_objective_tests/visualization_tests/test_pareto_front.py\r\n- [ ] tests/trial_tests/test_frozen.py\r\n- [ ] tests/trial_tests/test_trials.py\r\n- [ ] tests/trial_tests/test_trial.py\r\n- [ ] tests/pruners_tests/test_percentile.py\r\n- [ ] tests/pruners_tests/test_median.py\r\n- [ ] tests/pruners_tests/test_patient.py\r\n- [ ] tests/pruners_tests/test_successive_halving.py\r\n- [ ] tests/study_tests/test_optimize.py\r\n- [ ] tests/study_tests/test_study.py\r\n- [ ] tests/hypervolume_tests/test_hssp.py\r\n- [x] tests/integration_tests/test_skopt.py\r\n- [x] tests/integration_tests/test_pytorch_lightning.py\r\n- [ ] tests/integration_tests/test_shap.py\r\n- [ ] tests/integration_tests/test_cma.py\r\n- [ ] tests/integration_tests/test_pytorch_distributed.py\r\n- [ ] tests/integration_tests/lightgbm_tuner_tests/test_optimize.py\r\n- [ ] tests/integration_tests/lightgbm_tuner_tests/test_alias.py\r\n- [ ] tests/integration_tests/test_botorch.py\r\n- [ ] tests/integration_tests/test_mlflow.py\r\n- [ ] tests/integration_tests/test_mxnet.py\r\n- [ ] tests/integration_tests/test_wandb.py\r\n- [ ] tests/importance_tests/fanova_tests/test_tree.py\r\n- [ ] tests/importance_tests/test_mean_decrease_impurity.py\r\n- [ ] tests/importance_tests/test_fanova.py\r\n- [ ] tests/importance_tests/test_init.py\r\n- [ ] tests/test_convert_positional_args.py\r\n- [ ] tests/test_deprecated.py\r\n- [ ] tests/storages_tests/test_journal.py\r\n- [ ] tests/storages_tests/test_heartbeat.py\r\n- [ ] tests/storages_tests/test_storages.py\r\n- [ ] tests/storages_tests/rdb_tests/test_storage.py\r\n- [ ] tests/storages_tests/rdb_tests/create_db.py\r\n- [ ] tests/storages_tests/test_with_server.py\r\n- [ ] tests/samplers_tests/test_grid.py\r\n- [ ] tests/samplers_tests/tpe_tests/test_parzen_estimator.py\r\n- [ ] 
tests/samplers_tests/tpe_tests/test_multi_objective_sampler.py\r\n- [ ] tests/samplers_tests/tpe_tests/test_sampler.py\r\n- [ ] tests/samplers_tests/test_cmaes.py\r\n- [ ] tests/samplers_tests/test_samplers.py\r\n- [x] tests/samplers_tests/test_nsgaii.py\r\n- [x] tests/samplers_tests/test_nsgaiii.py\r\n- [ ] tests/samplers_tests/test_qmc.py\r\n- [ ] tests/test_distributions.py\r\n- [ ] tests/test_multi_objective.py\r\n- [ ] tests/test_cli.py\r\n- [ ] tests/visualization_tests/test_hypervolume_history.py\r\n- [ ] tests/visualization_tests/test_pareto_front.py\r\n- [ ] tests/terminator_tests/improvement_tests/test_evaluator.py\r\n- [ ] benchmarks/kurobako/problems/wfg/transformation_functions.py\r\n- [ ] benchmarks/bayesmark/report_bayesmark.py\r\n- [ ] benchmarks/bayesmark/optuna_optimizer.py\r\n\r\n\r\n### Additional context (optional)\r\n\r\nThe above list is generated by the following script.\r\n\r\n
\r\nscript\r\n\r\n```python\r\nimport os\r\nimport pathlib\r\n\r\n\r\nPATTERS = [\r\n    \"from typing import Union\",\r\n    \"from typing import Optional\",\r\n    \"from typing import Tuple\",\r\n    \"from typing import List\",\r\n    \"from typing import Dict\",\r\n    \"from typing import Set\",\r\n    \"from typing import FrozenSet\",\r\n    \"from typing import Type\",\r\n    \"from typing import FrozenSet\",\r\n    \"from typing import Sequence\",\r\n]\r\n\r\n\r\ndef get_filenames_to_be_simplified(dir_path):\r\n    ret = []\r\n    for f in os.listdir(dir_path):\r\n        file_path = os.path.join(dir_path, f)\r\n        if not os.path.isfile(file_path):\r\n            ret.extend(get_filenames_to_be_simplified(file_path))\r\n        else:\r\n            try:\r\n                with open(file_path) as fd:\r\n                    contents = fd.read()\r\n                if any([s in contents for s in PATTERS]):\r\n                    ret.append(str(file_path))\r\n            except UnicodeDecodeError as e:\r\n                pass\r\n    return ret\r\n\r\n\r\ndef main():\r\n    dirs = [\"optuna\", \"tests\", \"benchmarks\"]\r\n\r\n    for dir_name in dirs:\r\n        filenames = get_filenames_to_be_simplified(pathlib.Path(dir_name))\r\n        for filename in filenames:\r\n            print(f\"- [ ] {filename}\")\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n```\r\n\r\n
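
Not part of the original issue text — a minimal, self-contained illustration of the kind of rewrite the checklist asks for, assuming Python 3.7+ where the `__future__` import makes annotations lazily evaluated (PEP 563), so built-in generics (PEP 585) and `X | None` unions (PEP 604) become legal in annotations:

```python
from __future__ import annotations

# Before the cleanup, a signature like this needed typing aliases:
#     from typing import Dict, List, Optional
#     def summarize(values: Optional[List[float]]) -> Dict[str, float]: ...
# With the future import, the same signature uses plain built-ins and "|":
def summarize(values: list[float] | None = None) -> dict[str, float]:
    if not values:
        return {}
    return {"mean": sum(values) / len(values)}


print(summarize([1.0, 2.0, 3.0]))  # {'mean': 2.0}
```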
\n"},"before_files":{"kind":"list like","value":[{"content":"from __future__ import annotations\n\nfrom typing import NamedTuple\nfrom typing import Sequence\n\nimport numpy as np\n\nfrom optuna._experimental import experimental_func\nfrom optuna._hypervolume import WFG\nfrom optuna.logging import get_logger\nfrom optuna.samplers._base import _CONSTRAINTS_KEY\nfrom optuna.study import Study\nfrom optuna.study._multi_objective import _dominates\nfrom optuna.study._study_direction import StudyDirection\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\nfrom optuna.visualization._plotly_imports import _imports\n\n\nif _imports.is_successful():\n from optuna.visualization._plotly_imports import go\n\n_logger = get_logger(__name__)\n\n\nclass _HypervolumeHistoryInfo(NamedTuple):\n trial_numbers: list[int]\n values: list[float]\n\n\n@experimental_func(\"3.3.0\")\ndef plot_hypervolume_history(\n study: Study,\n reference_point: Sequence[float],\n) -> \"go.Figure\":\n \"\"\"Plot hypervolume history of all trials in a study.\n\n Example:\n\n The following code snippet shows how to plot optimization history.\n\n .. plotly::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", 0, 5)\n y = trial.suggest_float(\"y\", 0, 3)\n\n v0 = 4 * x ** 2 + 4 * y ** 2\n v1 = (x - 5) ** 2 + (y - 5) ** 2\n return v0, v1\n\n\n study = optuna.create_study(directions=[\"minimize\", \"minimize\"])\n study.optimize(objective, n_trials=50)\n\n reference_point=[100., 50.]\n fig = optuna.visualization.plot_hypervolume_history(study, reference_point)\n fig.show()\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their hypervolumes.\n The number of objectives must be 2 or more.\n\n reference_point:\n A reference point to use for hypervolume computation.\n The dimension of the reference point must be the same as the number of objectives.\n\n Returns:\n A :class:`plotly.graph_objs.Figure` object.\n \"\"\"\n\n _imports.check()\n\n if not study._is_multi_objective():\n raise ValueError(\n \"Study must be multi-objective. 
For single-objective optimization, \"\n \"please use plot_optimization_history instead.\"\n )\n\n if len(reference_point) != len(study.directions):\n raise ValueError(\n \"The dimension of the reference point must be the same as the number of objectives.\"\n )\n\n info = _get_hypervolume_history_info(study, np.asarray(reference_point, dtype=np.float64))\n return _get_hypervolume_history_plot(info)\n\n\ndef _get_hypervolume_history_plot(\n info: _HypervolumeHistoryInfo,\n) -> \"go.Figure\":\n layout = go.Layout(\n title=\"Hypervolume History Plot\",\n xaxis={\"title\": \"Trial\"},\n yaxis={\"title\": \"Hypervolume\"},\n )\n\n data = go.Scatter(\n x=info.trial_numbers,\n y=info.values,\n mode=\"lines+markers\",\n )\n return go.Figure(data=data, layout=layout)\n\n\ndef _get_hypervolume_history_info(\n study: Study,\n reference_point: np.ndarray,\n) -> _HypervolumeHistoryInfo:\n completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))\n\n if len(completed_trials) == 0:\n _logger.warning(\"Your study does not have any completed trials.\")\n\n # Our hypervolume computation module assumes that all objectives are minimized.\n # Here we transform the objective values and the reference point.\n signs = np.asarray([1 if d == StudyDirection.MINIMIZE else -1 for d in study.directions])\n minimization_reference_point = signs * reference_point\n\n # Only feasible trials are considered in hypervolume computation.\n trial_numbers = []\n values = []\n best_trials: list[FrozenTrial] = []\n hypervolume = 0.0\n for trial in completed_trials:\n trial_numbers.append(trial.number)\n\n has_constraints = _CONSTRAINTS_KEY in trial.system_attrs\n if has_constraints:\n constraints_values = trial.system_attrs[_CONSTRAINTS_KEY]\n if any(map(lambda x: x > 0.0, constraints_values)):\n # The trial is infeasible.\n values.append(hypervolume)\n continue\n\n if any(map(lambda t: _dominates(t, trial, study.directions), best_trials)):\n # The trial is not on the Pareto front.\n values.append(hypervolume)\n continue\n\n best_trials = list(\n filter(lambda t: not _dominates(trial, t, study.directions), best_trials)\n ) + [trial]\n\n solution_set = np.asarray(\n list(\n filter(\n lambda v: (v <= minimization_reference_point).all(),\n [signs * trial.values for trial in best_trials],\n )\n )\n )\n if solution_set.size > 0:\n hypervolume = WFG().compute(solution_set, minimization_reference_point)\n values.append(hypervolume)\n\n if len(best_trials) == 0:\n _logger.warning(\"Your study does not have any feasible trials.\")\n\n return _HypervolumeHistoryInfo(trial_numbers, values)\n","path":"optuna/visualization/_hypervolume_history.py"}],"string":"[\n {\n \"content\": \"from __future__ import annotations\\n\\nfrom typing import NamedTuple\\nfrom typing import Sequence\\n\\nimport numpy as np\\n\\nfrom optuna._experimental import experimental_func\\nfrom optuna._hypervolume import WFG\\nfrom optuna.logging import get_logger\\nfrom optuna.samplers._base import _CONSTRAINTS_KEY\\nfrom optuna.study import Study\\nfrom optuna.study._multi_objective import _dominates\\nfrom optuna.study._study_direction import StudyDirection\\nfrom optuna.trial import FrozenTrial\\nfrom optuna.trial import TrialState\\nfrom optuna.visualization._plotly_imports import _imports\\n\\n\\nif _imports.is_successful():\\n from optuna.visualization._plotly_imports import go\\n\\n_logger = get_logger(__name__)\\n\\n\\nclass _HypervolumeHistoryInfo(NamedTuple):\\n trial_numbers: list[int]\\n values: 
list[float]\\n\\n\\n@experimental_func(\\\"3.3.0\\\")\\ndef plot_hypervolume_history(\\n study: Study,\\n reference_point: Sequence[float],\\n) -> \\\"go.Figure\\\":\\n \\\"\\\"\\\"Plot hypervolume history of all trials in a study.\\n\\n Example:\\n\\n The following code snippet shows how to plot optimization history.\\n\\n .. plotly::\\n\\n import optuna\\n\\n\\n def objective(trial):\\n x = trial.suggest_float(\\\"x\\\", 0, 5)\\n y = trial.suggest_float(\\\"y\\\", 0, 3)\\n\\n v0 = 4 * x ** 2 + 4 * y ** 2\\n v1 = (x - 5) ** 2 + (y - 5) ** 2\\n return v0, v1\\n\\n\\n study = optuna.create_study(directions=[\\\"minimize\\\", \\\"minimize\\\"])\\n study.optimize(objective, n_trials=50)\\n\\n reference_point=[100., 50.]\\n fig = optuna.visualization.plot_hypervolume_history(study, reference_point)\\n fig.show()\\n\\n Args:\\n study:\\n A :class:`~optuna.study.Study` object whose trials are plotted for their hypervolumes.\\n The number of objectives must be 2 or more.\\n\\n reference_point:\\n A reference point to use for hypervolume computation.\\n The dimension of the reference point must be the same as the number of objectives.\\n\\n Returns:\\n A :class:`plotly.graph_objs.Figure` object.\\n \\\"\\\"\\\"\\n\\n _imports.check()\\n\\n if not study._is_multi_objective():\\n raise ValueError(\\n \\\"Study must be multi-objective. For single-objective optimization, \\\"\\n \\\"please use plot_optimization_history instead.\\\"\\n )\\n\\n if len(reference_point) != len(study.directions):\\n raise ValueError(\\n \\\"The dimension of the reference point must be the same as the number of objectives.\\\"\\n )\\n\\n info = _get_hypervolume_history_info(study, np.asarray(reference_point, dtype=np.float64))\\n return _get_hypervolume_history_plot(info)\\n\\n\\ndef _get_hypervolume_history_plot(\\n info: _HypervolumeHistoryInfo,\\n) -> \\\"go.Figure\\\":\\n layout = go.Layout(\\n title=\\\"Hypervolume History Plot\\\",\\n xaxis={\\\"title\\\": \\\"Trial\\\"},\\n yaxis={\\\"title\\\": \\\"Hypervolume\\\"},\\n )\\n\\n data = go.Scatter(\\n x=info.trial_numbers,\\n y=info.values,\\n mode=\\\"lines+markers\\\",\\n )\\n return go.Figure(data=data, layout=layout)\\n\\n\\ndef _get_hypervolume_history_info(\\n study: Study,\\n reference_point: np.ndarray,\\n) -> _HypervolumeHistoryInfo:\\n completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))\\n\\n if len(completed_trials) == 0:\\n _logger.warning(\\\"Your study does not have any completed trials.\\\")\\n\\n # Our hypervolume computation module assumes that all objectives are minimized.\\n # Here we transform the objective values and the reference point.\\n signs = np.asarray([1 if d == StudyDirection.MINIMIZE else -1 for d in study.directions])\\n minimization_reference_point = signs * reference_point\\n\\n # Only feasible trials are considered in hypervolume computation.\\n trial_numbers = []\\n values = []\\n best_trials: list[FrozenTrial] = []\\n hypervolume = 0.0\\n for trial in completed_trials:\\n trial_numbers.append(trial.number)\\n\\n has_constraints = _CONSTRAINTS_KEY in trial.system_attrs\\n if has_constraints:\\n constraints_values = trial.system_attrs[_CONSTRAINTS_KEY]\\n if any(map(lambda x: x > 0.0, constraints_values)):\\n # The trial is infeasible.\\n values.append(hypervolume)\\n continue\\n\\n if any(map(lambda t: _dominates(t, trial, study.directions), best_trials)):\\n # The trial is not on the Pareto front.\\n values.append(hypervolume)\\n continue\\n\\n best_trials = list(\\n filter(lambda t: not 
_dominates(trial, t, study.directions), best_trials)\\n ) + [trial]\\n\\n solution_set = np.asarray(\\n list(\\n filter(\\n lambda v: (v <= minimization_reference_point).all(),\\n [signs * trial.values for trial in best_trials],\\n )\\n )\\n )\\n if solution_set.size > 0:\\n hypervolume = WFG().compute(solution_set, minimization_reference_point)\\n values.append(hypervolume)\\n\\n if len(best_trials) == 0:\\n _logger.warning(\\\"Your study does not have any feasible trials.\\\")\\n\\n return _HypervolumeHistoryInfo(trial_numbers, values)\\n\",\n \"path\": \"optuna/visualization/_hypervolume_history.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"from __future__ import annotations\n\nfrom collections.abc import Sequence\nfrom typing import NamedTuple\n\nimport numpy as np\n\nfrom optuna._experimental import experimental_func\nfrom optuna._hypervolume import WFG\nfrom optuna.logging import get_logger\nfrom optuna.samplers._base import _CONSTRAINTS_KEY\nfrom optuna.study import Study\nfrom optuna.study._multi_objective import _dominates\nfrom optuna.study._study_direction import StudyDirection\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\nfrom optuna.visualization._plotly_imports import _imports\n\n\nif _imports.is_successful():\n from optuna.visualization._plotly_imports import go\n\n_logger = get_logger(__name__)\n\n\nclass _HypervolumeHistoryInfo(NamedTuple):\n trial_numbers: list[int]\n values: list[float]\n\n\n@experimental_func(\"3.3.0\")\ndef plot_hypervolume_history(\n study: Study,\n reference_point: Sequence[float],\n) -> \"go.Figure\":\n \"\"\"Plot hypervolume history of all trials in a study.\n\n Example:\n\n The following code snippet shows how to plot optimization history.\n\n .. plotly::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", 0, 5)\n y = trial.suggest_float(\"y\", 0, 3)\n\n v0 = 4 * x ** 2 + 4 * y ** 2\n v1 = (x - 5) ** 2 + (y - 5) ** 2\n return v0, v1\n\n\n study = optuna.create_study(directions=[\"minimize\", \"minimize\"])\n study.optimize(objective, n_trials=50)\n\n reference_point=[100., 50.]\n fig = optuna.visualization.plot_hypervolume_history(study, reference_point)\n fig.show()\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their hypervolumes.\n The number of objectives must be 2 or more.\n\n reference_point:\n A reference point to use for hypervolume computation.\n The dimension of the reference point must be the same as the number of objectives.\n\n Returns:\n A :class:`plotly.graph_objs.Figure` object.\n \"\"\"\n\n _imports.check()\n\n if not study._is_multi_objective():\n raise ValueError(\n \"Study must be multi-objective. 
For single-objective optimization, \"\n \"please use plot_optimization_history instead.\"\n )\n\n if len(reference_point) != len(study.directions):\n raise ValueError(\n \"The dimension of the reference point must be the same as the number of objectives.\"\n )\n\n info = _get_hypervolume_history_info(study, np.asarray(reference_point, dtype=np.float64))\n return _get_hypervolume_history_plot(info)\n\n\ndef _get_hypervolume_history_plot(\n info: _HypervolumeHistoryInfo,\n) -> \"go.Figure\":\n layout = go.Layout(\n title=\"Hypervolume History Plot\",\n xaxis={\"title\": \"Trial\"},\n yaxis={\"title\": \"Hypervolume\"},\n )\n\n data = go.Scatter(\n x=info.trial_numbers,\n y=info.values,\n mode=\"lines+markers\",\n )\n return go.Figure(data=data, layout=layout)\n\n\ndef _get_hypervolume_history_info(\n study: Study,\n reference_point: np.ndarray,\n) -> _HypervolumeHistoryInfo:\n completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))\n\n if len(completed_trials) == 0:\n _logger.warning(\"Your study does not have any completed trials.\")\n\n # Our hypervolume computation module assumes that all objectives are minimized.\n # Here we transform the objective values and the reference point.\n signs = np.asarray([1 if d == StudyDirection.MINIMIZE else -1 for d in study.directions])\n minimization_reference_point = signs * reference_point\n\n # Only feasible trials are considered in hypervolume computation.\n trial_numbers = []\n values = []\n best_trials: list[FrozenTrial] = []\n hypervolume = 0.0\n for trial in completed_trials:\n trial_numbers.append(trial.number)\n\n has_constraints = _CONSTRAINTS_KEY in trial.system_attrs\n if has_constraints:\n constraints_values = trial.system_attrs[_CONSTRAINTS_KEY]\n if any(map(lambda x: x > 0.0, constraints_values)):\n # The trial is infeasible.\n values.append(hypervolume)\n continue\n\n if any(map(lambda t: _dominates(t, trial, study.directions), best_trials)):\n # The trial is not on the Pareto front.\n values.append(hypervolume)\n continue\n\n best_trials = list(\n filter(lambda t: not _dominates(trial, t, study.directions), best_trials)\n ) + [trial]\n\n solution_set = np.asarray(\n list(\n filter(\n lambda v: (v <= minimization_reference_point).all(),\n [signs * trial.values for trial in best_trials],\n )\n )\n )\n if solution_set.size > 0:\n hypervolume = WFG().compute(solution_set, minimization_reference_point)\n values.append(hypervolume)\n\n if len(best_trials) == 0:\n _logger.warning(\"Your study does not have any feasible trials.\")\n\n return _HypervolumeHistoryInfo(trial_numbers, values)\n","path":"optuna/visualization/_hypervolume_history.py"}],"string":"[\n {\n \"content\": \"from __future__ import annotations\\n\\nfrom collections.abc import Sequence\\nfrom typing import NamedTuple\\n\\nimport numpy as np\\n\\nfrom optuna._experimental import experimental_func\\nfrom optuna._hypervolume import WFG\\nfrom optuna.logging import get_logger\\nfrom optuna.samplers._base import _CONSTRAINTS_KEY\\nfrom optuna.study import Study\\nfrom optuna.study._multi_objective import _dominates\\nfrom optuna.study._study_direction import StudyDirection\\nfrom optuna.trial import FrozenTrial\\nfrom optuna.trial import TrialState\\nfrom optuna.visualization._plotly_imports import _imports\\n\\n\\nif _imports.is_successful():\\n from optuna.visualization._plotly_imports import go\\n\\n_logger = get_logger(__name__)\\n\\n\\nclass _HypervolumeHistoryInfo(NamedTuple):\\n trial_numbers: list[int]\\n values: 
list[float]\\n\\n\\n@experimental_func(\\\"3.3.0\\\")\\ndef plot_hypervolume_history(\\n study: Study,\\n reference_point: Sequence[float],\\n) -> \\\"go.Figure\\\":\\n \\\"\\\"\\\"Plot hypervolume history of all trials in a study.\\n\\n Example:\\n\\n The following code snippet shows how to plot optimization history.\\n\\n .. plotly::\\n\\n import optuna\\n\\n\\n def objective(trial):\\n x = trial.suggest_float(\\\"x\\\", 0, 5)\\n y = trial.suggest_float(\\\"y\\\", 0, 3)\\n\\n v0 = 4 * x ** 2 + 4 * y ** 2\\n v1 = (x - 5) ** 2 + (y - 5) ** 2\\n return v0, v1\\n\\n\\n study = optuna.create_study(directions=[\\\"minimize\\\", \\\"minimize\\\"])\\n study.optimize(objective, n_trials=50)\\n\\n reference_point=[100., 50.]\\n fig = optuna.visualization.plot_hypervolume_history(study, reference_point)\\n fig.show()\\n\\n Args:\\n study:\\n A :class:`~optuna.study.Study` object whose trials are plotted for their hypervolumes.\\n The number of objectives must be 2 or more.\\n\\n reference_point:\\n A reference point to use for hypervolume computation.\\n The dimension of the reference point must be the same as the number of objectives.\\n\\n Returns:\\n A :class:`plotly.graph_objs.Figure` object.\\n \\\"\\\"\\\"\\n\\n _imports.check()\\n\\n if not study._is_multi_objective():\\n raise ValueError(\\n \\\"Study must be multi-objective. For single-objective optimization, \\\"\\n \\\"please use plot_optimization_history instead.\\\"\\n )\\n\\n if len(reference_point) != len(study.directions):\\n raise ValueError(\\n \\\"The dimension of the reference point must be the same as the number of objectives.\\\"\\n )\\n\\n info = _get_hypervolume_history_info(study, np.asarray(reference_point, dtype=np.float64))\\n return _get_hypervolume_history_plot(info)\\n\\n\\ndef _get_hypervolume_history_plot(\\n info: _HypervolumeHistoryInfo,\\n) -> \\\"go.Figure\\\":\\n layout = go.Layout(\\n title=\\\"Hypervolume History Plot\\\",\\n xaxis={\\\"title\\\": \\\"Trial\\\"},\\n yaxis={\\\"title\\\": \\\"Hypervolume\\\"},\\n )\\n\\n data = go.Scatter(\\n x=info.trial_numbers,\\n y=info.values,\\n mode=\\\"lines+markers\\\",\\n )\\n return go.Figure(data=data, layout=layout)\\n\\n\\ndef _get_hypervolume_history_info(\\n study: Study,\\n reference_point: np.ndarray,\\n) -> _HypervolumeHistoryInfo:\\n completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))\\n\\n if len(completed_trials) == 0:\\n _logger.warning(\\\"Your study does not have any completed trials.\\\")\\n\\n # Our hypervolume computation module assumes that all objectives are minimized.\\n # Here we transform the objective values and the reference point.\\n signs = np.asarray([1 if d == StudyDirection.MINIMIZE else -1 for d in study.directions])\\n minimization_reference_point = signs * reference_point\\n\\n # Only feasible trials are considered in hypervolume computation.\\n trial_numbers = []\\n values = []\\n best_trials: list[FrozenTrial] = []\\n hypervolume = 0.0\\n for trial in completed_trials:\\n trial_numbers.append(trial.number)\\n\\n has_constraints = _CONSTRAINTS_KEY in trial.system_attrs\\n if has_constraints:\\n constraints_values = trial.system_attrs[_CONSTRAINTS_KEY]\\n if any(map(lambda x: x > 0.0, constraints_values)):\\n # The trial is infeasible.\\n values.append(hypervolume)\\n continue\\n\\n if any(map(lambda t: _dominates(t, trial, study.directions), best_trials)):\\n # The trial is not on the Pareto front.\\n values.append(hypervolume)\\n continue\\n\\n best_trials = list(\\n filter(lambda t: not 
_dominates(trial, t, study.directions), best_trials)\\n ) + [trial]\\n\\n solution_set = np.asarray(\\n list(\\n filter(\\n lambda v: (v <= minimization_reference_point).all(),\\n [signs * trial.values for trial in best_trials],\\n )\\n )\\n )\\n if solution_set.size > 0:\\n hypervolume = WFG().compute(solution_set, minimization_reference_point)\\n values.append(hypervolume)\\n\\n if len(best_trials) == 0:\\n _logger.warning(\\\"Your study does not have any feasible trials.\\\")\\n\\n return _HypervolumeHistoryInfo(trial_numbers, values)\\n\",\n \"path\": \"optuna/visualization/_hypervolume_history.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/optuna/visualization/_hypervolume_history.py b/optuna/visualization/_hypervolume_history.py\nindex c1fff4b877..39e6c23e53 100644\n--- a/optuna/visualization/_hypervolume_history.py\n+++ b/optuna/visualization/_hypervolume_history.py\n@@ -1,7 +1,7 @@\n from __future__ import annotations\n \n+from collections.abc import Sequence\n from typing import NamedTuple\n-from typing import Sequence\n \n import numpy as np\n \n"}}},{"rowIdx":430,"cells":{"in_source_id":{"kind":"string","value":"ansible__ansible-modules-core-3859"},"issue":{"kind":"string","value":"Bug in regex checker for azure_rm_virtualnetwork\n##### ISSUE TYPE\n- Feature Idea\n##### COMPONENT NAME\n\nazure_rm_virtualnetwork\n##### ANSIBLE VERSION\n\n2.2.0\n##### CONFIGURATION\n\nN/A\n##### OS / ENVIRONMENT\n\nUbuntu 14.04\n##### SUMMARY\n\nazure_rm_virtualnetwork contains code to check for the validity of the \"name\" parameter (https://github.com/ansible/ansible-modules-core/blob/devel/cloud/azure/azure_rm_virtualnetwork.py#L148). That regex does not take into account that the \".\" character is valid, as long as it's neither at the start or end of the name string.\n##### STEPS TO REPRODUCE\n\n```\n- name: Create virtual network\n azure_rm_virtualnetwork:\n name: My.Sweet.Network\n```\n##### EXPECTED RESULTS\n\nThe network should get created, since it has a valid name\n##### ACTUAL RESULTS\n\nmsg\": \"Parameter error: name must begin with a letter or number, end with a letter, number or underscore and may contain only letters, numbers, periods, underscores or hyphens.\"}\n\n"},"before_files":{"kind":"list like","value":[{"content":"#!/usr/bin/python\n#\n# Copyright (c) 2016 Matt Davis, \n# Chris Houseknecht, \n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see .\n#\n\n\nDOCUMENTATION = '''\n---\nmodule: azure_rm_virtualnetwork\nversion_added: \"2.1\"\nshort_description: Manage Azure virtual networks.\ndescription:\n - Create, update or delete a virtual networks. Allows setting and updating the available IPv4 address ranges\n and setting custom DNS servers. 
Use the azure_rm_subnet module to associate subnets with a virtual network.\noptions:\n resource_group:\n description:\n - name of resource group.\n required: true\n address_prefixes_cidr:\n description:\n - List of IPv4 address ranges where each is formatted using CIDR notation. Required when creating\n a new virtual network or using purge_address_prefixes.\n aliases:\n - address_prefixes\n default: null\n required: false\n dns_servers:\n description:\n - Custom list of DNS servers. Maximum length of two. The first server in the list will be treated\n as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the\n specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to\n default Azure servers.\n default: null\n required: false\n location:\n description:\n - Valid azure location. Defaults to location of the resource group.\n default: resource_group location\n required: false\n name:\n description:\n - name of the virtual network.\n required: true\n purge_address_prefixes:\n description:\n - Use with state present to remove any existing address_prefixes.\n default: false\n purge_dns_servers:\n description:\n - Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually\n exclusive with dns_servers.\n default: false\n required: false\n state:\n description:\n - Assert the state of the virtual network. Use 'present' to create or update and\n 'absent' to delete.\n default: present\n choices:\n - absent\n - present\n required: false\n\nextends_documentation_fragment:\n - azure\n - azure_tags\n\nauthor:\n - \"Chris Houseknecht (@chouseknecht)\"\n - \"Matt Davis (@nitzmahone)\"\n\n'''\n\nEXAMPLES = '''\n - name: Create a virtual network\n azure_rm_virtualnetwork:\n name: foobar\n resource_group: Testing\n address_prefixes_cidr:\n - \"10.1.0.0/16\"\n - \"172.100.0.0/16\"\n dns_servers:\n - \"127.0.0.1\"\n - \"127.0.0.2\"\n tags:\n testing: testing\n delete: on-exit\n\n - name: Delete a virtual network\n azure_rm_virtualnetwork:\n name: foobar\n resource_group: Testing\n state: absent\n'''\nRETURN = '''\nstate:\n description: Current state of the virtual network.\n returned: always\n type: dict\n sample: {\n \"address_prefixes\": [\n \"10.1.0.0/16\",\n \"172.100.0.0/16\"\n ],\n \"dns_servers\": [\n \"127.0.0.1\",\n \"127.0.0.3\"\n ],\n \"etag\": 'W/\"0712e87c-f02f-4bb3-8b9e-2da0390a3886\"',\n \"id\": \"/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network\",\n \"location\": \"eastus\",\n \"name\": \"my_test_network\",\n \"provisioning_state\": \"Succeeded\",\n \"tags\": null,\n \"type\": \"Microsoft.Network/virtualNetworks\"\n }\n'''\n\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.azure_rm_common import *\n\ntry:\n from msrestazure.azure_exceptions import CloudError\n from azure.mgmt.network.models import VirtualNetwork, AddressSpace, DhcpOptions\nexcept ImportError:\n # This is handled in azure_rm_common\n pass\n\n\nNAME_PATTERN = re.compile(r\"^[a-zA-Z0-9_]{1,61}[a-z0-9_]$\")\n\n\ndef virtual_network_to_dict(vnet):\n '''\n Convert a virtual network object to a dict.\n :param vnet: VirtualNet object\n :return: dict\n '''\n results = dict(\n id=vnet.id,\n name=vnet.name,\n location=vnet.location,\n type=vnet.type,\n tags=vnet.tags,\n provisioning_state=vnet.provisioning_state,\n etag=vnet.etag\n )\n if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:\n 
results['dns_servers'] = []\n for server in vnet.dhcp_options.dns_servers:\n results['dns_servers'].append(server)\n if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:\n results['address_prefixes'] = []\n for space in vnet.address_space.address_prefixes:\n results['address_prefixes'].append(space)\n return results\n\n\nclass AzureRMVirtualNetwork(AzureRMModuleBase):\n\n def __init__(self):\n\n self.module_arg_spec = dict(\n resource_group=dict(type='str', required=True),\n name=dict(type='str', required=True),\n state=dict(type='str', default='present', choices=['present', 'absent']),\n location=dict(type='str'),\n address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),\n dns_servers=dict(type='list',),\n purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),\n purge_dns_servers=dict(type='bool', default=False),\n )\n\n mutually_exclusive = [\n ('dns_servers', 'purge_dns_servers')\n ]\n\n required_if = [\n ('purge_address_prefixes', True, ['address_prefixes_cidr'])\n ]\n\n self.resource_group = None\n self.name = None\n self.state = None\n self.location = None\n self.address_prefixes_cidr = None\n self.purge_address_prefixes = None\n self.dns_servers = None\n self.purge_dns_servers = None\n\n self.results=dict(\n changed=False,\n state=dict()\n )\n\n super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,\n mutually_exclusive=mutually_exclusive,\n required_if=required_if,\n supports_check_mode=True)\n\n def exec_module(self, **kwargs):\n\n for key in self.module_arg_spec.keys() + ['tags']:\n setattr(self, key, kwargs[key])\n\n self.results['check_mode'] = self.check_mode\n\n resource_group = self.get_resource_group(self.resource_group)\n if not self.location:\n # Set default location\n self.location = resource_group.location\n\n if not NAME_PATTERN.match(self.name):\n self.fail(\"Parameter error: name must begin with a letter or number, end with a letter, number \"\n \"or underscore and may contain only letters, numbers, periods, underscores or hyphens.\")\n\n if self.state == 'present' and self.purge_address_prefixes:\n for prefix in self.address_prefixes_cidr:\n if not CIDR_PATTERN.match(prefix):\n self.fail(\"Parameter error: invalid address prefix value {0}\".format(prefix))\n\n if self.dns_servers and len(self.dns_servers) > 2:\n self.fail(\"Parameter error: You can provide a maximum of 2 DNS servers.\")\n\n changed = False\n results = dict()\n\n try:\n self.log('Fetching vnet {0}'.format(self.name))\n vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)\n\n results = virtual_network_to_dict(vnet)\n self.log('Vnet exists {0}'.format(self.name))\n self.log(results, pretty_print=True)\n self.check_provisioning_state(vnet, self.state)\n\n if self.state == 'present':\n if self.address_prefixes_cidr:\n existing_address_prefix_set = set(vnet.address_space.address_prefixes)\n requested_address_prefix_set = set(self.address_prefixes_cidr)\n missing_prefixes = requested_address_prefix_set - existing_address_prefix_set\n extra_prefixes = existing_address_prefix_set - requested_address_prefix_set\n if len(missing_prefixes) > 0:\n self.log('CHANGED: there are missing address_prefixes')\n changed = True\n if not self.purge_address_prefixes:\n # add the missing prefixes\n for prefix in missing_prefixes:\n results['address_prefixes'].append(prefix)\n\n if len(extra_prefixes) > 0 and self.purge_address_prefixes:\n self.log('CHANGED: there are address_prefixes to purge')\n changed = True\n # replace existing 
address prefixes with requested set\n results['address_prefixes'] = self.address_prefixes_cidr\n\n update_tags, results['tags'] = self.update_tags(results['tags'])\n if update_tags:\n changed = True\n\n if self.dns_servers:\n existing_dns_set = set(vnet.dhcp_options.dns_servers)\n requested_dns_set = set(self.dns_servers)\n if existing_dns_set != requested_dns_set:\n self.log('CHANGED: replacing DNS servers')\n changed = True\n results['dns_servers'] = self.dns_servers\n\n if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:\n self.log('CHANGED: purging existing DNS servers')\n changed = True\n results['dns_servers'] = []\n elif self.state == 'absent':\n self.log(\"CHANGED: vnet exists but requested state is 'absent'\")\n changed = True\n except CloudError:\n self.log('Vnet {0} does not exist'.format(self.name))\n if self.state == 'present':\n self.log(\"CHANGED: vnet {0} does not exist but requested state is 'present'\".format(self.name))\n changed = True\n\n self.results['changed'] = changed\n self.results['state'] = results\n\n if self.check_mode:\n return self.results\n\n if changed:\n if self.state == 'present':\n if not results:\n # create a new virtual network\n self.log(\"Create virtual network {0}\".format(self.name))\n if not self.address_prefixes_cidr:\n self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')\n vnet = VirtualNetwork(\n location=self.location,\n address_space=AddressSpace(\n address_prefixes=self.address_prefixes_cidr\n )\n )\n if self.dns_servers:\n vnet.dhcp_options = DhcpOptions(\n dns_servers=self.dns_servers\n )\n if self.tags:\n vnet.tags = self.tags\n self.results['state'] = self.create_or_update_vnet(vnet)\n else:\n # update existing virtual network\n self.log(\"Update virtual network {0}\".format(self.name))\n vnet = VirtualNetwork(\n location=results['location'],\n address_space=AddressSpace(\n address_prefixes=results['address_prefixes']\n ),\n tags=results['tags']\n )\n if results.get('dns_servers'):\n vnet.dhcp_options = DhcpOptions(\n dns_servers=results['dns_servers']\n )\n self.results['state'] = self.create_or_update_vnet(vnet)\n elif self.state == 'absent':\n self.delete_virtual_network()\n self.results['state']['status'] = 'Deleted'\n\n\n return self.results\n\n def create_or_update_vnet(self, vnet):\n try:\n poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)\n new_vnet = self.get_poller_result(poller)\n except Exception as exc:\n self.fail(\"Error creating or updating virtual network {0} - {1}\".format(self.name, str(exc)))\n return virtual_network_to_dict(new_vnet)\n\n def delete_virtual_network(self):\n try:\n poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)\n result = self.get_poller_result(poller)\n except Exception as exc:\n self.fail(\"Error deleting virtual network {0} - {1}\".format(self.name, str(exc)))\n return result\n\n\ndef main():\n AzureRMVirtualNetwork()\n\nif __name__ == '__main__':\n main()\n\n","path":"cloud/azure/azure_rm_virtualnetwork.py"}],"string":"[\n {\n \"content\": \"#!/usr/bin/python\\n#\\n# Copyright (c) 2016 Matt Davis, \\n# Chris Houseknecht, \\n#\\n# This file is part of Ansible\\n#\\n# Ansible is free software: you can redistribute it and/or modify\\n# it under the terms of the GNU General Public License as published by\\n# the Free Software Foundation, either version 3 of the License, or\\n# (at your option) any later version.\\n#\\n# Ansible is 
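The change detection in `exec_module` above reduces to plain set arithmetic on CIDR strings: prefixes present in the request but missing from the vnet force an update, while leftover prefixes are removed only when purging is enabled. A toy illustration with hypothetical values:

```python
existing = {"10.1.0.0/16", "172.100.0.0/16"}    # prefixes on the live vnet
requested = {"10.1.0.0/16", "192.168.0.0/24"}   # prefixes from the playbook

missing = requested - existing   # {'192.168.0.0/24'} -> must be added
extra = existing - requested     # {'172.100.0.0/16'} -> dropped only when purging

purge_address_prefixes = False
changed = bool(missing) or (purge_address_prefixes and bool(extra))
print(changed)  # True: a prefix is missing, so the vnet gets updated
```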
self.check_mode:\\n return self.results\\n\\n if changed:\\n if self.state == 'present':\\n if not results:\\n # create a new virtual network\\n self.log(\\\"Create virtual network {0}\\\".format(self.name))\\n if not self.address_prefixes_cidr:\\n self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')\\n vnet = VirtualNetwork(\\n location=self.location,\\n address_space=AddressSpace(\\n address_prefixes=self.address_prefixes_cidr\\n )\\n )\\n if self.dns_servers:\\n vnet.dhcp_options = DhcpOptions(\\n dns_servers=self.dns_servers\\n )\\n if self.tags:\\n vnet.tags = self.tags\\n self.results['state'] = self.create_or_update_vnet(vnet)\\n else:\\n # update existing virtual network\\n self.log(\\\"Update virtual network {0}\\\".format(self.name))\\n vnet = VirtualNetwork(\\n location=results['location'],\\n address_space=AddressSpace(\\n address_prefixes=results['address_prefixes']\\n ),\\n tags=results['tags']\\n )\\n if results.get('dns_servers'):\\n vnet.dhcp_options = DhcpOptions(\\n dns_servers=results['dns_servers']\\n )\\n self.results['state'] = self.create_or_update_vnet(vnet)\\n elif self.state == 'absent':\\n self.delete_virtual_network()\\n self.results['state']['status'] = 'Deleted'\\n\\n\\n return self.results\\n\\n def create_or_update_vnet(self, vnet):\\n try:\\n poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)\\n new_vnet = self.get_poller_result(poller)\\n except Exception as exc:\\n self.fail(\\\"Error creating or updating virtual network {0} - {1}\\\".format(self.name, str(exc)))\\n return virtual_network_to_dict(new_vnet)\\n\\n def delete_virtual_network(self):\\n try:\\n poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)\\n result = self.get_poller_result(poller)\\n except Exception as exc:\\n self.fail(\\\"Error deleting virtual network {0} - {1}\\\".format(self.name, str(exc)))\\n return result\\n\\n\\ndef main():\\n AzureRMVirtualNetwork()\\n\\nif __name__ == '__main__':\\n main()\\n\\n\",\n \"path\": \"cloud/azure/azure_rm_virtualnetwork.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"#!/usr/bin/python\n#\n# Copyright (c) 2016 Matt Davis, \n# Chris Houseknecht, \n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see .\n#\n\n\nDOCUMENTATION = '''\n---\nmodule: azure_rm_virtualnetwork\nversion_added: \"2.1\"\nshort_description: Manage Azure virtual networks.\ndescription:\n - Create, update or delete a virtual networks. Allows setting and updating the available IPv4 address ranges\n and setting custom DNS servers. Use the azure_rm_subnet module to associate subnets with a virtual network.\noptions:\n resource_group:\n description:\n - name of resource group.\n required: true\n address_prefixes_cidr:\n description:\n - List of IPv4 address ranges where each is formatted using CIDR notation. 
Required when creating\n a new virtual network or using purge_address_prefixes.\n aliases:\n - address_prefixes\n default: null\n required: false\n dns_servers:\n description:\n - Custom list of DNS servers. Maximum length of two. The first server in the list will be treated\n as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the\n specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to\n default Azure servers.\n default: null\n required: false\n location:\n description:\n - Valid azure location. Defaults to location of the resource group.\n default: resource_group location\n required: false\n name:\n description:\n - name of the virtual network.\n required: true\n purge_address_prefixes:\n description:\n - Use with state present to remove any existing address_prefixes.\n default: false\n purge_dns_servers:\n description:\n - Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually\n exclusive with dns_servers.\n default: false\n required: false\n state:\n description:\n - Assert the state of the virtual network. Use 'present' to create or update and\n 'absent' to delete.\n default: present\n choices:\n - absent\n - present\n required: false\n\nextends_documentation_fragment:\n - azure\n - azure_tags\n\nauthor:\n - \"Chris Houseknecht (@chouseknecht)\"\n - \"Matt Davis (@nitzmahone)\"\n\n'''\n\nEXAMPLES = '''\n - name: Create a virtual network\n azure_rm_virtualnetwork:\n name: foobar\n resource_group: Testing\n address_prefixes_cidr:\n - \"10.1.0.0/16\"\n - \"172.100.0.0/16\"\n dns_servers:\n - \"127.0.0.1\"\n - \"127.0.0.2\"\n tags:\n testing: testing\n delete: on-exit\n\n - name: Delete a virtual network\n azure_rm_virtualnetwork:\n name: foobar\n resource_group: Testing\n state: absent\n'''\nRETURN = '''\nstate:\n description: Current state of the virtual network.\n returned: always\n type: dict\n sample: {\n \"address_prefixes\": [\n \"10.1.0.0/16\",\n \"172.100.0.0/16\"\n ],\n \"dns_servers\": [\n \"127.0.0.1\",\n \"127.0.0.3\"\n ],\n \"etag\": 'W/\"0712e87c-f02f-4bb3-8b9e-2da0390a3886\"',\n \"id\": \"/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network\",\n \"location\": \"eastus\",\n \"name\": \"my_test_network\",\n \"provisioning_state\": \"Succeeded\",\n \"tags\": null,\n \"type\": \"Microsoft.Network/virtualNetworks\"\n }\n'''\n\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.azure_rm_common import *\n\ntry:\n from msrestazure.azure_exceptions import CloudError\n from azure.mgmt.network.models import VirtualNetwork, AddressSpace, DhcpOptions\nexcept ImportError:\n # This is handled in azure_rm_common\n pass\n\n\nNAME_PATTERN = re.compile(r\"^[a-zA-Z0-9]+[a-zA-Z0-9\\._-]+[a-zA-Z0-9_]+$\")\n\n\ndef virtual_network_to_dict(vnet):\n '''\n Convert a virtual network object to a dict.\n :param vnet: VirtualNet object\n :return: dict\n '''\n results = dict(\n id=vnet.id,\n name=vnet.name,\n location=vnet.location,\n type=vnet.type,\n tags=vnet.tags,\n provisioning_state=vnet.provisioning_state,\n etag=vnet.etag\n )\n if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:\n results['dns_servers'] = []\n for server in vnet.dhcp_options.dns_servers:\n results['dns_servers'].append(server)\n if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:\n results['address_prefixes'] = []\n for space in vnet.address_space.address_prefixes:\n 
results['address_prefixes'].append(space)\n return results\n\n\nclass AzureRMVirtualNetwork(AzureRMModuleBase):\n\n def __init__(self):\n\n self.module_arg_spec = dict(\n resource_group=dict(type='str', required=True),\n name=dict(type='str', required=True),\n state=dict(type='str', default='present', choices=['present', 'absent']),\n location=dict(type='str'),\n address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),\n dns_servers=dict(type='list',),\n purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),\n purge_dns_servers=dict(type='bool', default=False),\n )\n\n mutually_exclusive = [\n ('dns_servers', 'purge_dns_servers')\n ]\n\n required_if = [\n ('purge_address_prefixes', True, ['address_prefixes_cidr'])\n ]\n\n self.resource_group = None\n self.name = None\n self.state = None\n self.location = None\n self.address_prefixes_cidr = None\n self.purge_address_prefixes = None\n self.dns_servers = None\n self.purge_dns_servers = None\n\n self.results=dict(\n changed=False,\n state=dict()\n )\n\n super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,\n mutually_exclusive=mutually_exclusive,\n required_if=required_if,\n supports_check_mode=True)\n\n def exec_module(self, **kwargs):\n\n for key in self.module_arg_spec.keys() + ['tags']:\n setattr(self, key, kwargs[key])\n\n self.results['check_mode'] = self.check_mode\n\n resource_group = self.get_resource_group(self.resource_group)\n if not self.location:\n # Set default location\n self.location = resource_group.location\n\n if not NAME_PATTERN.match(self.name):\n self.fail(\"Parameter error: name must begin with a letter or number, end with a letter, number \"\n \"or underscore and may contain only letters, numbers, periods, underscores or hyphens.\")\n\n if self.state == 'present' and self.purge_address_prefixes:\n for prefix in self.address_prefixes_cidr:\n if not CIDR_PATTERN.match(prefix):\n self.fail(\"Parameter error: invalid address prefix value {0}\".format(prefix))\n\n if self.dns_servers and len(self.dns_servers) > 2:\n self.fail(\"Parameter error: You can provide a maximum of 2 DNS servers.\")\n\n changed = False\n results = dict()\n\n try:\n self.log('Fetching vnet {0}'.format(self.name))\n vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)\n\n results = virtual_network_to_dict(vnet)\n self.log('Vnet exists {0}'.format(self.name))\n self.log(results, pretty_print=True)\n self.check_provisioning_state(vnet, self.state)\n\n if self.state == 'present':\n if self.address_prefixes_cidr:\n existing_address_prefix_set = set(vnet.address_space.address_prefixes)\n requested_address_prefix_set = set(self.address_prefixes_cidr)\n missing_prefixes = requested_address_prefix_set - existing_address_prefix_set\n extra_prefixes = existing_address_prefix_set - requested_address_prefix_set\n if len(missing_prefixes) > 0:\n self.log('CHANGED: there are missing address_prefixes')\n changed = True\n if not self.purge_address_prefixes:\n # add the missing prefixes\n for prefix in missing_prefixes:\n results['address_prefixes'].append(prefix)\n\n if len(extra_prefixes) > 0 and self.purge_address_prefixes:\n self.log('CHANGED: there are address_prefixes to purge')\n changed = True\n # replace existing address prefixes with requested set\n results['address_prefixes'] = self.address_prefixes_cidr\n\n update_tags, results['tags'] = self.update_tags(results['tags'])\n if update_tags:\n changed = True\n\n if self.dns_servers:\n existing_dns_set = 
set(vnet.dhcp_options.dns_servers)\n requested_dns_set = set(self.dns_servers)\n if existing_dns_set != requested_dns_set:\n self.log('CHANGED: replacing DNS servers')\n changed = True\n results['dns_servers'] = self.dns_servers\n\n if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:\n self.log('CHANGED: purging existing DNS servers')\n changed = True\n results['dns_servers'] = []\n elif self.state == 'absent':\n self.log(\"CHANGED: vnet exists but requested state is 'absent'\")\n changed = True\n except CloudError:\n self.log('Vnet {0} does not exist'.format(self.name))\n if self.state == 'present':\n self.log(\"CHANGED: vnet {0} does not exist but requested state is 'present'\".format(self.name))\n changed = True\n\n self.results['changed'] = changed\n self.results['state'] = results\n\n if self.check_mode:\n return self.results\n\n if changed:\n if self.state == 'present':\n if not results:\n # create a new virtual network\n self.log(\"Create virtual network {0}\".format(self.name))\n if not self.address_prefixes_cidr:\n self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')\n vnet = VirtualNetwork(\n location=self.location,\n address_space=AddressSpace(\n address_prefixes=self.address_prefixes_cidr\n )\n )\n if self.dns_servers:\n vnet.dhcp_options = DhcpOptions(\n dns_servers=self.dns_servers\n )\n if self.tags:\n vnet.tags = self.tags\n self.results['state'] = self.create_or_update_vnet(vnet)\n else:\n # update existing virtual network\n self.log(\"Update virtual network {0}\".format(self.name))\n vnet = VirtualNetwork(\n location=results['location'],\n address_space=AddressSpace(\n address_prefixes=results['address_prefixes']\n ),\n tags=results['tags']\n )\n if results.get('dns_servers'):\n vnet.dhcp_options = DhcpOptions(\n dns_servers=results['dns_servers']\n )\n self.results['state'] = self.create_or_update_vnet(vnet)\n elif self.state == 'absent':\n self.delete_virtual_network()\n self.results['state']['status'] = 'Deleted'\n\n\n return self.results\n\n def create_or_update_vnet(self, vnet):\n try:\n poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)\n new_vnet = self.get_poller_result(poller)\n except Exception as exc:\n self.fail(\"Error creating or updating virtual network {0} - {1}\".format(self.name, str(exc)))\n return virtual_network_to_dict(new_vnet)\n\n def delete_virtual_network(self):\n try:\n poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)\n result = self.get_poller_result(poller)\n except Exception as exc:\n self.fail(\"Error deleting virtual network {0} - {1}\".format(self.name, str(exc)))\n return result\n\n\ndef main():\n AzureRMVirtualNetwork()\n\nif __name__ == '__main__':\n main()\n\n","path":"cloud/azure/azure_rm_virtualnetwork.py"}],"string":"[\n {\n \"content\": \"#!/usr/bin/python\\n#\\n# Copyright (c) 2016 Matt Davis, \\n# Chris Houseknecht, \\n#\\n# This file is part of Ansible\\n#\\n# Ansible is free software: you can redistribute it and/or modify\\n# it under the terms of the GNU General Public License as published by\\n# the Free Software Foundation, either version 3 of the License, or\\n# (at your option) any later version.\\n#\\n# Ansible is distributed in the hope that it will be useful,\\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
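The after-file's corrected pattern is the heart of this record, so it is worth checking its behavior directly. A quick, illustrative sanity check against the name from the bug report and a few edge cases:

```python
import re

NAME_PATTERN = re.compile(r"^[a-zA-Z0-9]+[a-zA-Z0-9\._-]+[a-zA-Z0-9_]+$")

for name in ["My.Sweet.Network", "vnet-01", ".leading-dot", "trailing-dot.", "ab"]:
    print(name, bool(NAME_PATTERN.match(name)))
# My.Sweet.Network True   -> periods are now accepted in the middle
# vnet-01 True
# .leading-dot False      -> must begin with a letter or number
# trailing-dot. False     -> must end with a letter, number or underscore
# ab False                -> caveat: three '+' groups mean the pattern only
#                            matches names of three or more characters
```

The rejection of two-character names appears to be a side effect of the fix rather than an intentional rule, which may be worth keeping in mind when reusing the pattern.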
results\\n\\n if self.check_mode:\\n return self.results\\n\\n if changed:\\n if self.state == 'present':\\n if not results:\\n # create a new virtual network\\n self.log(\\\"Create virtual network {0}\\\".format(self.name))\\n if not self.address_prefixes_cidr:\\n self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')\\n vnet = VirtualNetwork(\\n location=self.location,\\n address_space=AddressSpace(\\n address_prefixes=self.address_prefixes_cidr\\n )\\n )\\n if self.dns_servers:\\n vnet.dhcp_options = DhcpOptions(\\n dns_servers=self.dns_servers\\n )\\n if self.tags:\\n vnet.tags = self.tags\\n self.results['state'] = self.create_or_update_vnet(vnet)\\n else:\\n # update existing virtual network\\n self.log(\\\"Update virtual network {0}\\\".format(self.name))\\n vnet = VirtualNetwork(\\n location=results['location'],\\n address_space=AddressSpace(\\n address_prefixes=results['address_prefixes']\\n ),\\n tags=results['tags']\\n )\\n if results.get('dns_servers'):\\n vnet.dhcp_options = DhcpOptions(\\n dns_servers=results['dns_servers']\\n )\\n self.results['state'] = self.create_or_update_vnet(vnet)\\n elif self.state == 'absent':\\n self.delete_virtual_network()\\n self.results['state']['status'] = 'Deleted'\\n\\n\\n return self.results\\n\\n def create_or_update_vnet(self, vnet):\\n try:\\n poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)\\n new_vnet = self.get_poller_result(poller)\\n except Exception as exc:\\n self.fail(\\\"Error creating or updating virtual network {0} - {1}\\\".format(self.name, str(exc)))\\n return virtual_network_to_dict(new_vnet)\\n\\n def delete_virtual_network(self):\\n try:\\n poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)\\n result = self.get_poller_result(poller)\\n except Exception as exc:\\n self.fail(\\\"Error deleting virtual network {0} - {1}\\\".format(self.name, str(exc)))\\n return result\\n\\n\\ndef main():\\n AzureRMVirtualNetwork()\\n\\nif __name__ == '__main__':\\n main()\\n\\n\",\n \"path\": \"cloud/azure/azure_rm_virtualnetwork.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/cloud/azure/azure_rm_virtualnetwork.py b/cloud/azure/azure_rm_virtualnetwork.py\nindex 9f1e7e61f23..d7bbdd00d85 100644\n--- a/cloud/azure/azure_rm_virtualnetwork.py\n+++ b/cloud/azure/azure_rm_virtualnetwork.py\n@@ -145,7 +145,7 @@\n pass\n \n \n-NAME_PATTERN = re.compile(r\"^[a-zA-Z0-9_]{1,61}[a-z0-9_]$\")\n+NAME_PATTERN = re.compile(r\"^[a-zA-Z0-9]+[a-zA-Z0-9\\._-]+[a-zA-Z0-9_]+$\")\n \n \n def virtual_network_to_dict(vnet):\n"}}},{"rowIdx":431,"cells":{"in_source_id":{"kind":"string","value":"optuna__optuna-1882"},"issue":{"kind":"string","value":"Remove the document for `with_traceback` method of Optuna's exception classes\nCurrently, Optuna's exception classes have the documentations of `with_traceback` method, which is inherited from `Exception`. 
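That inheritance is easy to confirm: `with_traceback` lives on `BaseException`, so every subclass exposes it without defining it. A minimal check with a simplified stand-in for `optuna.exceptions.OptunaError` (the real class likewise just subclasses `Exception`):

```python
class OptunaError(Exception):
    """Simplified stand-in for Optuna's base exception."""

print("with_traceback" in dir(OptunaError))   # True: inherited from BaseException
print("with_traceback" in vars(OptunaError))  # False: not defined on the class itself
```

Since the method is never defined in Optuna itself, excluding it from the generated reference loses nothing.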
I don't think it is informative for readers and it can be removed from the reference.\r\n\r\n![image](https://user-images.githubusercontent.com/3255979/89862021-0a4afd80-dbe2-11ea-8f86-626d960b4255.png)\r\n\r\nThe following `Exception` has the `with_traceback` method.\r\n- [ ] `optuna.exceptions.CLIUsageError`\r\n- [ ] `optuna.exceptions.OptunaError`\r\n- [ ] `optuna.exceptions.TrialPruned`\r\n- [ ] `optuna.exceptions.CLIUsageError`\r\n- [ ] `optuna.exceptions.StorageInternalError`\r\n- [ ] `optuna.exceptions.DuplicatedStudyError`\r\n\r\nCC @keisuke-umezawa Please let me know if you have any comments.\n"},"before_files":{"kind":"list like","value":[{"content":"# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport pkg_resources\n\nfrom sphinx_gallery.sorting import FileNameSortKey\n\n__version__ = pkg_resources.get_distribution('optuna').version\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Optuna'\ncopyright = '2018, Optuna Contributors.'\nauthor = 'Optuna Contributors.'\n\n# The short X.Y version\nversion = __version__\n# The full version, including alpha/beta/rc tags\nrelease = __version__\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'cliff.sphinxext',\n 'sphinx_gallery.gen_gallery',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'logo_only': True\n}\n\nhtml_favicon = '../image/favicon.ico'\n\nhtml_logo = '../image/optuna-logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static', 'plotly_figures']\nhtml_css_files = [\"css/custom.css\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Optunadoc'\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Optuna.tex', 'Optuna Documentation', 'Optuna Contributors.', 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'optuna', 'Optuna Documentation', [author], 1)]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Optuna', 'Optuna Documentation', author, 'Optuna',\n 'One line description of project.', 'Miscellaneous'),\n]\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\n\n# -- Extension configuration -------------------------------------------------\nautosummary_generate = True\nautodoc_default_options = {\n 'members': True,\n 'inherited-members': True,\n}\n\nsphinx_gallery_conf = {\n 'examples_dirs': [\n '../../tutorial',\n ],\n 'gallery_dirs': [\n 'tutorial',\n ],\n 'within_subsection_order': FileNameSortKey,\n 'filename_pattern': r'/*\\.py',\n 'first_notebook_cell': None,\n}\n","path":"docs/source/conf.py"}],"string":"[\n {\n \"content\": \"# -*- coding: utf-8 -*-\\n#\\n# Configuration file for the Sphinx documentation builder.\\n#\\n# This file does only contain a selection of the most common options. 
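The before-file enables `members` and `inherited-members` for every autodoc'd object, which is precisely what drags `with_traceback` onto each exception page. If only a handful of pages needed the exclusion, the directive-level form would also work; a sketch, assuming the classes are rendered with `autoclass`:

```rst
.. autoclass:: optuna.exceptions.TrialPruned
   :members:
   :inherited-members:
   :exclude-members: with_traceback
```

The after-file takes the global route instead, which covers all of the classes listed in the issue at once.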
They are copied after the builtin static files,\\n# so a file named \\\"default.css\\\" will overwrite the builtin \\\"default.css\\\".\\nhtml_static_path = ['_static', 'plotly_figures']\\nhtml_css_files = [\\\"css/custom.css\\\"]\\n\\n# Custom sidebar templates, must be a dictionary that maps document names\\n# to template names.\\n#\\n# The default sidebars (for documents that don't match any pattern) are\\n# defined by theme itself. Builtin themes are using these templates by\\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\\n# 'searchbox.html']``.\\n#\\n# html_sidebars = {}\\n\\n# -- Options for HTMLHelp output ---------------------------------------------\\n\\n# Output file base name for HTML help builder.\\nhtmlhelp_basename = 'Optunadoc'\\n\\n# -- Options for LaTeX output ------------------------------------------------\\n\\nlatex_elements = {\\n # The paper size ('letterpaper' or 'a4paper').\\n #\\n # 'papersize': 'letterpaper',\\n\\n # The font size ('10pt', '11pt' or '12pt').\\n #\\n # 'pointsize': '10pt',\\n\\n # Additional stuff for the LaTeX preamble.\\n #\\n # 'preamble': '',\\n\\n # Latex figure (float) alignment\\n #\\n # 'figure_align': 'htbp',\\n}\\n\\n# Grouping the document tree into LaTeX files. List of tuples\\n# (source start file, target name, title,\\n# author, documentclass [howto, manual, or own class]).\\nlatex_documents = [\\n (master_doc, 'Optuna.tex', 'Optuna Documentation', 'Optuna Contributors.', 'manual'),\\n]\\n\\n# -- Options for manual page output ------------------------------------------\\n\\n# One entry per manual page. List of tuples\\n# (source start file, name, description, authors, manual section).\\nman_pages = [(master_doc, 'optuna', 'Optuna Documentation', [author], 1)]\\n\\n# -- Options for Texinfo output ----------------------------------------------\\n\\n# Grouping the document tree into Texinfo files. List of tuples\\n# (source start file, target name, title, author,\\n# dir menu entry, description, category)\\ntexinfo_documents = [\\n (master_doc, 'Optuna', 'Optuna Documentation', author, 'Optuna',\\n 'One line description of project.', 'Miscellaneous'),\\n]\\n\\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\\n\\n# -- Extension configuration -------------------------------------------------\\nautosummary_generate = True\\nautodoc_default_options = {\\n 'members': True,\\n 'inherited-members': True,\\n}\\n\\nsphinx_gallery_conf = {\\n 'examples_dirs': [\\n '../../tutorial',\\n ],\\n 'gallery_dirs': [\\n 'tutorial',\\n ],\\n 'within_subsection_order': FileNameSortKey,\\n 'filename_pattern': r'/*\\\\.py',\\n 'first_notebook_cell': None,\\n}\\n\",\n \"path\": \"docs/source/conf.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport pkg_resources\n\nfrom sphinx_gallery.sorting import FileNameSortKey\n\n__version__ = pkg_resources.get_distribution('optuna').version\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Optuna'\ncopyright = '2018, Optuna Contributors.'\nauthor = 'Optuna Contributors.'\n\n# The short X.Y version\nversion = __version__\n# The full version, including alpha/beta/rc tags\nrelease = __version__\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'cliff.sphinxext',\n 'sphinx_gallery.gen_gallery',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'logo_only': True\n}\n\nhtml_favicon = '../image/favicon.ico'\n\nhtml_logo = '../image/optuna-logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static', 'plotly_figures']\nhtml_css_files = [\"css/custom.css\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Optunadoc'\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Optuna.tex', 'Optuna Documentation', 'Optuna Contributors.', 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'optuna', 'Optuna Documentation', [author], 1)]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Optuna', 'Optuna Documentation', author, 'Optuna',\n 'One line description of project.', 'Miscellaneous'),\n]\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\n\n# -- Extension configuration -------------------------------------------------\nautosummary_generate = True\nautodoc_default_options = {\n 'members': True,\n 'inherited-members': True,\n 'exclude-members': 'with_traceback',\n}\n\nsphinx_gallery_conf = {\n 'examples_dirs': [\n '../../tutorial',\n ],\n 'gallery_dirs': [\n 'tutorial',\n ],\n 'within_subsection_order': FileNameSortKey,\n 'filename_pattern': r'/*\\.py',\n 'first_notebook_cell': None,\n}\n","path":"docs/source/conf.py"}],"string":"[\n {\n \"content\": \"# -*- coding: utf-8 -*-\\n#\\n# Configuration file for the Sphinx documentation builder.\\n#\\n# This file does only contain a selection of the most common options. For a\\n# full list see the documentation:\\n# http://www.sphinx-doc.org/en/master/config\\n\\n# -- Path setup --------------------------------------------------------------\\n\\n# If extensions (or modules to document with autodoc) are in another directory,\\n# add these directories to sys.path here. 
If the directory is relative to the\\n# documentation root, use os.path.abspath to make it absolute, like shown here.\\n#\\n# import os\\n# import sys\\n# sys.path.insert(0, os.path.abspath('.'))\\n\\nimport pkg_resources\\n\\nfrom sphinx_gallery.sorting import FileNameSortKey\\n\\n__version__ = pkg_resources.get_distribution('optuna').version\\n\\n# -- Project information -----------------------------------------------------\\n\\nproject = 'Optuna'\\ncopyright = '2018, Optuna Contributors.'\\nauthor = 'Optuna Contributors.'\\n\\n# The short X.Y version\\nversion = __version__\\n# The full version, including alpha/beta/rc tags\\nrelease = __version__\\n\\n# -- General configuration ---------------------------------------------------\\n\\n# If your documentation needs a minimal Sphinx version, state it here.\\n#\\n# needs_sphinx = '1.0'\\n\\n# Add any Sphinx extension module names here, as strings. They can be\\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\\n# ones.\\nextensions = [\\n 'sphinx.ext.autodoc',\\n 'sphinx.ext.autosummary',\\n 'sphinx.ext.doctest',\\n 'sphinx.ext.intersphinx',\\n 'sphinx.ext.mathjax',\\n 'sphinx.ext.napoleon',\\n 'sphinx.ext.viewcode',\\n 'sphinx.ext.githubpages',\\n 'cliff.sphinxext',\\n 'sphinx_gallery.gen_gallery',\\n]\\n\\n# Add any paths that contain templates here, relative to this directory.\\ntemplates_path = ['_templates']\\n\\n# The suffix(es) of source filenames.\\n# You can specify multiple suffix as a list of string:\\n#\\n# source_suffix = ['.rst', '.md']\\nsource_suffix = '.rst'\\n\\n# The master toctree document.\\nmaster_doc = 'index'\\n\\n# The language for content autogenerated by Sphinx. Refer to documentation\\n# for a list of supported languages.\\n#\\n# This is also used if you do content translation via gettext catalogs.\\n# Usually you set \\\"language\\\" from the command line for these cases.\\nlanguage = None\\n\\n# List of patterns, relative to source directory, that match files and\\n# directories to ignore when looking for source files.\\n# This pattern also affects html_static_path and html_extra_path .\\nexclude_patterns = []\\n\\n# The name of the Pygments (syntax highlighting) style to use.\\npygments_style = 'sphinx'\\n\\n# -- Options for HTML output -------------------------------------------------\\n\\n# The theme to use for HTML and HTML Help pages. See the documentation for\\n# a list of builtin themes.\\n#\\nhtml_theme = 'sphinx_rtd_theme'\\n\\n# Theme options are theme-specific and customize the look and feel of a theme\\n# further. For a list of options available for each theme, see the\\n# documentation.\\n#\\nhtml_theme_options = {\\n 'logo_only': True\\n}\\n\\nhtml_favicon = '../image/favicon.ico'\\n\\nhtml_logo = '../image/optuna-logo.png'\\n\\n# Add any paths that contain custom static files (such as style sheets) here,\\n# relative to this directory. They are copied after the builtin static files,\\n# so a file named \\\"default.css\\\" will overwrite the builtin \\\"default.css\\\".\\nhtml_static_path = ['_static', 'plotly_figures']\\nhtml_css_files = [\\\"css/custom.css\\\"]\\n\\n# Custom sidebar templates, must be a dictionary that maps document names\\n# to template names.\\n#\\n# The default sidebars (for documents that don't match any pattern) are\\n# defined by theme itself. 
Builtin themes are using these templates by\\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\\n# 'searchbox.html']``.\\n#\\n# html_sidebars = {}\\n\\n# -- Options for HTMLHelp output ---------------------------------------------\\n\\n# Output file base name for HTML help builder.\\nhtmlhelp_basename = 'Optunadoc'\\n\\n# -- Options for LaTeX output ------------------------------------------------\\n\\nlatex_elements = {\\n # The paper size ('letterpaper' or 'a4paper').\\n #\\n # 'papersize': 'letterpaper',\\n\\n # The font size ('10pt', '11pt' or '12pt').\\n #\\n # 'pointsize': '10pt',\\n\\n # Additional stuff for the LaTeX preamble.\\n #\\n # 'preamble': '',\\n\\n # Latex figure (float) alignment\\n #\\n # 'figure_align': 'htbp',\\n}\\n\\n# Grouping the document tree into LaTeX files. List of tuples\\n# (source start file, target name, title,\\n# author, documentclass [howto, manual, or own class]).\\nlatex_documents = [\\n (master_doc, 'Optuna.tex', 'Optuna Documentation', 'Optuna Contributors.', 'manual'),\\n]\\n\\n# -- Options for manual page output ------------------------------------------\\n\\n# One entry per manual page. List of tuples\\n# (source start file, name, description, authors, manual section).\\nman_pages = [(master_doc, 'optuna', 'Optuna Documentation', [author], 1)]\\n\\n# -- Options for Texinfo output ----------------------------------------------\\n\\n# Grouping the document tree into Texinfo files. List of tuples\\n# (source start file, target name, title, author,\\n# dir menu entry, description, category)\\ntexinfo_documents = [\\n (master_doc, 'Optuna', 'Optuna Documentation', author, 'Optuna',\\n 'One line description of project.', 'Miscellaneous'),\\n]\\n\\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\\n\\n# -- Extension configuration -------------------------------------------------\\nautosummary_generate = True\\nautodoc_default_options = {\\n 'members': True,\\n 'inherited-members': True,\\n 'exclude-members': 'with_traceback',\\n}\\n\\nsphinx_gallery_conf = {\\n 'examples_dirs': [\\n '../../tutorial',\\n ],\\n 'gallery_dirs': [\\n 'tutorial',\\n ],\\n 'within_subsection_order': FileNameSortKey,\\n 'filename_pattern': r'/*\\\\.py',\\n 'first_notebook_cell': None,\\n}\\n\",\n \"path\": \"docs/source/conf.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/docs/source/conf.py b/docs/source/conf.py\nindex 16d954afae..05540085a2 100644\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -172,6 +172,7 @@\n autodoc_default_options = {\n 'members': True,\n 'inherited-members': True,\n+ 'exclude-members': 'with_traceback',\n }\n \n sphinx_gallery_conf = {\n"}}},{"rowIdx":432,"cells":{"in_source_id":{"kind":"string","value":"bokeh__bokeh-1948"},"issue":{"kind":"string","value":"`publishing` example from the `embed` directory fails\nWith an error message that is not particularly helpful.
Using bokeh 0.8.\n\n```\n➜ embed python publishing.py\nUsing saved session configuration for http://localhost:5006/\nTo override, pass 'load_from_config=False' to Session\nINFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost\nTraceback (most recent call last):\n File \"publishing.py\", line 11, in \n Session().register('testuser', 'testpassword')\n File \"/Users/nicolas/anaconda/lib/python2.7/site-packages/bokeh/session.py\", line 208, in register\n raise RuntimeError(\"Unknown Error\")\nRuntimeError: Unknown Error\n```\n\n"},"before_files":{"kind":"list like","value":[{"content":"# The plot server must be running\n# Go to http://localhost:5006/bokeh to view this plot\n\nimport time\n\nimport numpy as np\n\nfrom bokeh.plotting import *\nfrom bokeh.session import Session\nfrom bokeh import embed\nSession().register('testuser', 'testpassword')\nN = 80\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\noutput_server(\"line_animate\")\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save,box_select\"\np = figure(tools=TOOLS)\np.circle(x, y, color=\"#3333ee\", name=\"sin\")\npush()\nrenderer = p.select(dict(name=\"sin\"))\nds = renderer[0].data_source\ncursession().publish()\ntag = embed.autoload_server(p, cursession(), public=True)\nhtml = \"\"\"\n\n\n\n%s\n\n\n\"\"\"\nhtml = html % (tag)\nwith open(\"publishing.html\", \"w+\") as f:\n f.write(html)\nwhile True:\n for i in np.hstack((np.linspace(1, -1, 100), np.linspace(-1, 1, 100))):\n ds.data[\"y\"] = y * i\n cursession().store_objects(ds)\n time.sleep(1.0)\n","path":"examples/embed/publishing.py"}],"string":"[\n {\n \"content\": \"# The plot server must be running\\n# Go to http://localhost:5006/bokeh to view this plot\\n\\nimport time\\n\\nimport numpy as np\\n\\nfrom bokeh.plotting import *\\nfrom bokeh.session import Session\\nfrom bokeh import embed\\nSession().register('testuser', 'testpassword')\\nN = 80\\nx = np.linspace(0, 4*np.pi, N)\\ny = np.sin(x)\\noutput_server(\\\"line_animate\\\")\\nTOOLS = \\\"pan,wheel_zoom,box_zoom,reset,save,box_select\\\"\\np = figure(tools=TOOLS)\\np.circle(x, y, color=\\\"#3333ee\\\", name=\\\"sin\\\")\\npush()\\nrenderer = p.select(dict(name=\\\"sin\\\"))\\nds = renderer[0].data_source\\ncursession().publish()\\ntag = embed.autoload_server(p, cursession(), public=True)\\nhtml = \\\"\\\"\\\"\\n\\n\\n\\n%s\\n\\n\\n\\\"\\\"\\\"\\nhtml = html % (tag)\\nwith open(\\\"publishing.html\\\", \\\"w+\\\") as f:\\n f.write(html)\\nwhile True:\\n for i in np.hstack((np.linspace(1, -1, 100), np.linspace(-1, 1, 100))):\\n ds.data[\\\"y\\\"] = y * i\\n cursession().store_objects(ds)\\n time.sleep(1.0)\\n\",\n \"path\": \"examples/embed/publishing.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# The plot server must be running\n# The server must run with --multi-user for this example to work\n# Go to http://localhost:5006/bokeh to view this plot\n\nimport time\n\nimport numpy as np\n\nfrom bokeh.plotting import *\nfrom bokeh.session import Session\nfrom bokeh import embed\nSession().register('testuser', 'testpassword')\nN = 80\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\noutput_server(\"line_animate\")\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save,box_select\"\np = figure(tools=TOOLS)\np.circle(x, y, color=\"#3333ee\", name=\"sin\")\npush()\nrenderer = p.select(dict(name=\"sin\"))\nds = renderer[0].data_source\ncursession().publish()\ntag = embed.autoload_server(p, cursession(), public=True)\nhtml = \"\"\"\n\n\n\n%s\n\n\n\"\"\"\nhtml = html % (tag)\nwith 
open(\"publishing.html\", \"w+\") as f:\n f.write(html)\nwhile True:\n for i in np.hstack((np.linspace(1, -1, 100), np.linspace(-1, 1, 100))):\n ds.data[\"y\"] = y * i\n cursession().store_objects(ds)\n time.sleep(1.0)\n","path":"examples/embed/publishing.py"}],"string":"[\n {\n \"content\": \"# The plot server must be running\\n# The server must run with --multi-user for this example to work\\n# Go to http://localhost:5006/bokeh to view this plot\\n\\nimport time\\n\\nimport numpy as np\\n\\nfrom bokeh.plotting import *\\nfrom bokeh.session import Session\\nfrom bokeh import embed\\nSession().register('testuser', 'testpassword')\\nN = 80\\nx = np.linspace(0, 4*np.pi, N)\\ny = np.sin(x)\\noutput_server(\\\"line_animate\\\")\\nTOOLS = \\\"pan,wheel_zoom,box_zoom,reset,save,box_select\\\"\\np = figure(tools=TOOLS)\\np.circle(x, y, color=\\\"#3333ee\\\", name=\\\"sin\\\")\\npush()\\nrenderer = p.select(dict(name=\\\"sin\\\"))\\nds = renderer[0].data_source\\ncursession().publish()\\ntag = embed.autoload_server(p, cursession(), public=True)\\nhtml = \\\"\\\"\\\"\\n\\n\\n\\n%s\\n\\n\\n\\\"\\\"\\\"\\nhtml = html % (tag)\\nwith open(\\\"publishing.html\\\", \\\"w+\\\") as f:\\n f.write(html)\\nwhile True:\\n for i in np.hstack((np.linspace(1, -1, 100), np.linspace(-1, 1, 100))):\\n ds.data[\\\"y\\\"] = y * i\\n cursession().store_objects(ds)\\n time.sleep(1.0)\\n\",\n \"path\": \"examples/embed/publishing.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/examples/embed/README.md b/examples/embed/README.md\nindex f4e77c448c9..9b98f24661e 100644\n--- a/examples/embed/README.md\n+++ b/examples/embed/README.md\n@@ -2,7 +2,11 @@ To try these example you first have to start the bokeh-server, ie.,\n \n bokeh-server --backend=memory\n \n-and then run the examples:\n+Some examples (e.g. publishing) need the server to run in multi-user mode:\n+ \n+ bokeh-server -m --backend=memory\n+\n+Then run the examples:\n \n python widget.py\n \n@@ -10,6 +14,7 @@ or\n \n python animated.py\n \n+\n To view them, start a web server in this directory, for instance, the server\n built into python:\n \ndiff --git a/examples/embed/publishing.py b/examples/embed/publishing.py\nindex a099eda61ea..d0dcd17888b 100644\n--- a/examples/embed/publishing.py\n+++ b/examples/embed/publishing.py\n@@ -1,4 +1,5 @@\n # The plot server must be running\n+# The server must run with --multi-user for this example to work\n # Go to http://localhost:5006/bokeh to view this plot\n \n import time\n"}}},{"rowIdx":433,"cells":{"in_source_id":{"kind":"string","value":"mozilla__bugbug-3941"},"issue":{"kind":"string","value":"[model:accessibility] Add the model to `http_service` and `data_pipeline`\nDepends on merging: #3775\n"},"before_files":{"kind":"list like","value":[{"content":"# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nfrom datetime import timedelta\nfrom functools import lru_cache\nfrom typing import Sequence\n\nimport orjson\nimport requests\nimport zstandard\nfrom redis import Redis\n\nfrom bugbug import bugzilla, repository, test_scheduling\nfrom bugbug.github import Github\nfrom bugbug.model import Model\nfrom bugbug.models import testselect\nfrom bugbug.utils import get_hgmo_stack\nfrom bugbug_http.readthrough_cache import ReadthroughTTLCache\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS_NAMES = [\n \"defectenhancementtask\",\n \"component\",\n \"invalidcompatibilityreport\",\n \"needsdiagnosis\",\n \"regression\",\n \"stepstoreproduce\",\n \"spambug\",\n \"testlabelselect\",\n \"testgroupselect\",\n]\n\nDEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\nredis = Redis.from_url(os.environ.get(\"REDIS_URL\", \"redis://localhost/0\"))\n\nMODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache(\n timedelta(hours=1), lambda m: Model.load(f\"{m}model\")\n)\nMODEL_CACHE.start_ttl_thread()\n\ncctx = zstandard.ZstdCompressor(level=10)\n\n\ndef setkey(key: str, value: bytes, compress: bool = False) -> None:\n LOGGER.debug(f\"Storing data at {key}: {value!r}\")\n if compress:\n value = cctx.compress(value)\n redis.set(key, value)\n redis.expire(key, DEFAULT_EXPIRATION_TTL)\n\n\ndef classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str:\n from bugbug_http.app import JobInfo\n\n # This should be called in a process worker so it should be safe to set\n # the token here\n bug_ids_set = set(map(int, bug_ids))\n bugzilla.set_token(bugzilla_token)\n\n bugs = bugzilla.get(bug_ids)\n\n missing_bugs = bug_ids_set.difference(bugs.keys())\n\n for bug_id in missing_bugs:\n job = JobInfo(classify_bug, model_name, bug_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not bugs:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fails. 
What should we do here?\n probs = model.classify(list(bugs.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, bug_id in enumerate(bugs.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_bug, model_name, bug_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, bugs[bug_id][\"last_change_time\"].encode())\n\n return \"OK\"\n\n\ndef classify_issue(\n model_name: str, owner: str, repo: str, issue_nums: Sequence[int]\n) -> str:\n from bugbug_http.app import JobInfo\n\n github = Github(owner=owner, repo=repo)\n\n issue_ids_set = set(map(int, issue_nums))\n\n issues = {\n issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True)\n for issue_num in issue_nums\n }\n\n missing_issues = issue_ids_set.difference(issues.keys())\n\n for issue_id in missing_issues:\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not issues:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fail. What should we do here?\n probs = model.classify(list(issues.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, issue_id in enumerate(issues.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, issues[issue_id][\"updated_at\"].encode())\n\n return \"OK\"\n\n\ndef classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str:\n from bugbug_http.app import JobInfo\n\n reports = {\n report[\"uuid\"]: {\"title\": report[\"title\"], \"body\": report[\"body\"]}\n for report in reports_data\n }\n\n if not reports:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n probs = model.classify(list(reports.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, report_uuid in enumerate(reports.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_broken_site_report, model_name, report_uuid)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\n@lru_cache(maxsize=None)\ndef get_known_tasks() -> tuple[str, ...]:\n with open(\"known_tasks\", \"r\") as f:\n return tuple(line.strip() for line in 
f)\n\n\ndef schedule_tests(branch: str, rev: str) -> str:\n from bugbug_http import REPO_DIR\n from bugbug_http.app import JobInfo\n\n job = JobInfo(schedule_tests, branch, rev)\n LOGGER.info(\"Processing %s...\", job)\n\n # Pull the revision to the local repository\n LOGGER.info(\"Pulling commits from the remote repository...\")\n repository.pull(REPO_DIR, branch, rev)\n\n # Load the full stack of patches leading to that revision\n LOGGER.info(\"Loading commits to analyze using automationrelevance...\")\n try:\n revs = get_hgmo_stack(branch, rev)\n except requests.exceptions.RequestException:\n LOGGER.warning(f\"Push not found for {branch} @ {rev}!\")\n return \"NOK\"\n\n test_selection_threshold = float(\n os.environ.get(\"TEST_SELECTION_CONFIDENCE_THRESHOLD\", 0.5)\n )\n\n # On \"try\", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493).\n # On other repos, only consider \"tip\" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b).\n if branch == \"try\":\n repo_branch = None\n else:\n repo_branch = \"tip\"\n\n # Analyze patches.\n commits = repository.download_commits(\n REPO_DIR,\n revs=revs,\n branch=repo_branch,\n save=False,\n use_single_process=True,\n include_no_bug=True,\n )\n\n if len(commits) > 0:\n testlabelselect_model = MODEL_CACHE.get(\"testlabelselect\")\n testgroupselect_model = MODEL_CACHE.get(\"testgroupselect\")\n\n tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)\n\n reduced = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.8), 1.0\n )\n\n reduced_higher = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.9), 1.0\n )\n\n groups = testgroupselect_model.select_tests(commits, test_selection_threshold)\n\n config_groups = testselect.select_configs(groups.keys(), 0.9)\n else:\n tasks = {}\n reduced = set()\n groups = {}\n config_groups = {}\n\n data = {\n \"tasks\": tasks,\n \"groups\": groups,\n \"config_groups\": config_groups,\n \"reduced_tasks\": {t: c for t, c in tasks.items() if t in reduced},\n \"reduced_tasks_higher\": {t: c for t, c in tasks.items() if t in reduced_higher},\n \"known_tasks\": get_known_tasks(),\n }\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\ndef get_config_specific_groups(config: str) -> str:\n from bugbug_http.app import JobInfo\n\n job = JobInfo(get_config_specific_groups, config)\n LOGGER.info(\"Processing %s...\", job)\n\n equivalence_sets = testselect._get_equivalence_sets(0.9)\n\n past_failures_data = test_scheduling.PastFailures(\"group\", True)\n\n setkey(\n job.result_key,\n orjson.dumps(\n [\n {\"name\": group}\n for group in past_failures_data.all_runnables\n if any(\n equivalence_set == {config}\n for equivalence_set in equivalence_sets[group]\n )\n ]\n ),\n compress=True,\n )\n\n return \"OK\"\n","path":"http_service/bugbug_http/models.py"}],"string":"[\n {\n \"content\": \"# -*- coding: utf-8 -*-\\n# This Source Code Form is subject to the terms of the Mozilla Public\\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\\n# You can obtain one at http://mozilla.org/MPL/2.0/.\\n\\nimport logging\\nimport os\\nfrom datetime import timedelta\\nfrom functools import lru_cache\\nfrom typing import Sequence\\n\\nimport orjson\\nimport requests\\nimport zstandard\\nfrom redis import Redis\\n\\nfrom bugbug import bugzilla, repository, test_scheduling\\nfrom bugbug.github import Github\\nfrom bugbug.model import Model\\nfrom bugbug.models import testselect\\nfrom bugbug.utils import get_hgmo_stack\\nfrom bugbug_http.readthrough_cache import ReadthroughTTLCache\\n\\nlogging.basicConfig(level=logging.INFO)\\nLOGGER = logging.getLogger()\\n\\nMODELS_NAMES = [\\n \\\"defectenhancementtask\\\",\\n \\\"component\\\",\\n \\\"invalidcompatibilityreport\\\",\\n \\\"needsdiagnosis\\\",\\n \\\"regression\\\",\\n \\\"stepstoreproduce\\\",\\n \\\"spambug\\\",\\n \\\"testlabelselect\\\",\\n \\\"testgroupselect\\\",\\n]\\n\\nDEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\\nredis = Redis.from_url(os.environ.get(\\\"REDIS_URL\\\", \\\"redis://localhost/0\\\"))\\n\\nMODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache(\\n timedelta(hours=1), lambda m: Model.load(f\\\"{m}model\\\")\\n)\\nMODEL_CACHE.start_ttl_thread()\\n\\ncctx = zstandard.ZstdCompressor(level=10)\\n\\n\\ndef setkey(key: str, value: bytes, compress: bool = False) -> None:\\n LOGGER.debug(f\\\"Storing data at {key}: {value!r}\\\")\\n if compress:\\n value = cctx.compress(value)\\n redis.set(key, value)\\n redis.expire(key, DEFAULT_EXPIRATION_TTL)\\n\\n\\ndef classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str:\\n from bugbug_http.app import JobInfo\\n\\n # This should be called in a process worker so it should be safe to set\\n # the token here\\n bug_ids_set = set(map(int, bug_ids))\\n bugzilla.set_token(bugzilla_token)\\n\\n bugs = bugzilla.get(bug_ids)\\n\\n missing_bugs = bug_ids_set.difference(bugs.keys())\\n\\n for bug_id in missing_bugs:\\n job = JobInfo(classify_bug, model_name, bug_id)\\n\\n # TODO: Find a better error format\\n setkey(job.result_key, orjson.dumps({\\\"available\\\": False}))\\n\\n if not bugs:\\n return \\\"NOK\\\"\\n\\n model = MODEL_CACHE.get(model_name)\\n\\n if not model:\\n LOGGER.info(\\\"Missing model %r, aborting\\\" % model_name)\\n return \\\"NOK\\\"\\n\\n model_extra_data = model.get_extra_data()\\n\\n # TODO: Classify could choke on a single bug which could make the whole\\n # job to fails. 
What should we do here?\\n probs = model.classify(list(bugs.values()), True)\\n indexes = probs.argmax(axis=-1)\\n suggestions = model.le.inverse_transform(indexes)\\n\\n probs_list = probs.tolist()\\n indexes_list = indexes.tolist()\\n suggestions_list = suggestions.tolist()\\n\\n for i, bug_id in enumerate(bugs.keys()):\\n data = {\\n \\\"prob\\\": probs_list[i],\\n \\\"index\\\": indexes_list[i],\\n \\\"class\\\": suggestions_list[i],\\n \\\"extra_data\\\": model_extra_data,\\n }\\n\\n job = JobInfo(classify_bug, model_name, bug_id)\\n setkey(job.result_key, orjson.dumps(data), compress=True)\\n\\n # Save the bug last change\\n setkey(job.change_time_key, bugs[bug_id][\\\"last_change_time\\\"].encode())\\n\\n return \\\"OK\\\"\\n\\n\\ndef classify_issue(\\n model_name: str, owner: str, repo: str, issue_nums: Sequence[int]\\n) -> str:\\n from bugbug_http.app import JobInfo\\n\\n github = Github(owner=owner, repo=repo)\\n\\n issue_ids_set = set(map(int, issue_nums))\\n\\n issues = {\\n issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True)\\n for issue_num in issue_nums\\n }\\n\\n missing_issues = issue_ids_set.difference(issues.keys())\\n\\n for issue_id in missing_issues:\\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\\n\\n # TODO: Find a better error format\\n setkey(job.result_key, orjson.dumps({\\\"available\\\": False}))\\n\\n if not issues:\\n return \\\"NOK\\\"\\n\\n model = MODEL_CACHE.get(model_name)\\n\\n if not model:\\n LOGGER.info(\\\"Missing model %r, aborting\\\" % model_name)\\n return \\\"NOK\\\"\\n\\n model_extra_data = model.get_extra_data()\\n\\n # TODO: Classify could choke on a single bug which could make the whole\\n # job to fail. What should we do here?\\n probs = model.classify(list(issues.values()), True)\\n indexes = probs.argmax(axis=-1)\\n suggestions = model.le.inverse_transform(indexes)\\n\\n probs_list = probs.tolist()\\n indexes_list = indexes.tolist()\\n suggestions_list = suggestions.tolist()\\n\\n for i, issue_id in enumerate(issues.keys()):\\n data = {\\n \\\"prob\\\": probs_list[i],\\n \\\"index\\\": indexes_list[i],\\n \\\"class\\\": suggestions_list[i],\\n \\\"extra_data\\\": model_extra_data,\\n }\\n\\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\\n setkey(job.result_key, orjson.dumps(data), compress=True)\\n\\n # Save the bug last change\\n setkey(job.change_time_key, issues[issue_id][\\\"updated_at\\\"].encode())\\n\\n return \\\"OK\\\"\\n\\n\\ndef classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str:\\n from bugbug_http.app import JobInfo\\n\\n reports = {\\n report[\\\"uuid\\\"]: {\\\"title\\\": report[\\\"title\\\"], \\\"body\\\": report[\\\"body\\\"]}\\n for report in reports_data\\n }\\n\\n if not reports:\\n return \\\"NOK\\\"\\n\\n model = MODEL_CACHE.get(model_name)\\n\\n if not model:\\n LOGGER.info(\\\"Missing model %r, aborting\\\" % model_name)\\n return \\\"NOK\\\"\\n\\n model_extra_data = model.get_extra_data()\\n probs = model.classify(list(reports.values()), True)\\n indexes = probs.argmax(axis=-1)\\n suggestions = model.le.inverse_transform(indexes)\\n\\n probs_list = probs.tolist()\\n indexes_list = indexes.tolist()\\n suggestions_list = suggestions.tolist()\\n\\n for i, report_uuid in enumerate(reports.keys()):\\n data = {\\n \\\"prob\\\": probs_list[i],\\n \\\"index\\\": indexes_list[i],\\n \\\"class\\\": suggestions_list[i],\\n \\\"extra_data\\\": model_extra_data,\\n }\\n\\n job = JobInfo(classify_broken_site_report, model_name, 
report_uuid)\\n setkey(job.result_key, orjson.dumps(data), compress=True)\\n\\n return \\\"OK\\\"\\n\\n\\n@lru_cache(maxsize=None)\\ndef get_known_tasks() -> tuple[str, ...]:\\n with open(\\\"known_tasks\\\", \\\"r\\\") as f:\\n return tuple(line.strip() for line in f)\\n\\n\\ndef schedule_tests(branch: str, rev: str) -> str:\\n from bugbug_http import REPO_DIR\\n from bugbug_http.app import JobInfo\\n\\n job = JobInfo(schedule_tests, branch, rev)\\n LOGGER.info(\\\"Processing %s...\\\", job)\\n\\n # Pull the revision to the local repository\\n LOGGER.info(\\\"Pulling commits from the remote repository...\\\")\\n repository.pull(REPO_DIR, branch, rev)\\n\\n # Load the full stack of patches leading to that revision\\n LOGGER.info(\\\"Loading commits to analyze using automationrelevance...\\\")\\n try:\\n revs = get_hgmo_stack(branch, rev)\\n except requests.exceptions.RequestException:\\n LOGGER.warning(f\\\"Push not found for {branch} @ {rev}!\\\")\\n return \\\"NOK\\\"\\n\\n test_selection_threshold = float(\\n os.environ.get(\\\"TEST_SELECTION_CONFIDENCE_THRESHOLD\\\", 0.5)\\n )\\n\\n # On \\\"try\\\", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493).\\n # On other repos, only consider \\\"tip\\\" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b).\\n if branch == \\\"try\\\":\\n repo_branch = None\\n else:\\n repo_branch = \\\"tip\\\"\\n\\n # Analyze patches.\\n commits = repository.download_commits(\\n REPO_DIR,\\n revs=revs,\\n branch=repo_branch,\\n save=False,\\n use_single_process=True,\\n include_no_bug=True,\\n )\\n\\n if len(commits) > 0:\\n testlabelselect_model = MODEL_CACHE.get(\\\"testlabelselect\\\")\\n testgroupselect_model = MODEL_CACHE.get(\\\"testgroupselect\\\")\\n\\n tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)\\n\\n reduced = testselect.reduce_configs(\\n set(t for t, c in tasks.items() if c >= 0.8), 1.0\\n )\\n\\n reduced_higher = testselect.reduce_configs(\\n set(t for t, c in tasks.items() if c >= 0.9), 1.0\\n )\\n\\n groups = testgroupselect_model.select_tests(commits, test_selection_threshold)\\n\\n config_groups = testselect.select_configs(groups.keys(), 0.9)\\n else:\\n tasks = {}\\n reduced = set()\\n groups = {}\\n config_groups = {}\\n\\n data = {\\n \\\"tasks\\\": tasks,\\n \\\"groups\\\": groups,\\n \\\"config_groups\\\": config_groups,\\n \\\"reduced_tasks\\\": {t: c for t, c in tasks.items() if t in reduced},\\n \\\"reduced_tasks_higher\\\": {t: c for t, c in tasks.items() if t in reduced_higher},\\n \\\"known_tasks\\\": get_known_tasks(),\\n }\\n setkey(job.result_key, orjson.dumps(data), compress=True)\\n\\n return \\\"OK\\\"\\n\\n\\ndef get_config_specific_groups(config: str) -> str:\\n from bugbug_http.app import JobInfo\\n\\n job = JobInfo(get_config_specific_groups, config)\\n LOGGER.info(\\\"Processing %s...\\\", job)\\n\\n equivalence_sets = testselect._get_equivalence_sets(0.9)\\n\\n past_failures_data = test_scheduling.PastFailures(\\\"group\\\", True)\\n\\n setkey(\\n job.result_key,\\n orjson.dumps(\\n [\\n {\\\"name\\\": group}\\n for group in past_failures_data.all_runnables\\n if any(\\n equivalence_set == {config}\\n for equivalence_set in equivalence_sets[group]\\n )\\n ]\\n ),\\n compress=True,\\n )\\n\\n return \\\"OK\\\"\\n\",\n \"path\": \"http_service/bugbug_http/models.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# -*- coding: utf-8 -*-\n# This Source 
Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nfrom datetime import timedelta\nfrom functools import lru_cache\nfrom typing import Sequence\n\nimport orjson\nimport requests\nimport zstandard\nfrom redis import Redis\n\nfrom bugbug import bugzilla, repository, test_scheduling\nfrom bugbug.github import Github\nfrom bugbug.model import Model\nfrom bugbug.models import testselect\nfrom bugbug.utils import get_hgmo_stack\nfrom bugbug_http.readthrough_cache import ReadthroughTTLCache\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS_NAMES = [\n \"defectenhancementtask\",\n \"component\",\n \"invalidcompatibilityreport\",\n \"needsdiagnosis\",\n \"regression\",\n \"stepstoreproduce\",\n \"spambug\",\n \"testlabelselect\",\n \"testgroupselect\",\n \"accessibility\",\n]\n\nDEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\nredis = Redis.from_url(os.environ.get(\"REDIS_URL\", \"redis://localhost/0\"))\n\nMODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache(\n timedelta(hours=1), lambda m: Model.load(f\"{m}model\")\n)\nMODEL_CACHE.start_ttl_thread()\n\ncctx = zstandard.ZstdCompressor(level=10)\n\n\ndef setkey(key: str, value: bytes, compress: bool = False) -> None:\n LOGGER.debug(f\"Storing data at {key}: {value!r}\")\n if compress:\n value = cctx.compress(value)\n redis.set(key, value)\n redis.expire(key, DEFAULT_EXPIRATION_TTL)\n\n\ndef classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str:\n from bugbug_http.app import JobInfo\n\n # This should be called in a process worker so it should be safe to set\n # the token here\n bug_ids_set = set(map(int, bug_ids))\n bugzilla.set_token(bugzilla_token)\n\n bugs = bugzilla.get(bug_ids)\n\n missing_bugs = bug_ids_set.difference(bugs.keys())\n\n for bug_id in missing_bugs:\n job = JobInfo(classify_bug, model_name, bug_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not bugs:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fails. 
What should we do here?\n probs = model.classify(list(bugs.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, bug_id in enumerate(bugs.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_bug, model_name, bug_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, bugs[bug_id][\"last_change_time\"].encode())\n\n return \"OK\"\n\n\ndef classify_issue(\n model_name: str, owner: str, repo: str, issue_nums: Sequence[int]\n) -> str:\n from bugbug_http.app import JobInfo\n\n github = Github(owner=owner, repo=repo)\n\n issue_ids_set = set(map(int, issue_nums))\n\n issues = {\n issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True)\n for issue_num in issue_nums\n }\n\n missing_issues = issue_ids_set.difference(issues.keys())\n\n for issue_id in missing_issues:\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not issues:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fail. What should we do here?\n probs = model.classify(list(issues.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, issue_id in enumerate(issues.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, issues[issue_id][\"updated_at\"].encode())\n\n return \"OK\"\n\n\ndef classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str:\n from bugbug_http.app import JobInfo\n\n reports = {\n report[\"uuid\"]: {\"title\": report[\"title\"], \"body\": report[\"body\"]}\n for report in reports_data\n }\n\n if not reports:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n probs = model.classify(list(reports.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, report_uuid in enumerate(reports.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_broken_site_report, model_name, report_uuid)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\n@lru_cache(maxsize=None)\ndef get_known_tasks() -> tuple[str, ...]:\n with open(\"known_tasks\", \"r\") as f:\n return tuple(line.strip() for line in 
f)\n\n\ndef schedule_tests(branch: str, rev: str) -> str:\n from bugbug_http import REPO_DIR\n from bugbug_http.app import JobInfo\n\n job = JobInfo(schedule_tests, branch, rev)\n LOGGER.info(\"Processing %s...\", job)\n\n # Pull the revision to the local repository\n LOGGER.info(\"Pulling commits from the remote repository...\")\n repository.pull(REPO_DIR, branch, rev)\n\n # Load the full stack of patches leading to that revision\n LOGGER.info(\"Loading commits to analyze using automationrelevance...\")\n try:\n revs = get_hgmo_stack(branch, rev)\n except requests.exceptions.RequestException:\n LOGGER.warning(f\"Push not found for {branch} @ {rev}!\")\n return \"NOK\"\n\n test_selection_threshold = float(\n os.environ.get(\"TEST_SELECTION_CONFIDENCE_THRESHOLD\", 0.5)\n )\n\n # On \"try\", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493).\n # On other repos, only consider \"tip\" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b).\n if branch == \"try\":\n repo_branch = None\n else:\n repo_branch = \"tip\"\n\n # Analyze patches.\n commits = repository.download_commits(\n REPO_DIR,\n revs=revs,\n branch=repo_branch,\n save=False,\n use_single_process=True,\n include_no_bug=True,\n )\n\n if len(commits) > 0:\n testlabelselect_model = MODEL_CACHE.get(\"testlabelselect\")\n testgroupselect_model = MODEL_CACHE.get(\"testgroupselect\")\n\n tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)\n\n reduced = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.8), 1.0\n )\n\n reduced_higher = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.9), 1.0\n )\n\n groups = testgroupselect_model.select_tests(commits, test_selection_threshold)\n\n config_groups = testselect.select_configs(groups.keys(), 0.9)\n else:\n tasks = {}\n reduced = set()\n groups = {}\n config_groups = {}\n\n data = {\n \"tasks\": tasks,\n \"groups\": groups,\n \"config_groups\": config_groups,\n \"reduced_tasks\": {t: c for t, c in tasks.items() if t in reduced},\n \"reduced_tasks_higher\": {t: c for t, c in tasks.items() if t in reduced_higher},\n \"known_tasks\": get_known_tasks(),\n }\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\ndef get_config_specific_groups(config: str) -> str:\n from bugbug_http.app import JobInfo\n\n job = JobInfo(get_config_specific_groups, config)\n LOGGER.info(\"Processing %s...\", job)\n\n equivalence_sets = testselect._get_equivalence_sets(0.9)\n\n past_failures_data = test_scheduling.PastFailures(\"group\", True)\n\n setkey(\n job.result_key,\n orjson.dumps(\n [\n {\"name\": group}\n for group in past_failures_data.all_runnables\n if any(\n equivalence_set == {config}\n for equivalence_set in equivalence_sets[group]\n )\n ]\n ),\n compress=True,\n )\n\n return \"OK\"\n","path":"http_service/bugbug_http/models.py"}],"string":"[\n {\n \"content\": \"# -*- coding: utf-8 -*-\\n# This Source Code Form is subject to the terms of the Mozilla Public\\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\\n# You can obtain one at http://mozilla.org/MPL/2.0/.\\n\\nimport logging\\nimport os\\nfrom datetime import timedelta\\nfrom functools import lru_cache\\nfrom typing import Sequence\\n\\nimport orjson\\nimport requests\\nimport zstandard\\nfrom redis import Redis\\n\\nfrom bugbug import bugzilla, repository, test_scheduling\\nfrom bugbug.github import Github\\nfrom bugbug.model import Model\\nfrom bugbug.models import testselect\\nfrom bugbug.utils import get_hgmo_stack\\nfrom bugbug_http.readthrough_cache import ReadthroughTTLCache\\n\\nlogging.basicConfig(level=logging.INFO)\\nLOGGER = logging.getLogger()\\n\\nMODELS_NAMES = [\\n \\\"defectenhancementtask\\\",\\n \\\"component\\\",\\n \\\"invalidcompatibilityreport\\\",\\n \\\"needsdiagnosis\\\",\\n \\\"regression\\\",\\n \\\"stepstoreproduce\\\",\\n \\\"spambug\\\",\\n \\\"testlabelselect\\\",\\n \\\"testgroupselect\\\",\\n \\\"accessibility\\\",\\n]\\n\\nDEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\\nredis = Redis.from_url(os.environ.get(\\\"REDIS_URL\\\", \\\"redis://localhost/0\\\"))\\n\\nMODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache(\\n timedelta(hours=1), lambda m: Model.load(f\\\"{m}model\\\")\\n)\\nMODEL_CACHE.start_ttl_thread()\\n\\ncctx = zstandard.ZstdCompressor(level=10)\\n\\n\\ndef setkey(key: str, value: bytes, compress: bool = False) -> None:\\n LOGGER.debug(f\\\"Storing data at {key}: {value!r}\\\")\\n if compress:\\n value = cctx.compress(value)\\n redis.set(key, value)\\n redis.expire(key, DEFAULT_EXPIRATION_TTL)\\n\\n\\ndef classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str:\\n from bugbug_http.app import JobInfo\\n\\n # This should be called in a process worker so it should be safe to set\\n # the token here\\n bug_ids_set = set(map(int, bug_ids))\\n bugzilla.set_token(bugzilla_token)\\n\\n bugs = bugzilla.get(bug_ids)\\n\\n missing_bugs = bug_ids_set.difference(bugs.keys())\\n\\n for bug_id in missing_bugs:\\n job = JobInfo(classify_bug, model_name, bug_id)\\n\\n # TODO: Find a better error format\\n setkey(job.result_key, orjson.dumps({\\\"available\\\": False}))\\n\\n if not bugs:\\n return \\\"NOK\\\"\\n\\n model = MODEL_CACHE.get(model_name)\\n\\n if not model:\\n LOGGER.info(\\\"Missing model %r, aborting\\\" % model_name)\\n return \\\"NOK\\\"\\n\\n model_extra_data = model.get_extra_data()\\n\\n # TODO: Classify could choke on a single bug which could make the whole\\n # job to fails. 
What should we do here?\\n probs = model.classify(list(bugs.values()), True)\\n indexes = probs.argmax(axis=-1)\\n suggestions = model.le.inverse_transform(indexes)\\n\\n probs_list = probs.tolist()\\n indexes_list = indexes.tolist()\\n suggestions_list = suggestions.tolist()\\n\\n for i, bug_id in enumerate(bugs.keys()):\\n data = {\\n \\\"prob\\\": probs_list[i],\\n \\\"index\\\": indexes_list[i],\\n \\\"class\\\": suggestions_list[i],\\n \\\"extra_data\\\": model_extra_data,\\n }\\n\\n job = JobInfo(classify_bug, model_name, bug_id)\\n setkey(job.result_key, orjson.dumps(data), compress=True)\\n\\n # Save the bug last change\\n setkey(job.change_time_key, bugs[bug_id][\\\"last_change_time\\\"].encode())\\n\\n return \\\"OK\\\"\\n\\n\\ndef classify_issue(\\n model_name: str, owner: str, repo: str, issue_nums: Sequence[int]\\n) -> str:\\n from bugbug_http.app import JobInfo\\n\\n github = Github(owner=owner, repo=repo)\\n\\n issue_ids_set = set(map(int, issue_nums))\\n\\n issues = {\\n issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True)\\n for issue_num in issue_nums\\n }\\n\\n missing_issues = issue_ids_set.difference(issues.keys())\\n\\n for issue_id in missing_issues:\\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\\n\\n # TODO: Find a better error format\\n setkey(job.result_key, orjson.dumps({\\\"available\\\": False}))\\n\\n if not issues:\\n return \\\"NOK\\\"\\n\\n model = MODEL_CACHE.get(model_name)\\n\\n if not model:\\n LOGGER.info(\\\"Missing model %r, aborting\\\" % model_name)\\n return \\\"NOK\\\"\\n\\n model_extra_data = model.get_extra_data()\\n\\n # TODO: Classify could choke on a single bug which could make the whole\\n # job to fail. What should we do here?\\n probs = model.classify(list(issues.values()), True)\\n indexes = probs.argmax(axis=-1)\\n suggestions = model.le.inverse_transform(indexes)\\n\\n probs_list = probs.tolist()\\n indexes_list = indexes.tolist()\\n suggestions_list = suggestions.tolist()\\n\\n for i, issue_id in enumerate(issues.keys()):\\n data = {\\n \\\"prob\\\": probs_list[i],\\n \\\"index\\\": indexes_list[i],\\n \\\"class\\\": suggestions_list[i],\\n \\\"extra_data\\\": model_extra_data,\\n }\\n\\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\\n setkey(job.result_key, orjson.dumps(data), compress=True)\\n\\n # Save the bug last change\\n setkey(job.change_time_key, issues[issue_id][\\\"updated_at\\\"].encode())\\n\\n return \\\"OK\\\"\\n\\n\\ndef classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str:\\n from bugbug_http.app import JobInfo\\n\\n reports = {\\n report[\\\"uuid\\\"]: {\\\"title\\\": report[\\\"title\\\"], \\\"body\\\": report[\\\"body\\\"]}\\n for report in reports_data\\n }\\n\\n if not reports:\\n return \\\"NOK\\\"\\n\\n model = MODEL_CACHE.get(model_name)\\n\\n if not model:\\n LOGGER.info(\\\"Missing model %r, aborting\\\" % model_name)\\n return \\\"NOK\\\"\\n\\n model_extra_data = model.get_extra_data()\\n probs = model.classify(list(reports.values()), True)\\n indexes = probs.argmax(axis=-1)\\n suggestions = model.le.inverse_transform(indexes)\\n\\n probs_list = probs.tolist()\\n indexes_list = indexes.tolist()\\n suggestions_list = suggestions.tolist()\\n\\n for i, report_uuid in enumerate(reports.keys()):\\n data = {\\n \\\"prob\\\": probs_list[i],\\n \\\"index\\\": indexes_list[i],\\n \\\"class\\\": suggestions_list[i],\\n \\\"extra_data\\\": model_extra_data,\\n }\\n\\n job = JobInfo(classify_broken_site_report, model_name, 
report_uuid)\\n setkey(job.result_key, orjson.dumps(data), compress=True)\\n\\n return \\\"OK\\\"\\n\\n\\n@lru_cache(maxsize=None)\\ndef get_known_tasks() -> tuple[str, ...]:\\n with open(\\\"known_tasks\\\", \\\"r\\\") as f:\\n return tuple(line.strip() for line in f)\\n\\n\\ndef schedule_tests(branch: str, rev: str) -> str:\\n from bugbug_http import REPO_DIR\\n from bugbug_http.app import JobInfo\\n\\n job = JobInfo(schedule_tests, branch, rev)\\n LOGGER.info(\\\"Processing %s...\\\", job)\\n\\n # Pull the revision to the local repository\\n LOGGER.info(\\\"Pulling commits from the remote repository...\\\")\\n repository.pull(REPO_DIR, branch, rev)\\n\\n # Load the full stack of patches leading to that revision\\n LOGGER.info(\\\"Loading commits to analyze using automationrelevance...\\\")\\n try:\\n revs = get_hgmo_stack(branch, rev)\\n except requests.exceptions.RequestException:\\n LOGGER.warning(f\\\"Push not found for {branch} @ {rev}!\\\")\\n return \\\"NOK\\\"\\n\\n test_selection_threshold = float(\\n os.environ.get(\\\"TEST_SELECTION_CONFIDENCE_THRESHOLD\\\", 0.5)\\n )\\n\\n # On \\\"try\\\", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493).\\n # On other repos, only consider \\\"tip\\\" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b).\\n if branch == \\\"try\\\":\\n repo_branch = None\\n else:\\n repo_branch = \\\"tip\\\"\\n\\n # Analyze patches.\\n commits = repository.download_commits(\\n REPO_DIR,\\n revs=revs,\\n branch=repo_branch,\\n save=False,\\n use_single_process=True,\\n include_no_bug=True,\\n )\\n\\n if len(commits) > 0:\\n testlabelselect_model = MODEL_CACHE.get(\\\"testlabelselect\\\")\\n testgroupselect_model = MODEL_CACHE.get(\\\"testgroupselect\\\")\\n\\n tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)\\n\\n reduced = testselect.reduce_configs(\\n set(t for t, c in tasks.items() if c >= 0.8), 1.0\\n )\\n\\n reduced_higher = testselect.reduce_configs(\\n set(t for t, c in tasks.items() if c >= 0.9), 1.0\\n )\\n\\n groups = testgroupselect_model.select_tests(commits, test_selection_threshold)\\n\\n config_groups = testselect.select_configs(groups.keys(), 0.9)\\n else:\\n tasks = {}\\n reduced = set()\\n groups = {}\\n config_groups = {}\\n\\n data = {\\n \\\"tasks\\\": tasks,\\n \\\"groups\\\": groups,\\n \\\"config_groups\\\": config_groups,\\n \\\"reduced_tasks\\\": {t: c for t, c in tasks.items() if t in reduced},\\n \\\"reduced_tasks_higher\\\": {t: c for t, c in tasks.items() if t in reduced_higher},\\n \\\"known_tasks\\\": get_known_tasks(),\\n }\\n setkey(job.result_key, orjson.dumps(data), compress=True)\\n\\n return \\\"OK\\\"\\n\\n\\ndef get_config_specific_groups(config: str) -> str:\\n from bugbug_http.app import JobInfo\\n\\n job = JobInfo(get_config_specific_groups, config)\\n LOGGER.info(\\\"Processing %s...\\\", job)\\n\\n equivalence_sets = testselect._get_equivalence_sets(0.9)\\n\\n past_failures_data = test_scheduling.PastFailures(\\\"group\\\", True)\\n\\n setkey(\\n job.result_key,\\n orjson.dumps(\\n [\\n {\\\"name\\\": group}\\n for group in past_failures_data.all_runnables\\n if any(\\n equivalence_set == {config}\\n for equivalence_set in equivalence_sets[group]\\n )\\n ]\\n ),\\n compress=True,\\n )\\n\\n return \\\"OK\\\"\\n\",\n \"path\": \"http_service/bugbug_http/models.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/http_service/bugbug_http/models.py 
b/http_service/bugbug_http/models.py\nindex 6fb1247b65..3615050925 100644\n--- a/http_service/bugbug_http/models.py\n+++ b/http_service/bugbug_http/models.py\n@@ -34,6 +34,7 @@\n \"spambug\",\n \"testlabelselect\",\n \"testgroupselect\",\n+ \"accessibility\",\n ]\n \n DEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\ndiff --git a/infra/data-pipeline.yml b/infra/data-pipeline.yml\nindex b1160932bb..c0ad93b0c6 100644\n--- a/infra/data-pipeline.yml\n+++ b/infra/data-pipeline.yml\n@@ -1003,6 +1003,44 @@ tasks:\n owner: release-mgmt-analysis@mozilla.com\n source: ${repository}/raw/master/data-pipeline.yml\n \n+ - ID: train-accessibility\n+ created: { $fromNow: \"\" }\n+ deadline: { $fromNow: \"3 days\" }\n+ expires: { $fromNow: \"1 year\" }\n+ provisionerId: proj-bugbug\n+ workerType: compute-smaller\n+ dependencies:\n+ - bugs-retrieval\n+ payload:\n+ maxRunTime: 25200\n+ image: mozilla/bugbug-base:${version}\n+ command:\n+ - bugbug-train\n+ - accessibility\n+\n+ artifacts:\n+ public/accessibilitymodel.tar.zst:\n+ expires: { $fromNow: \"1 month\" }\n+ path: /accessibilitymodel.tar.zst\n+ type: file\n+ public/metrics.json:\n+ expires: { $fromNow: \"1 year\" }\n+ path: /metrics.json\n+ type: file\n+\n+ routes:\n+ - notify.email.release-mgmt-analysis@mozilla.com.on-failed\n+ - notify.irc-channel.#bugbug.on-failed\n+ - index.project.bugbug.train_accessibility.${version}\n+ - index.project.bugbug.train_accessibility.per_version.${version}.${year}.${month}.${day}.${hour}.${minute}.${second}\n+ - index.project.bugbug.train_accessibility.per_date.${year}.${month}.${day}.${hour}.${minute}.${second}.${version}\n+ - index.project.bugbug.train_accessibility.latest\n+ metadata:\n+ name: bugbug train accessibility model\n+ description: bugbug train accessibility model\n+ owner: release-mgmt-analysis@mozilla.com\n+ source: ${repository}/raw/master/data-pipeline.yml\n+\n - ID: train-test-label-select\n created: { $fromNow: \"\" }\n deadline: { $fromNow: \"5 days\" }\n@@ -1215,6 +1253,7 @@ tasks:\n - train-test-group-select\n - train-test-failure\n - train-needsdiagnosis\n+ - train-accessibility\n payload:\n maxRunTime: 3600\n image: mozilla/bugbug-base:${version}\n@@ -1250,6 +1289,7 @@ tasks:\n - train-test-label-select\n - train-test-group-select\n - train-needsdiagnosis\n+ - train-accessibility\n payload:\n capabilities:\n privileged: true\n"}}},{"rowIdx":434,"cells":{"in_source_id":{"kind":"string","value":"getmoto__moto-698"},"issue":{"kind":"string","value":"Unable to create a key with a trailing slash using OrdinaryCallingFormat\nWhen using OrdinaryCallingFormat, it's not possible to create a key ending with a slash (e.g. when mimicking directory creation), since this is stripped off when parsing the key name. 
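To make the stripping concrete, here is a minimal standalone sketch (an illustration, not moto code itself; the two function bodies are copied from this record's before/after versions of `moto/s3bucket_path/utils.py`, and the example path is hypothetical) showing the behavioural difference — the fix in the diff further below simply drops the `rstrip("/")`:

```python
# Standalone illustration: before/after bodies of parse_key_name.

def parse_key_name_before(path):
    # rstrip("/") removes the trailing slash before splitting, so
    # "directory"-style keys come back without their final "/".
    return "/".join(path.rstrip("/").split("/")[2:])

def parse_key_name_after(path):
    # Splitting the raw path keeps an empty last segment, so the
    # trailing slash survives the join.
    return "/".join(path.split("/")[2:])

path = "/testbucket/key_ending_with_slash/"
print(parse_key_name_before(path))  # key_ending_with_slash
print(parse_key_name_after(path))   # key_ending_with_slash/
```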
I can't comment on S3, but this is at least different behaviour from Ceph.\n\nFor example, the below fails as is, but works if the connection uses SubdomainCallingFormat instead.\n\n```\nimport boto\nimport moto\nimport unittest\n\n\nclass TestCreatingKeyEndingWithSlash(unittest.TestCase):\n\n @moto.mock_s3\n def test_ordinary_calling_format(self):\n bucket_name = 'testbucket'\n key_name = 'key_ending_with_slash/'\n\n conn = boto.connect_s3('access_key', 'secret_key',\n calling_format=boto.s3.connection.OrdinaryCallingFormat())\n bucket = conn.create_bucket(bucket_name)\n\n key = boto.s3.key.Key(bucket)\n key.key = key_name\n key.set_contents_from_string('')\n\n self.assertIn(key_name, [k.name for k in bucket.get_all_keys()])\n```\n\n"},"before_files":{"kind":"list like","value":[{"content":"from __future__ import unicode_literals\nfrom six.moves.urllib.parse import urlparse\n\n\ndef bucket_name_from_url(url):\n pth = urlparse(url).path.lstrip(\"/\")\n\n l = pth.lstrip(\"/\").split(\"/\")\n if len(l) == 0 or l[0] == \"\":\n return None\n return l[0]\n\n\ndef parse_key_name(path):\n return \"/\".join(path.rstrip(\"/\").split(\"/\")[2:])\n\n\ndef is_delete_keys(request, path, bucket_name):\n return (\n path == u'/' + bucket_name + u'/?delete' or\n path == u'/' + bucket_name + u'?delete' or\n (path == u'/' + bucket_name and\n getattr(request, \"query_string\", \"\") == \"delete\")\n )\n","path":"moto/s3bucket_path/utils.py"}],"string":"[\n {\n \"content\": \"from __future__ import unicode_literals\\nfrom six.moves.urllib.parse import urlparse\\n\\n\\ndef bucket_name_from_url(url):\\n pth = urlparse(url).path.lstrip(\\\"/\\\")\\n\\n l = pth.lstrip(\\\"/\\\").split(\\\"/\\\")\\n if len(l) == 0 or l[0] == \\\"\\\":\\n return None\\n return l[0]\\n\\n\\ndef parse_key_name(path):\\n return \\\"/\\\".join(path.rstrip(\\\"/\\\").split(\\\"/\\\")[2:])\\n\\n\\ndef is_delete_keys(request, path, bucket_name):\\n return (\\n path == u'/' + bucket_name + u'/?delete' or\\n path == u'/' + bucket_name + u'?delete' or\\n (path == u'/' + bucket_name and\\n getattr(request, \\\"query_string\\\", \\\"\\\") == \\\"delete\\\")\\n )\\n\",\n \"path\": \"moto/s3bucket_path/utils.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"from __future__ import unicode_literals\nfrom six.moves.urllib.parse import urlparse\n\n\ndef bucket_name_from_url(url):\n pth = urlparse(url).path.lstrip(\"/\")\n\n l = pth.lstrip(\"/\").split(\"/\")\n if len(l) == 0 or l[0] == \"\":\n return None\n return l[0]\n\n\ndef parse_key_name(path):\n return \"/\".join(path.split(\"/\")[2:])\n\n\ndef is_delete_keys(request, path, bucket_name):\n return (\n path == u'/' + bucket_name + u'/?delete' or\n path == u'/' + bucket_name + u'?delete' or\n (path == u'/' + bucket_name and\n getattr(request, \"query_string\", \"\") == \"delete\")\n )\n","path":"moto/s3bucket_path/utils.py"}],"string":"[\n {\n \"content\": \"from __future__ import unicode_literals\\nfrom six.moves.urllib.parse import urlparse\\n\\n\\ndef bucket_name_from_url(url):\\n pth = urlparse(url).path.lstrip(\\\"/\\\")\\n\\n l = pth.lstrip(\\\"/\\\").split(\\\"/\\\")\\n if len(l) == 0 or l[0] == \\\"\\\":\\n return None\\n return l[0]\\n\\n\\ndef parse_key_name(path):\\n return \\\"/\\\".join(path.split(\\\"/\\\")[2:])\\n\\n\\ndef is_delete_keys(request, path, bucket_name):\\n return (\\n path == u'/' + bucket_name + u'/?delete' or\\n path == u'/' + bucket_name + u'?delete' or\\n (path == u'/' + bucket_name and\\n getattr(request, \\\"query_string\\\", \\\"\\\") == 
\\\"delete\\\")\\n )\\n\",\n \"path\": \"moto/s3bucket_path/utils.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/moto/s3bucket_path/utils.py b/moto/s3bucket_path/utils.py\nindex aa7dc12f0961..e10e64fb6492 100644\n--- a/moto/s3bucket_path/utils.py\n+++ b/moto/s3bucket_path/utils.py\n@@ -12,7 +12,7 @@ def bucket_name_from_url(url):\n \n \n def parse_key_name(path):\n- return \"/\".join(path.rstrip(\"/\").split(\"/\")[2:])\n+ return \"/\".join(path.split(\"/\")[2:])\n \n \n def is_delete_keys(request, path, bucket_name):\ndiff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py\nindex 95a755ab13d5..5519f0c5759a 100644\n--- a/tests/test_s3/test_s3.py\n+++ b/tests/test_s3/test_s3.py\n@@ -1076,3 +1076,20 @@ def test_website_configuration_xml():\n bucket = conn.create_bucket('test-bucket')\n bucket.set_website_configuration_xml(TEST_XML)\n bucket.get_website_configuration_xml().should.equal(TEST_XML)\n+\n+\n+@mock_s3\n+def test_key_with_trailing_slash_in_ordinary_calling_format():\n+ conn = boto.connect_s3(\n+ 'access_key',\n+ 'secret_key',\n+ calling_format=boto.s3.connection.OrdinaryCallingFormat()\n+ )\n+ bucket = conn.create_bucket('test_bucket_name')\n+\n+ key_name = 'key_with_slash/'\n+\n+ key = Key(bucket, key_name)\n+ key.set_contents_from_string('some value')\n+\n+ [k.name for k in bucket.get_all_keys()].should.contain(key_name)\n"}}},{"rowIdx":435,"cells":{"in_source_id":{"kind":"string","value":"pyca__cryptography-7106"},"issue":{"kind":"string","value":"adding custom attribute that have octetstring parameter\nlooks like I still can't put an octetstring attribute even if I used _tag in addattribute function that will be added in version 37 because _ASN1Type doesn't have octetstring(tag 04) in it.\r\n\r\n(PS: why it needs some whitelist for allowed tag types?)\r\n\r\nattribute I wanted to add: \r\nhttps://cabforum.org/wp-content/uploads/CA-Browser-Forum-BR-1.8.2.pdf\r\nappendix B, 2-b\r\n\r\ncabf OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) international-organizations(23) ca-browser-forum(140) }\r\n\r\ncaSigningNonce ATTRIBUTE ::= {\r\n WITH SYNTAX OCTET STRING\r\n EQUALITY MATCHING RULE octetStringMatch\r\n SINGLE VALUE TRUE\r\n ID { cabf-caSigningNonce }\r\n}\r\n\r\n\r\ncabf-caSigningNonce OBJECT IDENTIFIER ::= { cabf 41 }\r\n\r\n\r\nhttps://github.com/pyca/cryptography/pull/7038\r\n\r\ntested locally by editing name.py, rust part of code doesn't complain. \n"},"before_files":{"kind":"list like","value":[{"content":"# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nimport binascii\nimport re\nimport sys\nimport typing\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.hazmat.bindings._rust import (\n x509 as rust_x509,\n)\nfrom cryptography.x509.oid import NameOID, ObjectIdentifier\n\n\nclass _ASN1Type(utils.Enum):\n BitString = 3\n UTF8String = 12\n NumericString = 18\n PrintableString = 19\n T61String = 20\n IA5String = 22\n UTCTime = 23\n GeneralizedTime = 24\n VisibleString = 26\n UniversalString = 28\n BMPString = 30\n\n\n_ASN1_TYPE_TO_ENUM = {i.value: i for i in _ASN1Type}\n_NAMEOID_DEFAULT_TYPE: typing.Dict[ObjectIdentifier, _ASN1Type] = {\n NameOID.COUNTRY_NAME: _ASN1Type.PrintableString,\n NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString,\n NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString,\n NameOID.DN_QUALIFIER: _ASN1Type.PrintableString,\n NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String,\n NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String,\n}\n\n# Type alias\n_OidNameMap = typing.Mapping[ObjectIdentifier, str]\n\n#: Short attribute names from RFC 4514:\n#: https://tools.ietf.org/html/rfc4514#page-7\n_NAMEOID_TO_NAME: _OidNameMap = {\n NameOID.COMMON_NAME: \"CN\",\n NameOID.LOCALITY_NAME: \"L\",\n NameOID.STATE_OR_PROVINCE_NAME: \"ST\",\n NameOID.ORGANIZATION_NAME: \"O\",\n NameOID.ORGANIZATIONAL_UNIT_NAME: \"OU\",\n NameOID.COUNTRY_NAME: \"C\",\n NameOID.STREET_ADDRESS: \"STREET\",\n NameOID.DOMAIN_COMPONENT: \"DC\",\n NameOID.USER_ID: \"UID\",\n}\n_NAME_TO_NAMEOID = {v: k for k, v in _NAMEOID_TO_NAME.items()}\n\n\ndef _escape_dn_value(val: typing.Union[str, bytes]) -> str:\n \"\"\"Escape special characters in RFC4514 Distinguished Name value.\"\"\"\n\n if not val:\n return \"\"\n\n # RFC 4514 Section 2.4 defines the value as being the # (U+0023) character\n # followed by the hexadecimal encoding of the octets.\n if isinstance(val, bytes):\n return \"#\" + binascii.hexlify(val).decode(\"utf8\")\n\n # See https://tools.ietf.org/html/rfc4514#section-2.4\n val = val.replace(\"\\\\\", \"\\\\\\\\\")\n val = val.replace('\"', '\\\\\"')\n val = val.replace(\"+\", \"\\\\+\")\n val = val.replace(\",\", \"\\\\,\")\n val = val.replace(\";\", \"\\\\;\")\n val = val.replace(\"<\", \"\\\\<\")\n val = val.replace(\">\", \"\\\\>\")\n val = val.replace(\"\\0\", \"\\\\00\")\n\n if val[0] in (\"#\", \" \"):\n val = \"\\\\\" + val\n if val[-1] == \" \":\n val = val[:-1] + \"\\\\ \"\n\n return val\n\n\ndef _unescape_dn_value(val: str) -> str:\n if not val:\n return \"\"\n\n # See https://tools.ietf.org/html/rfc4514#section-3\n\n # special = escaped / SPACE / SHARP / EQUALS\n # escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE\n def sub(m):\n val = m.group(1)\n # Regular escape\n if len(val) == 1:\n return val\n # Hex-value scape\n return chr(int(val, 16))\n\n return _RFC4514NameParser._PAIR_RE.sub(sub, val)\n\n\nclass NameAttribute:\n def __init__(\n self,\n oid: ObjectIdentifier,\n value: typing.Union[str, bytes],\n _type: typing.Optional[_ASN1Type] = None,\n *,\n _validate: bool = True,\n ) -> None:\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n if _type == _ASN1Type.BitString:\n if oid != NameOID.X500_UNIQUE_IDENTIFIER:\n raise TypeError(\n \"oid must be X500_UNIQUE_IDENTIFIER for BitString type.\"\n )\n if not isinstance(value, bytes):\n raise TypeError(\"value must be bytes for BitString\")\n else:\n if not isinstance(value, str):\n raise TypeError(\"value argument must 
be a str\")\n\n if (\n oid == NameOID.COUNTRY_NAME\n or oid == NameOID.JURISDICTION_COUNTRY_NAME\n ):\n assert isinstance(value, str)\n c_len = len(value.encode(\"utf8\"))\n if c_len != 2 and _validate is True:\n raise ValueError(\n \"Country name must be a 2 character country code\"\n )\n elif c_len != 2:\n warnings.warn(\n \"Country names should be two characters, but the \"\n \"attribute is {} characters in length.\".format(c_len),\n stacklevel=2,\n )\n\n # The appropriate ASN1 string type varies by OID and is defined across\n # multiple RFCs including 2459, 3280, and 5280. In general UTF8String\n # is preferred (2459), but 3280 and 5280 specify several OIDs with\n # alternate types. This means when we see the sentinel value we need\n # to look up whether the OID has a non-UTF8 type. If it does, set it\n # to that. Otherwise, UTF8!\n if _type is None:\n _type = _NAMEOID_DEFAULT_TYPE.get(oid, _ASN1Type.UTF8String)\n\n if not isinstance(_type, _ASN1Type):\n raise TypeError(\"_type must be from the _ASN1Type enum\")\n\n self._oid = oid\n self._value = value\n self._type = _type\n\n @property\n def oid(self) -> ObjectIdentifier:\n return self._oid\n\n @property\n def value(self) -> typing.Union[str, bytes]:\n return self._value\n\n @property\n def rfc4514_attribute_name(self) -> str:\n \"\"\"\n The short attribute name (for example \"CN\") if available,\n otherwise the OID dotted string.\n \"\"\"\n return _NAMEOID_TO_NAME.get(self.oid, self.oid.dotted_string)\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n\n Use short attribute name if available, otherwise fall back to OID\n dotted string.\n \"\"\"\n attr_name = (\n attr_name_overrides.get(self.oid) if attr_name_overrides else None\n )\n if attr_name is None:\n attr_name = self.rfc4514_attribute_name\n\n return f\"{attr_name}={_escape_dn_value(self.value)}\"\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return self.oid == other.oid and self.value == other.value\n\n def __hash__(self) -> int:\n return hash((self.oid, self.value))\n\n def __repr__(self) -> str:\n return \"\".format(self)\n\n\nclass RelativeDistinguishedName:\n def __init__(self, attributes: typing.Iterable[NameAttribute]):\n attributes = list(attributes)\n if not attributes:\n raise ValueError(\"a relative distinguished name cannot be empty\")\n if not all(isinstance(x, NameAttribute) for x in attributes):\n raise TypeError(\"attributes must be an iterable of NameAttribute\")\n\n # Keep list and frozenset to preserve attribute order where it matters\n self._attributes = attributes\n self._attribute_set = frozenset(attributes)\n\n if len(self._attribute_set) != len(attributes):\n raise ValueError(\"duplicate attributes are not allowed\")\n\n def get_attributes_for_oid(\n self, oid: ObjectIdentifier\n ) -> typing.List[NameAttribute]:\n return [i for i in self if i.oid == oid]\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n\n Within each RDN, attributes are joined by '+', although that is rarely\n used in certificates.\n \"\"\"\n return \"+\".join(\n attr.rfc4514_string(attr_name_overrides)\n for attr in self._attributes\n )\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, RelativeDistinguishedName):\n return NotImplemented\n\n return self._attribute_set == 
other._attribute_set\n\n def __hash__(self) -> int:\n return hash(self._attribute_set)\n\n def __iter__(self) -> typing.Iterator[NameAttribute]:\n return iter(self._attributes)\n\n def __len__(self) -> int:\n return len(self._attributes)\n\n def __repr__(self) -> str:\n return \"\".format(self.rfc4514_string())\n\n\nclass Name:\n @typing.overload\n def __init__(self, attributes: typing.Iterable[NameAttribute]) -> None:\n ...\n\n @typing.overload\n def __init__(\n self, attributes: typing.Iterable[RelativeDistinguishedName]\n ) -> None:\n ...\n\n def __init__(\n self,\n attributes: typing.Iterable[\n typing.Union[NameAttribute, RelativeDistinguishedName]\n ],\n ) -> None:\n attributes = list(attributes)\n if all(isinstance(x, NameAttribute) for x in attributes):\n self._attributes = [\n RelativeDistinguishedName([typing.cast(NameAttribute, x)])\n for x in attributes\n ]\n elif all(isinstance(x, RelativeDistinguishedName) for x in attributes):\n self._attributes = typing.cast(\n typing.List[RelativeDistinguishedName], attributes\n )\n else:\n raise TypeError(\n \"attributes must be a list of NameAttribute\"\n \" or a list RelativeDistinguishedName\"\n )\n\n @classmethod\n def from_rfc4514_string(cls, data: str) -> \"Name\":\n return _RFC4514NameParser(data).parse()\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n For example 'CN=foobar.com,O=Foo Corp,C=US'\n\n An X.509 name is a two-level structure: a list of sets of attributes.\n Each list element is separated by ',' and within each list element, set\n elements are separated by '+'. The latter is almost never used in\n real world certificates. According to RFC4514 section 2.1 the\n RDNSequence must be reversed when converting to string representation.\n \"\"\"\n return \",\".join(\n attr.rfc4514_string(attr_name_overrides)\n for attr in reversed(self._attributes)\n )\n\n def get_attributes_for_oid(\n self, oid: ObjectIdentifier\n ) -> typing.List[NameAttribute]:\n return [i for i in self if i.oid == oid]\n\n @property\n def rdns(self) -> typing.List[RelativeDistinguishedName]:\n return self._attributes\n\n def public_bytes(self, backend: typing.Any = None) -> bytes:\n return rust_x509.encode_name_bytes(self)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __hash__(self) -> int:\n # TODO: this is relatively expensive, if this looks like a bottleneck\n # for you, consider optimizing!\n return hash(tuple(self._attributes))\n\n def __iter__(self) -> typing.Iterator[NameAttribute]:\n for rdn in self._attributes:\n for ava in rdn:\n yield ava\n\n def __len__(self) -> int:\n return sum(len(rdn) for rdn in self._attributes)\n\n def __repr__(self) -> str:\n rdns = \",\".join(attr.rfc4514_string() for attr in self._attributes)\n return \"\".format(rdns)\n\n\nclass _RFC4514NameParser:\n _OID_RE = re.compile(r\"(0|([1-9]\\d*))(\\.(0|([1-9]\\d*)))+\")\n _DESCR_RE = re.compile(r\"[a-zA-Z][a-zA-Z\\d-]*\")\n\n _PAIR = r\"\\\\([\\\\ #=\\\"\\+,;<>]|[\\da-zA-Z]{2})\"\n _PAIR_RE = re.compile(_PAIR)\n _LUTF1 = r\"[\\x01-\\x1f\\x21\\x24-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _SUTF1 = r\"[\\x01-\\x21\\x23-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _TUTF1 = r\"[\\x01-\\x1F\\x21\\x23-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _UTFMB = rf\"[\\x80-{chr(sys.maxunicode)}]\"\n _LEADCHAR = rf\"{_LUTF1}|{_UTFMB}\"\n _STRINGCHAR = 
rf\"{_SUTF1}|{_UTFMB}\"\n _TRAILCHAR = rf\"{_TUTF1}|{_UTFMB}\"\n _STRING_RE = re.compile(\n rf\"\"\"\n (\n ({_LEADCHAR}|{_PAIR})\n (\n ({_STRINGCHAR}|{_PAIR})*\n ({_TRAILCHAR}|{_PAIR})\n )?\n )?\n \"\"\",\n re.VERBOSE,\n )\n _HEXSTRING_RE = re.compile(r\"#([\\da-zA-Z]{2})+\")\n\n def __init__(self, data: str) -> None:\n self._data = data\n self._idx = 0\n\n def _has_data(self) -> bool:\n return self._idx < len(self._data)\n\n def _peek(self) -> typing.Optional[str]:\n if self._has_data():\n return self._data[self._idx]\n return None\n\n def _read_char(self, ch: str) -> None:\n if self._peek() != ch:\n raise ValueError\n self._idx += 1\n\n def _read_re(self, pat) -> str:\n match = pat.match(self._data, pos=self._idx)\n if match is None:\n raise ValueError\n val = match.group()\n self._idx += len(val)\n return val\n\n def parse(self) -> Name:\n rdns = [self._parse_rdn()]\n\n while self._has_data():\n self._read_char(\",\")\n rdns.append(self._parse_rdn())\n\n return Name(rdns)\n\n def _parse_rdn(self) -> RelativeDistinguishedName:\n nas = [self._parse_na()]\n while self._peek() == \"+\":\n self._read_char(\"+\")\n nas.append(self._parse_na())\n\n return RelativeDistinguishedName(nas)\n\n def _parse_na(self) -> NameAttribute:\n try:\n oid_value = self._read_re(self._OID_RE)\n except ValueError:\n name = self._read_re(self._DESCR_RE)\n oid = _NAME_TO_NAMEOID.get(name)\n if oid is None:\n raise ValueError\n else:\n oid = ObjectIdentifier(oid_value)\n\n self._read_char(\"=\")\n if self._peek() == \"#\":\n value = self._read_re(self._HEXSTRING_RE)\n value = binascii.unhexlify(value[1:]).decode()\n else:\n raw_value = self._read_re(self._STRING_RE)\n value = _unescape_dn_value(raw_value)\n\n return NameAttribute(oid, value)\n","path":"src/cryptography/x509/name.py"}],"string":"[\n {\n \"content\": \"# This file is dual licensed under the terms of the Apache License, Version\\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\\n# for complete details.\\n\\nimport binascii\\nimport re\\nimport sys\\nimport typing\\nimport warnings\\n\\nfrom cryptography import utils\\nfrom cryptography.hazmat.bindings._rust import (\\n x509 as rust_x509,\\n)\\nfrom cryptography.x509.oid import NameOID, ObjectIdentifier\\n\\n\\nclass _ASN1Type(utils.Enum):\\n BitString = 3\\n UTF8String = 12\\n NumericString = 18\\n PrintableString = 19\\n T61String = 20\\n IA5String = 22\\n UTCTime = 23\\n GeneralizedTime = 24\\n VisibleString = 26\\n UniversalString = 28\\n BMPString = 30\\n\\n\\n_ASN1_TYPE_TO_ENUM = {i.value: i for i in _ASN1Type}\\n_NAMEOID_DEFAULT_TYPE: typing.Dict[ObjectIdentifier, _ASN1Type] = {\\n NameOID.COUNTRY_NAME: _ASN1Type.PrintableString,\\n NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString,\\n NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString,\\n NameOID.DN_QUALIFIER: _ASN1Type.PrintableString,\\n NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String,\\n NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String,\\n}\\n\\n# Type alias\\n_OidNameMap = typing.Mapping[ObjectIdentifier, str]\\n\\n#: Short attribute names from RFC 4514:\\n#: https://tools.ietf.org/html/rfc4514#page-7\\n_NAMEOID_TO_NAME: _OidNameMap = {\\n NameOID.COMMON_NAME: \\\"CN\\\",\\n NameOID.LOCALITY_NAME: \\\"L\\\",\\n NameOID.STATE_OR_PROVINCE_NAME: \\\"ST\\\",\\n NameOID.ORGANIZATION_NAME: \\\"O\\\",\\n NameOID.ORGANIZATIONAL_UNIT_NAME: \\\"OU\\\",\\n NameOID.COUNTRY_NAME: \\\"C\\\",\\n NameOID.STREET_ADDRESS: \\\"STREET\\\",\\n NameOID.DOMAIN_COMPONENT: \\\"DC\\\",\\n NameOID.USER_ID: \\\"UID\\\",\\n}\\n_NAME_TO_NAMEOID = {v: k for k, v in _NAMEOID_TO_NAME.items()}\\n\\n\\ndef _escape_dn_value(val: typing.Union[str, bytes]) -> str:\\n \\\"\\\"\\\"Escape special characters in RFC4514 Distinguished Name value.\\\"\\\"\\\"\\n\\n if not val:\\n return \\\"\\\"\\n\\n # RFC 4514 Section 2.4 defines the value as being the # (U+0023) character\\n # followed by the hexadecimal encoding of the octets.\\n if isinstance(val, bytes):\\n return \\\"#\\\" + binascii.hexlify(val).decode(\\\"utf8\\\")\\n\\n # See https://tools.ietf.org/html/rfc4514#section-2.4\\n val = val.replace(\\\"\\\\\\\\\\\", \\\"\\\\\\\\\\\\\\\\\\\")\\n val = val.replace('\\\"', '\\\\\\\\\\\"')\\n val = val.replace(\\\"+\\\", \\\"\\\\\\\\+\\\")\\n val = val.replace(\\\",\\\", \\\"\\\\\\\\,\\\")\\n val = val.replace(\\\";\\\", \\\"\\\\\\\\;\\\")\\n val = val.replace(\\\"<\\\", \\\"\\\\\\\\<\\\")\\n val = val.replace(\\\">\\\", \\\"\\\\\\\\>\\\")\\n val = val.replace(\\\"\\\\0\\\", \\\"\\\\\\\\00\\\")\\n\\n if val[0] in (\\\"#\\\", \\\" \\\"):\\n val = \\\"\\\\\\\\\\\" + val\\n if val[-1] == \\\" \\\":\\n val = val[:-1] + \\\"\\\\\\\\ \\\"\\n\\n return val\\n\\n\\ndef _unescape_dn_value(val: str) -> str:\\n if not val:\\n return \\\"\\\"\\n\\n # See https://tools.ietf.org/html/rfc4514#section-3\\n\\n # special = escaped / SPACE / SHARP / EQUALS\\n # escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE\\n def sub(m):\\n val = m.group(1)\\n # Regular escape\\n if len(val) == 1:\\n return val\\n # Hex-value scape\\n return chr(int(val, 16))\\n\\n return _RFC4514NameParser._PAIR_RE.sub(sub, val)\\n\\n\\nclass NameAttribute:\\n def __init__(\\n self,\\n oid: ObjectIdentifier,\\n value: typing.Union[str, bytes],\\n _type: typing.Optional[_ASN1Type] = None,\\n *,\\n _validate: bool = True,\\n ) -> None:\\n if not isinstance(oid, ObjectIdentifier):\\n raise TypeError(\\n \\\"oid argument must be an ObjectIdentifier instance.\\\"\\n )\\n if _type == 
_ASN1Type.BitString:\\n if oid != NameOID.X500_UNIQUE_IDENTIFIER:\\n raise TypeError(\\n \\\"oid must be X500_UNIQUE_IDENTIFIER for BitString type.\\\"\\n )\\n if not isinstance(value, bytes):\\n raise TypeError(\\\"value must be bytes for BitString\\\")\\n else:\\n if not isinstance(value, str):\\n raise TypeError(\\\"value argument must be a str\\\")\\n\\n if (\\n oid == NameOID.COUNTRY_NAME\\n or oid == NameOID.JURISDICTION_COUNTRY_NAME\\n ):\\n assert isinstance(value, str)\\n c_len = len(value.encode(\\\"utf8\\\"))\\n if c_len != 2 and _validate is True:\\n raise ValueError(\\n \\\"Country name must be a 2 character country code\\\"\\n )\\n elif c_len != 2:\\n warnings.warn(\\n \\\"Country names should be two characters, but the \\\"\\n \\\"attribute is {} characters in length.\\\".format(c_len),\\n stacklevel=2,\\n )\\n\\n # The appropriate ASN1 string type varies by OID and is defined across\\n # multiple RFCs including 2459, 3280, and 5280. In general UTF8String\\n # is preferred (2459), but 3280 and 5280 specify several OIDs with\\n # alternate types. This means when we see the sentinel value we need\\n # to look up whether the OID has a non-UTF8 type. If it does, set it\\n # to that. Otherwise, UTF8!\\n if _type is None:\\n _type = _NAMEOID_DEFAULT_TYPE.get(oid, _ASN1Type.UTF8String)\\n\\n if not isinstance(_type, _ASN1Type):\\n raise TypeError(\\\"_type must be from the _ASN1Type enum\\\")\\n\\n self._oid = oid\\n self._value = value\\n self._type = _type\\n\\n @property\\n def oid(self) -> ObjectIdentifier:\\n return self._oid\\n\\n @property\\n def value(self) -> typing.Union[str, bytes]:\\n return self._value\\n\\n @property\\n def rfc4514_attribute_name(self) -> str:\\n \\\"\\\"\\\"\\n The short attribute name (for example \\\"CN\\\") if available,\\n otherwise the OID dotted string.\\n \\\"\\\"\\\"\\n return _NAMEOID_TO_NAME.get(self.oid, self.oid.dotted_string)\\n\\n def rfc4514_string(\\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\\n ) -> str:\\n \\\"\\\"\\\"\\n Format as RFC4514 Distinguished Name string.\\n\\n Use short attribute name if available, otherwise fall back to OID\\n dotted string.\\n \\\"\\\"\\\"\\n attr_name = (\\n attr_name_overrides.get(self.oid) if attr_name_overrides else None\\n )\\n if attr_name is None:\\n attr_name = self.rfc4514_attribute_name\\n\\n return f\\\"{attr_name}={_escape_dn_value(self.value)}\\\"\\n\\n def __eq__(self, other: object) -> bool:\\n if not isinstance(other, NameAttribute):\\n return NotImplemented\\n\\n return self.oid == other.oid and self.value == other.value\\n\\n def __hash__(self) -> int:\\n return hash((self.oid, self.value))\\n\\n def __repr__(self) -> str:\\n return \\\"\\\".format(self)\\n\\n\\nclass RelativeDistinguishedName:\\n def __init__(self, attributes: typing.Iterable[NameAttribute]):\\n attributes = list(attributes)\\n if not attributes:\\n raise ValueError(\\\"a relative distinguished name cannot be empty\\\")\\n if not all(isinstance(x, NameAttribute) for x in attributes):\\n raise TypeError(\\\"attributes must be an iterable of NameAttribute\\\")\\n\\n # Keep list and frozenset to preserve attribute order where it matters\\n self._attributes = attributes\\n self._attribute_set = frozenset(attributes)\\n\\n if len(self._attribute_set) != len(attributes):\\n raise ValueError(\\\"duplicate attributes are not allowed\\\")\\n\\n def get_attributes_for_oid(\\n self, oid: ObjectIdentifier\\n ) -> typing.List[NameAttribute]:\\n return [i for i in self if i.oid == oid]\\n\\n def 
rfc4514_string(\\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\\n ) -> str:\\n \\\"\\\"\\\"\\n Format as RFC4514 Distinguished Name string.\\n\\n Within each RDN, attributes are joined by '+', although that is rarely\\n used in certificates.\\n \\\"\\\"\\\"\\n return \\\"+\\\".join(\\n attr.rfc4514_string(attr_name_overrides)\\n for attr in self._attributes\\n )\\n\\n def __eq__(self, other: object) -> bool:\\n if not isinstance(other, RelativeDistinguishedName):\\n return NotImplemented\\n\\n return self._attribute_set == other._attribute_set\\n\\n def __hash__(self) -> int:\\n return hash(self._attribute_set)\\n\\n def __iter__(self) -> typing.Iterator[NameAttribute]:\\n return iter(self._attributes)\\n\\n def __len__(self) -> int:\\n return len(self._attributes)\\n\\n def __repr__(self) -> str:\\n return \\\"\\\".format(self.rfc4514_string())\\n\\n\\nclass Name:\\n @typing.overload\\n def __init__(self, attributes: typing.Iterable[NameAttribute]) -> None:\\n ...\\n\\n @typing.overload\\n def __init__(\\n self, attributes: typing.Iterable[RelativeDistinguishedName]\\n ) -> None:\\n ...\\n\\n def __init__(\\n self,\\n attributes: typing.Iterable[\\n typing.Union[NameAttribute, RelativeDistinguishedName]\\n ],\\n ) -> None:\\n attributes = list(attributes)\\n if all(isinstance(x, NameAttribute) for x in attributes):\\n self._attributes = [\\n RelativeDistinguishedName([typing.cast(NameAttribute, x)])\\n for x in attributes\\n ]\\n elif all(isinstance(x, RelativeDistinguishedName) for x in attributes):\\n self._attributes = typing.cast(\\n typing.List[RelativeDistinguishedName], attributes\\n )\\n else:\\n raise TypeError(\\n \\\"attributes must be a list of NameAttribute\\\"\\n \\\" or a list RelativeDistinguishedName\\\"\\n )\\n\\n @classmethod\\n def from_rfc4514_string(cls, data: str) -> \\\"Name\\\":\\n return _RFC4514NameParser(data).parse()\\n\\n def rfc4514_string(\\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\\n ) -> str:\\n \\\"\\\"\\\"\\n Format as RFC4514 Distinguished Name string.\\n For example 'CN=foobar.com,O=Foo Corp,C=US'\\n\\n An X.509 name is a two-level structure: a list of sets of attributes.\\n Each list element is separated by ',' and within each list element, set\\n elements are separated by '+'. The latter is almost never used in\\n real world certificates. 
According to RFC4514 section 2.1 the\\n RDNSequence must be reversed when converting to string representation.\\n \\\"\\\"\\\"\\n return \\\",\\\".join(\\n attr.rfc4514_string(attr_name_overrides)\\n for attr in reversed(self._attributes)\\n )\\n\\n def get_attributes_for_oid(\\n self, oid: ObjectIdentifier\\n ) -> typing.List[NameAttribute]:\\n return [i for i in self if i.oid == oid]\\n\\n @property\\n def rdns(self) -> typing.List[RelativeDistinguishedName]:\\n return self._attributes\\n\\n def public_bytes(self, backend: typing.Any = None) -> bytes:\\n return rust_x509.encode_name_bytes(self)\\n\\n def __eq__(self, other: object) -> bool:\\n if not isinstance(other, Name):\\n return NotImplemented\\n\\n return self._attributes == other._attributes\\n\\n def __hash__(self) -> int:\\n # TODO: this is relatively expensive, if this looks like a bottleneck\\n # for you, consider optimizing!\\n return hash(tuple(self._attributes))\\n\\n def __iter__(self) -> typing.Iterator[NameAttribute]:\\n for rdn in self._attributes:\\n for ava in rdn:\\n yield ava\\n\\n def __len__(self) -> int:\\n return sum(len(rdn) for rdn in self._attributes)\\n\\n def __repr__(self) -> str:\\n rdns = \\\",\\\".join(attr.rfc4514_string() for attr in self._attributes)\\n return \\\"\\\".format(rdns)\\n\\n\\nclass _RFC4514NameParser:\\n _OID_RE = re.compile(r\\\"(0|([1-9]\\\\d*))(\\\\.(0|([1-9]\\\\d*)))+\\\")\\n _DESCR_RE = re.compile(r\\\"[a-zA-Z][a-zA-Z\\\\d-]*\\\")\\n\\n _PAIR = r\\\"\\\\\\\\([\\\\\\\\ #=\\\\\\\"\\\\+,;<>]|[\\\\da-zA-Z]{2})\\\"\\n _PAIR_RE = re.compile(_PAIR)\\n _LUTF1 = r\\\"[\\\\x01-\\\\x1f\\\\x21\\\\x24-\\\\x2A\\\\x2D-\\\\x3A\\\\x3D\\\\x3F-\\\\x5B\\\\x5D-\\\\x7F]\\\"\\n _SUTF1 = r\\\"[\\\\x01-\\\\x21\\\\x23-\\\\x2A\\\\x2D-\\\\x3A\\\\x3D\\\\x3F-\\\\x5B\\\\x5D-\\\\x7F]\\\"\\n _TUTF1 = r\\\"[\\\\x01-\\\\x1F\\\\x21\\\\x23-\\\\x2A\\\\x2D-\\\\x3A\\\\x3D\\\\x3F-\\\\x5B\\\\x5D-\\\\x7F]\\\"\\n _UTFMB = rf\\\"[\\\\x80-{chr(sys.maxunicode)}]\\\"\\n _LEADCHAR = rf\\\"{_LUTF1}|{_UTFMB}\\\"\\n _STRINGCHAR = rf\\\"{_SUTF1}|{_UTFMB}\\\"\\n _TRAILCHAR = rf\\\"{_TUTF1}|{_UTFMB}\\\"\\n _STRING_RE = re.compile(\\n rf\\\"\\\"\\\"\\n (\\n ({_LEADCHAR}|{_PAIR})\\n (\\n ({_STRINGCHAR}|{_PAIR})*\\n ({_TRAILCHAR}|{_PAIR})\\n )?\\n )?\\n \\\"\\\"\\\",\\n re.VERBOSE,\\n )\\n _HEXSTRING_RE = re.compile(r\\\"#([\\\\da-zA-Z]{2})+\\\")\\n\\n def __init__(self, data: str) -> None:\\n self._data = data\\n self._idx = 0\\n\\n def _has_data(self) -> bool:\\n return self._idx < len(self._data)\\n\\n def _peek(self) -> typing.Optional[str]:\\n if self._has_data():\\n return self._data[self._idx]\\n return None\\n\\n def _read_char(self, ch: str) -> None:\\n if self._peek() != ch:\\n raise ValueError\\n self._idx += 1\\n\\n def _read_re(self, pat) -> str:\\n match = pat.match(self._data, pos=self._idx)\\n if match is None:\\n raise ValueError\\n val = match.group()\\n self._idx += len(val)\\n return val\\n\\n def parse(self) -> Name:\\n rdns = [self._parse_rdn()]\\n\\n while self._has_data():\\n self._read_char(\\\",\\\")\\n rdns.append(self._parse_rdn())\\n\\n return Name(rdns)\\n\\n def _parse_rdn(self) -> RelativeDistinguishedName:\\n nas = [self._parse_na()]\\n while self._peek() == \\\"+\\\":\\n self._read_char(\\\"+\\\")\\n nas.append(self._parse_na())\\n\\n return RelativeDistinguishedName(nas)\\n\\n def _parse_na(self) -> NameAttribute:\\n try:\\n oid_value = self._read_re(self._OID_RE)\\n except ValueError:\\n name = self._read_re(self._DESCR_RE)\\n oid = _NAME_TO_NAMEOID.get(name)\\n if oid is None:\\n raise ValueError\\n 
else:\\n oid = ObjectIdentifier(oid_value)\\n\\n self._read_char(\\\"=\\\")\\n if self._peek() == \\\"#\\\":\\n value = self._read_re(self._HEXSTRING_RE)\\n value = binascii.unhexlify(value[1:]).decode()\\n else:\\n raw_value = self._read_re(self._STRING_RE)\\n value = _unescape_dn_value(raw_value)\\n\\n return NameAttribute(oid, value)\\n\",\n \"path\": \"src/cryptography/x509/name.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport binascii\nimport re\nimport sys\nimport typing\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.hazmat.bindings._rust import (\n x509 as rust_x509,\n)\nfrom cryptography.x509.oid import NameOID, ObjectIdentifier\n\n\nclass _ASN1Type(utils.Enum):\n BitString = 3\n OctetString = 4\n UTF8String = 12\n NumericString = 18\n PrintableString = 19\n T61String = 20\n IA5String = 22\n UTCTime = 23\n GeneralizedTime = 24\n VisibleString = 26\n UniversalString = 28\n BMPString = 30\n\n\n_ASN1_TYPE_TO_ENUM = {i.value: i for i in _ASN1Type}\n_NAMEOID_DEFAULT_TYPE: typing.Dict[ObjectIdentifier, _ASN1Type] = {\n NameOID.COUNTRY_NAME: _ASN1Type.PrintableString,\n NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString,\n NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString,\n NameOID.DN_QUALIFIER: _ASN1Type.PrintableString,\n NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String,\n NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String,\n}\n\n# Type alias\n_OidNameMap = typing.Mapping[ObjectIdentifier, str]\n\n#: Short attribute names from RFC 4514:\n#: https://tools.ietf.org/html/rfc4514#page-7\n_NAMEOID_TO_NAME: _OidNameMap = {\n NameOID.COMMON_NAME: \"CN\",\n NameOID.LOCALITY_NAME: \"L\",\n NameOID.STATE_OR_PROVINCE_NAME: \"ST\",\n NameOID.ORGANIZATION_NAME: \"O\",\n NameOID.ORGANIZATIONAL_UNIT_NAME: \"OU\",\n NameOID.COUNTRY_NAME: \"C\",\n NameOID.STREET_ADDRESS: \"STREET\",\n NameOID.DOMAIN_COMPONENT: \"DC\",\n NameOID.USER_ID: \"UID\",\n}\n_NAME_TO_NAMEOID = {v: k for k, v in _NAMEOID_TO_NAME.items()}\n\n\ndef _escape_dn_value(val: typing.Union[str, bytes]) -> str:\n \"\"\"Escape special characters in RFC4514 Distinguished Name value.\"\"\"\n\n if not val:\n return \"\"\n\n # RFC 4514 Section 2.4 defines the value as being the # (U+0023) character\n # followed by the hexadecimal encoding of the octets.\n if isinstance(val, bytes):\n return \"#\" + binascii.hexlify(val).decode(\"utf8\")\n\n # See https://tools.ietf.org/html/rfc4514#section-2.4\n val = val.replace(\"\\\\\", \"\\\\\\\\\")\n val = val.replace('\"', '\\\\\"')\n val = val.replace(\"+\", \"\\\\+\")\n val = val.replace(\",\", \"\\\\,\")\n val = val.replace(\";\", \"\\\\;\")\n val = val.replace(\"<\", \"\\\\<\")\n val = val.replace(\">\", \"\\\\>\")\n val = val.replace(\"\\0\", \"\\\\00\")\n\n if val[0] in (\"#\", \" \"):\n val = \"\\\\\" + val\n if val[-1] == \" \":\n val = val[:-1] + \"\\\\ \"\n\n return val\n\n\ndef _unescape_dn_value(val: str) -> str:\n if not val:\n return \"\"\n\n # See https://tools.ietf.org/html/rfc4514#section-3\n\n # special = escaped / SPACE / SHARP / EQUALS\n # escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE\n def sub(m):\n val = m.group(1)\n # Regular escape\n if len(val) == 1:\n return val\n # Hex-value scape\n return chr(int(val, 16))\n\n return _RFC4514NameParser._PAIR_RE.sub(sub, val)\n\n\nclass NameAttribute:\n def __init__(\n self,\n oid: ObjectIdentifier,\n 
value: typing.Union[str, bytes],\n _type: typing.Optional[_ASN1Type] = None,\n *,\n _validate: bool = True,\n ) -> None:\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n if _type == _ASN1Type.BitString:\n if oid != NameOID.X500_UNIQUE_IDENTIFIER:\n raise TypeError(\n \"oid must be X500_UNIQUE_IDENTIFIER for BitString type.\"\n )\n if not isinstance(value, bytes):\n raise TypeError(\"value must be bytes for BitString\")\n else:\n if not isinstance(value, str):\n raise TypeError(\"value argument must be a str\")\n\n if (\n oid == NameOID.COUNTRY_NAME\n or oid == NameOID.JURISDICTION_COUNTRY_NAME\n ):\n assert isinstance(value, str)\n c_len = len(value.encode(\"utf8\"))\n if c_len != 2 and _validate is True:\n raise ValueError(\n \"Country name must be a 2 character country code\"\n )\n elif c_len != 2:\n warnings.warn(\n \"Country names should be two characters, but the \"\n \"attribute is {} characters in length.\".format(c_len),\n stacklevel=2,\n )\n\n # The appropriate ASN1 string type varies by OID and is defined across\n # multiple RFCs including 2459, 3280, and 5280. In general UTF8String\n # is preferred (2459), but 3280 and 5280 specify several OIDs with\n # alternate types. This means when we see the sentinel value we need\n # to look up whether the OID has a non-UTF8 type. If it does, set it\n # to that. Otherwise, UTF8!\n if _type is None:\n _type = _NAMEOID_DEFAULT_TYPE.get(oid, _ASN1Type.UTF8String)\n\n if not isinstance(_type, _ASN1Type):\n raise TypeError(\"_type must be from the _ASN1Type enum\")\n\n self._oid = oid\n self._value = value\n self._type = _type\n\n @property\n def oid(self) -> ObjectIdentifier:\n return self._oid\n\n @property\n def value(self) -> typing.Union[str, bytes]:\n return self._value\n\n @property\n def rfc4514_attribute_name(self) -> str:\n \"\"\"\n The short attribute name (for example \"CN\") if available,\n otherwise the OID dotted string.\n \"\"\"\n return _NAMEOID_TO_NAME.get(self.oid, self.oid.dotted_string)\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n\n Use short attribute name if available, otherwise fall back to OID\n dotted string.\n \"\"\"\n attr_name = (\n attr_name_overrides.get(self.oid) if attr_name_overrides else None\n )\n if attr_name is None:\n attr_name = self.rfc4514_attribute_name\n\n return f\"{attr_name}={_escape_dn_value(self.value)}\"\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return self.oid == other.oid and self.value == other.value\n\n def __hash__(self) -> int:\n return hash((self.oid, self.value))\n\n def __repr__(self) -> str:\n return \"\".format(self)\n\n\nclass RelativeDistinguishedName:\n def __init__(self, attributes: typing.Iterable[NameAttribute]):\n attributes = list(attributes)\n if not attributes:\n raise ValueError(\"a relative distinguished name cannot be empty\")\n if not all(isinstance(x, NameAttribute) for x in attributes):\n raise TypeError(\"attributes must be an iterable of NameAttribute\")\n\n # Keep list and frozenset to preserve attribute order where it matters\n self._attributes = attributes\n self._attribute_set = frozenset(attributes)\n\n if len(self._attribute_set) != len(attributes):\n raise ValueError(\"duplicate attributes are not allowed\")\n\n def get_attributes_for_oid(\n self, oid: ObjectIdentifier\n ) -> 
typing.List[NameAttribute]:\n return [i for i in self if i.oid == oid]\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n\n Within each RDN, attributes are joined by '+', although that is rarely\n used in certificates.\n \"\"\"\n return \"+\".join(\n attr.rfc4514_string(attr_name_overrides)\n for attr in self._attributes\n )\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, RelativeDistinguishedName):\n return NotImplemented\n\n return self._attribute_set == other._attribute_set\n\n def __hash__(self) -> int:\n return hash(self._attribute_set)\n\n def __iter__(self) -> typing.Iterator[NameAttribute]:\n return iter(self._attributes)\n\n def __len__(self) -> int:\n return len(self._attributes)\n\n def __repr__(self) -> str:\n return \"\".format(self.rfc4514_string())\n\n\nclass Name:\n @typing.overload\n def __init__(self, attributes: typing.Iterable[NameAttribute]) -> None:\n ...\n\n @typing.overload\n def __init__(\n self, attributes: typing.Iterable[RelativeDistinguishedName]\n ) -> None:\n ...\n\n def __init__(\n self,\n attributes: typing.Iterable[\n typing.Union[NameAttribute, RelativeDistinguishedName]\n ],\n ) -> None:\n attributes = list(attributes)\n if all(isinstance(x, NameAttribute) for x in attributes):\n self._attributes = [\n RelativeDistinguishedName([typing.cast(NameAttribute, x)])\n for x in attributes\n ]\n elif all(isinstance(x, RelativeDistinguishedName) for x in attributes):\n self._attributes = typing.cast(\n typing.List[RelativeDistinguishedName], attributes\n )\n else:\n raise TypeError(\n \"attributes must be a list of NameAttribute\"\n \" or a list RelativeDistinguishedName\"\n )\n\n @classmethod\n def from_rfc4514_string(cls, data: str) -> \"Name\":\n return _RFC4514NameParser(data).parse()\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n For example 'CN=foobar.com,O=Foo Corp,C=US'\n\n An X.509 name is a two-level structure: a list of sets of attributes.\n Each list element is separated by ',' and within each list element, set\n elements are separated by '+'. The latter is almost never used in\n real world certificates. 
According to RFC4514 section 2.1 the\n RDNSequence must be reversed when converting to string representation.\n \"\"\"\n return \",\".join(\n attr.rfc4514_string(attr_name_overrides)\n for attr in reversed(self._attributes)\n )\n\n def get_attributes_for_oid(\n self, oid: ObjectIdentifier\n ) -> typing.List[NameAttribute]:\n return [i for i in self if i.oid == oid]\n\n @property\n def rdns(self) -> typing.List[RelativeDistinguishedName]:\n return self._attributes\n\n def public_bytes(self, backend: typing.Any = None) -> bytes:\n return rust_x509.encode_name_bytes(self)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __hash__(self) -> int:\n # TODO: this is relatively expensive, if this looks like a bottleneck\n # for you, consider optimizing!\n return hash(tuple(self._attributes))\n\n def __iter__(self) -> typing.Iterator[NameAttribute]:\n for rdn in self._attributes:\n for ava in rdn:\n yield ava\n\n def __len__(self) -> int:\n return sum(len(rdn) for rdn in self._attributes)\n\n def __repr__(self) -> str:\n rdns = \",\".join(attr.rfc4514_string() for attr in self._attributes)\n return \"\".format(rdns)\n\n\nclass _RFC4514NameParser:\n _OID_RE = re.compile(r\"(0|([1-9]\\d*))(\\.(0|([1-9]\\d*)))+\")\n _DESCR_RE = re.compile(r\"[a-zA-Z][a-zA-Z\\d-]*\")\n\n _PAIR = r\"\\\\([\\\\ #=\\\"\\+,;<>]|[\\da-zA-Z]{2})\"\n _PAIR_RE = re.compile(_PAIR)\n _LUTF1 = r\"[\\x01-\\x1f\\x21\\x24-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _SUTF1 = r\"[\\x01-\\x21\\x23-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _TUTF1 = r\"[\\x01-\\x1F\\x21\\x23-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _UTFMB = rf\"[\\x80-{chr(sys.maxunicode)}]\"\n _LEADCHAR = rf\"{_LUTF1}|{_UTFMB}\"\n _STRINGCHAR = rf\"{_SUTF1}|{_UTFMB}\"\n _TRAILCHAR = rf\"{_TUTF1}|{_UTFMB}\"\n _STRING_RE = re.compile(\n rf\"\"\"\n (\n ({_LEADCHAR}|{_PAIR})\n (\n ({_STRINGCHAR}|{_PAIR})*\n ({_TRAILCHAR}|{_PAIR})\n )?\n )?\n \"\"\",\n re.VERBOSE,\n )\n _HEXSTRING_RE = re.compile(r\"#([\\da-zA-Z]{2})+\")\n\n def __init__(self, data: str) -> None:\n self._data = data\n self._idx = 0\n\n def _has_data(self) -> bool:\n return self._idx < len(self._data)\n\n def _peek(self) -> typing.Optional[str]:\n if self._has_data():\n return self._data[self._idx]\n return None\n\n def _read_char(self, ch: str) -> None:\n if self._peek() != ch:\n raise ValueError\n self._idx += 1\n\n def _read_re(self, pat) -> str:\n match = pat.match(self._data, pos=self._idx)\n if match is None:\n raise ValueError\n val = match.group()\n self._idx += len(val)\n return val\n\n def parse(self) -> Name:\n rdns = [self._parse_rdn()]\n\n while self._has_data():\n self._read_char(\",\")\n rdns.append(self._parse_rdn())\n\n return Name(rdns)\n\n def _parse_rdn(self) -> RelativeDistinguishedName:\n nas = [self._parse_na()]\n while self._peek() == \"+\":\n self._read_char(\"+\")\n nas.append(self._parse_na())\n\n return RelativeDistinguishedName(nas)\n\n def _parse_na(self) -> NameAttribute:\n try:\n oid_value = self._read_re(self._OID_RE)\n except ValueError:\n name = self._read_re(self._DESCR_RE)\n oid = _NAME_TO_NAMEOID.get(name)\n if oid is None:\n raise ValueError\n else:\n oid = ObjectIdentifier(oid_value)\n\n self._read_char(\"=\")\n if self._peek() == \"#\":\n value = self._read_re(self._HEXSTRING_RE)\n value = binascii.unhexlify(value[1:]).decode()\n else:\n raw_value = self._read_re(self._STRING_RE)\n value = _unescape_dn_value(raw_value)\n\n return 
NameAttribute(oid, value)\n","path":"src/cryptography/x509/name.py"}],"string":"[\n {\n \"content\": \"# This file is dual licensed under the terms of the Apache License, Version\\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\\n# for complete details.\\n\\nimport binascii\\nimport re\\nimport sys\\nimport typing\\nimport warnings\\n\\nfrom cryptography import utils\\nfrom cryptography.hazmat.bindings._rust import (\\n x509 as rust_x509,\\n)\\nfrom cryptography.x509.oid import NameOID, ObjectIdentifier\\n\\n\\nclass _ASN1Type(utils.Enum):\\n BitString = 3\\n OctetString = 4\\n UTF8String = 12\\n NumericString = 18\\n PrintableString = 19\\n T61String = 20\\n IA5String = 22\\n UTCTime = 23\\n GeneralizedTime = 24\\n VisibleString = 26\\n UniversalString = 28\\n BMPString = 30\\n\\n\\n_ASN1_TYPE_TO_ENUM = {i.value: i for i in _ASN1Type}\\n_NAMEOID_DEFAULT_TYPE: typing.Dict[ObjectIdentifier, _ASN1Type] = {\\n NameOID.COUNTRY_NAME: _ASN1Type.PrintableString,\\n NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString,\\n NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString,\\n NameOID.DN_QUALIFIER: _ASN1Type.PrintableString,\\n NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String,\\n NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String,\\n}\\n\\n# Type alias\\n_OidNameMap = typing.Mapping[ObjectIdentifier, str]\\n\\n#: Short attribute names from RFC 4514:\\n#: https://tools.ietf.org/html/rfc4514#page-7\\n_NAMEOID_TO_NAME: _OidNameMap = {\\n NameOID.COMMON_NAME: \\\"CN\\\",\\n NameOID.LOCALITY_NAME: \\\"L\\\",\\n NameOID.STATE_OR_PROVINCE_NAME: \\\"ST\\\",\\n NameOID.ORGANIZATION_NAME: \\\"O\\\",\\n NameOID.ORGANIZATIONAL_UNIT_NAME: \\\"OU\\\",\\n NameOID.COUNTRY_NAME: \\\"C\\\",\\n NameOID.STREET_ADDRESS: \\\"STREET\\\",\\n NameOID.DOMAIN_COMPONENT: \\\"DC\\\",\\n NameOID.USER_ID: \\\"UID\\\",\\n}\\n_NAME_TO_NAMEOID = {v: k for k, v in _NAMEOID_TO_NAME.items()}\\n\\n\\ndef _escape_dn_value(val: typing.Union[str, bytes]) -> str:\\n \\\"\\\"\\\"Escape special characters in RFC4514 Distinguished Name value.\\\"\\\"\\\"\\n\\n if not val:\\n return \\\"\\\"\\n\\n # RFC 4514 Section 2.4 defines the value as being the # (U+0023) character\\n # followed by the hexadecimal encoding of the octets.\\n if isinstance(val, bytes):\\n return \\\"#\\\" + binascii.hexlify(val).decode(\\\"utf8\\\")\\n\\n # See https://tools.ietf.org/html/rfc4514#section-2.4\\n val = val.replace(\\\"\\\\\\\\\\\", \\\"\\\\\\\\\\\\\\\\\\\")\\n val = val.replace('\\\"', '\\\\\\\\\\\"')\\n val = val.replace(\\\"+\\\", \\\"\\\\\\\\+\\\")\\n val = val.replace(\\\",\\\", \\\"\\\\\\\\,\\\")\\n val = val.replace(\\\";\\\", \\\"\\\\\\\\;\\\")\\n val = val.replace(\\\"<\\\", \\\"\\\\\\\\<\\\")\\n val = val.replace(\\\">\\\", \\\"\\\\\\\\>\\\")\\n val = val.replace(\\\"\\\\0\\\", \\\"\\\\\\\\00\\\")\\n\\n if val[0] in (\\\"#\\\", \\\" \\\"):\\n val = \\\"\\\\\\\\\\\" + val\\n if val[-1] == \\\" \\\":\\n val = val[:-1] + \\\"\\\\\\\\ \\\"\\n\\n return val\\n\\n\\ndef _unescape_dn_value(val: str) -> str:\\n if not val:\\n return \\\"\\\"\\n\\n # See https://tools.ietf.org/html/rfc4514#section-3\\n\\n # special = escaped / SPACE / SHARP / EQUALS\\n # escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE\\n def sub(m):\\n val = m.group(1)\\n # Regular escape\\n if len(val) == 1:\\n return val\\n # Hex-value scape\\n return chr(int(val, 16))\\n\\n return _RFC4514NameParser._PAIR_RE.sub(sub, val)\\n\\n\\nclass NameAttribute:\\n def __init__(\\n self,\\n oid: ObjectIdentifier,\\n value: typing.Union[str, bytes],\\n _type: 
typing.Optional[_ASN1Type] = None,\\n *,\\n _validate: bool = True,\\n ) -> None:\\n if not isinstance(oid, ObjectIdentifier):\\n raise TypeError(\\n \\\"oid argument must be an ObjectIdentifier instance.\\\"\\n )\\n if _type == _ASN1Type.BitString:\\n if oid != NameOID.X500_UNIQUE_IDENTIFIER:\\n raise TypeError(\\n \\\"oid must be X500_UNIQUE_IDENTIFIER for BitString type.\\\"\\n )\\n if not isinstance(value, bytes):\\n raise TypeError(\\\"value must be bytes for BitString\\\")\\n else:\\n if not isinstance(value, str):\\n raise TypeError(\\\"value argument must be a str\\\")\\n\\n if (\\n oid == NameOID.COUNTRY_NAME\\n or oid == NameOID.JURISDICTION_COUNTRY_NAME\\n ):\\n assert isinstance(value, str)\\n c_len = len(value.encode(\\\"utf8\\\"))\\n if c_len != 2 and _validate is True:\\n raise ValueError(\\n \\\"Country name must be a 2 character country code\\\"\\n )\\n elif c_len != 2:\\n warnings.warn(\\n \\\"Country names should be two characters, but the \\\"\\n \\\"attribute is {} characters in length.\\\".format(c_len),\\n stacklevel=2,\\n )\\n\\n # The appropriate ASN1 string type varies by OID and is defined across\\n # multiple RFCs including 2459, 3280, and 5280. In general UTF8String\\n # is preferred (2459), but 3280 and 5280 specify several OIDs with\\n # alternate types. This means when we see the sentinel value we need\\n # to look up whether the OID has a non-UTF8 type. If it does, set it\\n # to that. Otherwise, UTF8!\\n if _type is None:\\n _type = _NAMEOID_DEFAULT_TYPE.get(oid, _ASN1Type.UTF8String)\\n\\n if not isinstance(_type, _ASN1Type):\\n raise TypeError(\\\"_type must be from the _ASN1Type enum\\\")\\n\\n self._oid = oid\\n self._value = value\\n self._type = _type\\n\\n @property\\n def oid(self) -> ObjectIdentifier:\\n return self._oid\\n\\n @property\\n def value(self) -> typing.Union[str, bytes]:\\n return self._value\\n\\n @property\\n def rfc4514_attribute_name(self) -> str:\\n \\\"\\\"\\\"\\n The short attribute name (for example \\\"CN\\\") if available,\\n otherwise the OID dotted string.\\n \\\"\\\"\\\"\\n return _NAMEOID_TO_NAME.get(self.oid, self.oid.dotted_string)\\n\\n def rfc4514_string(\\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\\n ) -> str:\\n \\\"\\\"\\\"\\n Format as RFC4514 Distinguished Name string.\\n\\n Use short attribute name if available, otherwise fall back to OID\\n dotted string.\\n \\\"\\\"\\\"\\n attr_name = (\\n attr_name_overrides.get(self.oid) if attr_name_overrides else None\\n )\\n if attr_name is None:\\n attr_name = self.rfc4514_attribute_name\\n\\n return f\\\"{attr_name}={_escape_dn_value(self.value)}\\\"\\n\\n def __eq__(self, other: object) -> bool:\\n if not isinstance(other, NameAttribute):\\n return NotImplemented\\n\\n return self.oid == other.oid and self.value == other.value\\n\\n def __hash__(self) -> int:\\n return hash((self.oid, self.value))\\n\\n def __repr__(self) -> str:\\n return \\\"\\\".format(self)\\n\\n\\nclass RelativeDistinguishedName:\\n def __init__(self, attributes: typing.Iterable[NameAttribute]):\\n attributes = list(attributes)\\n if not attributes:\\n raise ValueError(\\\"a relative distinguished name cannot be empty\\\")\\n if not all(isinstance(x, NameAttribute) for x in attributes):\\n raise TypeError(\\\"attributes must be an iterable of NameAttribute\\\")\\n\\n # Keep list and frozenset to preserve attribute order where it matters\\n self._attributes = attributes\\n self._attribute_set = frozenset(attributes)\\n\\n if len(self._attribute_set) != len(attributes):\\n 
raise ValueError(\\\"duplicate attributes are not allowed\\\")\\n\\n def get_attributes_for_oid(\\n self, oid: ObjectIdentifier\\n ) -> typing.List[NameAttribute]:\\n return [i for i in self if i.oid == oid]\\n\\n def rfc4514_string(\\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\\n ) -> str:\\n \\\"\\\"\\\"\\n Format as RFC4514 Distinguished Name string.\\n\\n Within each RDN, attributes are joined by '+', although that is rarely\\n used in certificates.\\n \\\"\\\"\\\"\\n return \\\"+\\\".join(\\n attr.rfc4514_string(attr_name_overrides)\\n for attr in self._attributes\\n )\\n\\n def __eq__(self, other: object) -> bool:\\n if not isinstance(other, RelativeDistinguishedName):\\n return NotImplemented\\n\\n return self._attribute_set == other._attribute_set\\n\\n def __hash__(self) -> int:\\n return hash(self._attribute_set)\\n\\n def __iter__(self) -> typing.Iterator[NameAttribute]:\\n return iter(self._attributes)\\n\\n def __len__(self) -> int:\\n return len(self._attributes)\\n\\n def __repr__(self) -> str:\\n return \\\"\\\".format(self.rfc4514_string())\\n\\n\\nclass Name:\\n @typing.overload\\n def __init__(self, attributes: typing.Iterable[NameAttribute]) -> None:\\n ...\\n\\n @typing.overload\\n def __init__(\\n self, attributes: typing.Iterable[RelativeDistinguishedName]\\n ) -> None:\\n ...\\n\\n def __init__(\\n self,\\n attributes: typing.Iterable[\\n typing.Union[NameAttribute, RelativeDistinguishedName]\\n ],\\n ) -> None:\\n attributes = list(attributes)\\n if all(isinstance(x, NameAttribute) for x in attributes):\\n self._attributes = [\\n RelativeDistinguishedName([typing.cast(NameAttribute, x)])\\n for x in attributes\\n ]\\n elif all(isinstance(x, RelativeDistinguishedName) for x in attributes):\\n self._attributes = typing.cast(\\n typing.List[RelativeDistinguishedName], attributes\\n )\\n else:\\n raise TypeError(\\n \\\"attributes must be a list of NameAttribute\\\"\\n \\\" or a list RelativeDistinguishedName\\\"\\n )\\n\\n @classmethod\\n def from_rfc4514_string(cls, data: str) -> \\\"Name\\\":\\n return _RFC4514NameParser(data).parse()\\n\\n def rfc4514_string(\\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\\n ) -> str:\\n \\\"\\\"\\\"\\n Format as RFC4514 Distinguished Name string.\\n For example 'CN=foobar.com,O=Foo Corp,C=US'\\n\\n An X.509 name is a two-level structure: a list of sets of attributes.\\n Each list element is separated by ',' and within each list element, set\\n elements are separated by '+'. The latter is almost never used in\\n real world certificates. 
According to RFC4514 section 2.1 the\\n RDNSequence must be reversed when converting to string representation.\\n \\\"\\\"\\\"\\n return \\\",\\\".join(\\n attr.rfc4514_string(attr_name_overrides)\\n for attr in reversed(self._attributes)\\n )\\n\\n def get_attributes_for_oid(\\n self, oid: ObjectIdentifier\\n ) -> typing.List[NameAttribute]:\\n return [i for i in self if i.oid == oid]\\n\\n @property\\n def rdns(self) -> typing.List[RelativeDistinguishedName]:\\n return self._attributes\\n\\n def public_bytes(self, backend: typing.Any = None) -> bytes:\\n return rust_x509.encode_name_bytes(self)\\n\\n def __eq__(self, other: object) -> bool:\\n if not isinstance(other, Name):\\n return NotImplemented\\n\\n return self._attributes == other._attributes\\n\\n def __hash__(self) -> int:\\n # TODO: this is relatively expensive, if this looks like a bottleneck\\n # for you, consider optimizing!\\n return hash(tuple(self._attributes))\\n\\n def __iter__(self) -> typing.Iterator[NameAttribute]:\\n for rdn in self._attributes:\\n for ava in rdn:\\n yield ava\\n\\n def __len__(self) -> int:\\n return sum(len(rdn) for rdn in self._attributes)\\n\\n def __repr__(self) -> str:\\n rdns = \\\",\\\".join(attr.rfc4514_string() for attr in self._attributes)\\n return \\\"\\\".format(rdns)\\n\\n\\nclass _RFC4514NameParser:\\n _OID_RE = re.compile(r\\\"(0|([1-9]\\\\d*))(\\\\.(0|([1-9]\\\\d*)))+\\\")\\n _DESCR_RE = re.compile(r\\\"[a-zA-Z][a-zA-Z\\\\d-]*\\\")\\n\\n _PAIR = r\\\"\\\\\\\\([\\\\\\\\ #=\\\\\\\"\\\\+,;<>]|[\\\\da-zA-Z]{2})\\\"\\n _PAIR_RE = re.compile(_PAIR)\\n _LUTF1 = r\\\"[\\\\x01-\\\\x1f\\\\x21\\\\x24-\\\\x2A\\\\x2D-\\\\x3A\\\\x3D\\\\x3F-\\\\x5B\\\\x5D-\\\\x7F]\\\"\\n _SUTF1 = r\\\"[\\\\x01-\\\\x21\\\\x23-\\\\x2A\\\\x2D-\\\\x3A\\\\x3D\\\\x3F-\\\\x5B\\\\x5D-\\\\x7F]\\\"\\n _TUTF1 = r\\\"[\\\\x01-\\\\x1F\\\\x21\\\\x23-\\\\x2A\\\\x2D-\\\\x3A\\\\x3D\\\\x3F-\\\\x5B\\\\x5D-\\\\x7F]\\\"\\n _UTFMB = rf\\\"[\\\\x80-{chr(sys.maxunicode)}]\\\"\\n _LEADCHAR = rf\\\"{_LUTF1}|{_UTFMB}\\\"\\n _STRINGCHAR = rf\\\"{_SUTF1}|{_UTFMB}\\\"\\n _TRAILCHAR = rf\\\"{_TUTF1}|{_UTFMB}\\\"\\n _STRING_RE = re.compile(\\n rf\\\"\\\"\\\"\\n (\\n ({_LEADCHAR}|{_PAIR})\\n (\\n ({_STRINGCHAR}|{_PAIR})*\\n ({_TRAILCHAR}|{_PAIR})\\n )?\\n )?\\n \\\"\\\"\\\",\\n re.VERBOSE,\\n )\\n _HEXSTRING_RE = re.compile(r\\\"#([\\\\da-zA-Z]{2})+\\\")\\n\\n def __init__(self, data: str) -> None:\\n self._data = data\\n self._idx = 0\\n\\n def _has_data(self) -> bool:\\n return self._idx < len(self._data)\\n\\n def _peek(self) -> typing.Optional[str]:\\n if self._has_data():\\n return self._data[self._idx]\\n return None\\n\\n def _read_char(self, ch: str) -> None:\\n if self._peek() != ch:\\n raise ValueError\\n self._idx += 1\\n\\n def _read_re(self, pat) -> str:\\n match = pat.match(self._data, pos=self._idx)\\n if match is None:\\n raise ValueError\\n val = match.group()\\n self._idx += len(val)\\n return val\\n\\n def parse(self) -> Name:\\n rdns = [self._parse_rdn()]\\n\\n while self._has_data():\\n self._read_char(\\\",\\\")\\n rdns.append(self._parse_rdn())\\n\\n return Name(rdns)\\n\\n def _parse_rdn(self) -> RelativeDistinguishedName:\\n nas = [self._parse_na()]\\n while self._peek() == \\\"+\\\":\\n self._read_char(\\\"+\\\")\\n nas.append(self._parse_na())\\n\\n return RelativeDistinguishedName(nas)\\n\\n def _parse_na(self) -> NameAttribute:\\n try:\\n oid_value = self._read_re(self._OID_RE)\\n except ValueError:\\n name = self._read_re(self._DESCR_RE)\\n oid = _NAME_TO_NAMEOID.get(name)\\n if oid is None:\\n raise ValueError\\n 
else:\\n oid = ObjectIdentifier(oid_value)\\n\\n self._read_char(\\\"=\\\")\\n if self._peek() == \\\"#\\\":\\n value = self._read_re(self._HEXSTRING_RE)\\n value = binascii.unhexlify(value[1:]).decode()\\n else:\\n raw_value = self._read_re(self._STRING_RE)\\n value = _unescape_dn_value(raw_value)\\n\\n return NameAttribute(oid, value)\\n\",\n \"path\": \"src/cryptography/x509/name.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/src/cryptography/x509/name.py b/src/cryptography/x509/name.py\nindex 8b7e4ab430e4..4b32115781d1 100644\n--- a/src/cryptography/x509/name.py\n+++ b/src/cryptography/x509/name.py\n@@ -17,6 +17,7 @@\n \n class _ASN1Type(utils.Enum):\n BitString = 3\n+ OctetString = 4\n UTF8String = 12\n NumericString = 18\n PrintableString = 19\n"}}},{"rowIdx":436,"cells":{"in_source_id":{"kind":"string","value":"liqd__a4-meinberlin-1740"},"issue":{"kind":"string","value":"2 link limit on nav items\naffects the footer, need more than 2 items \n"},"before_files":{"kind":"list like","value":[{"content":"from django.db import models\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.core.models import Orderable\nfrom wagtail.snippets.models import register_snippet\n\n\nclass MenuItem(models.Model):\n title = models.CharField(max_length=255)\n link_page = models.ForeignKey('wagtailcore.Page')\n\n @property\n def url(self):\n return self.link_page.url\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.PageChooserPanel('link_page')\n ]\n\n\n@register_snippet\nclass NavigationMenu(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.InlinePanel('items', max_num=2)\n ]\n\n\nclass NavigationMenuItem(Orderable, MenuItem):\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\n","path":"meinberlin/apps/cms/models/navigation_menues.py"}],"string":"[\n {\n \"content\": \"from django.db import models\\nfrom modelcluster.fields import ParentalKey\\nfrom modelcluster.models import ClusterableModel\\nfrom wagtail.admin import edit_handlers\\nfrom wagtail.core.models import Orderable\\nfrom wagtail.snippets.models import register_snippet\\n\\n\\nclass MenuItem(models.Model):\\n title = models.CharField(max_length=255)\\n link_page = models.ForeignKey('wagtailcore.Page')\\n\\n @property\\n def url(self):\\n return self.link_page.url\\n\\n def __str__(self):\\n return self.title\\n\\n panels = [\\n edit_handlers.FieldPanel('title'),\\n edit_handlers.PageChooserPanel('link_page')\\n ]\\n\\n\\n@register_snippet\\nclass NavigationMenu(ClusterableModel):\\n title = models.CharField(max_length=255, null=False, blank=False)\\n\\n def __str__(self):\\n return self.title\\n\\n panels = [\\n edit_handlers.FieldPanel('title'),\\n edit_handlers.InlinePanel('items', max_num=2)\\n ]\\n\\n\\nclass NavigationMenuItem(Orderable, MenuItem):\\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\\n\",\n \"path\": \"meinberlin/apps/cms/models/navigation_menues.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"from django.db import models\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.core.models import Orderable\nfrom wagtail.snippets.models 
import register_snippet\n\n\nclass MenuItem(models.Model):\n title = models.CharField(max_length=255)\n link_page = models.ForeignKey('wagtailcore.Page')\n\n @property\n def url(self):\n return self.link_page.url\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.PageChooserPanel('link_page')\n ]\n\n\n@register_snippet\nclass NavigationMenu(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.InlinePanel('items')\n ]\n\n\nclass NavigationMenuItem(Orderable, MenuItem):\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\n","path":"meinberlin/apps/cms/models/navigation_menues.py"}],"string":"[\n {\n \"content\": \"from django.db import models\\nfrom modelcluster.fields import ParentalKey\\nfrom modelcluster.models import ClusterableModel\\nfrom wagtail.admin import edit_handlers\\nfrom wagtail.core.models import Orderable\\nfrom wagtail.snippets.models import register_snippet\\n\\n\\nclass MenuItem(models.Model):\\n title = models.CharField(max_length=255)\\n link_page = models.ForeignKey('wagtailcore.Page')\\n\\n @property\\n def url(self):\\n return self.link_page.url\\n\\n def __str__(self):\\n return self.title\\n\\n panels = [\\n edit_handlers.FieldPanel('title'),\\n edit_handlers.PageChooserPanel('link_page')\\n ]\\n\\n\\n@register_snippet\\nclass NavigationMenu(ClusterableModel):\\n title = models.CharField(max_length=255, null=False, blank=False)\\n\\n def __str__(self):\\n return self.title\\n\\n panels = [\\n edit_handlers.FieldPanel('title'),\\n edit_handlers.InlinePanel('items')\\n ]\\n\\n\\nclass NavigationMenuItem(Orderable, MenuItem):\\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\\n\",\n \"path\": \"meinberlin/apps/cms/models/navigation_menues.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/meinberlin/apps/cms/models/navigation_menues.py b/meinberlin/apps/cms/models/navigation_menues.py\nindex 87eec0169b..d8b2ad88ff 100644\n--- a/meinberlin/apps/cms/models/navigation_menues.py\n+++ b/meinberlin/apps/cms/models/navigation_menues.py\n@@ -32,7 +32,7 @@ def __str__(self):\n \n panels = [\n edit_handlers.FieldPanel('title'),\n- edit_handlers.InlinePanel('items', max_num=2)\n+ edit_handlers.InlinePanel('items')\n ]\n \n \ndiff --git a/meinberlin/assets/scss/components/_footer.scss b/meinberlin/assets/scss/components/_footer.scss\nindex 17616dd3b9..a04f779997 100644\n--- a/meinberlin/assets/scss/components/_footer.scss\n+++ b/meinberlin/assets/scss/components/_footer.scss\n@@ -20,7 +20,9 @@ body {\n \n .footer-nav__link {\n display: inline-block;\n- padding: 0.7em;\n+ padding-left: $padding;\n+ padding-top: 0.7em;\n+ padding-bottom: 0.7em;\n color: inherit;\n text-decoration: none;\n \n@@ -43,3 +45,11 @@ body {\n float: left;\n }\n }\n+\n+.beberlin__logo {\n+ padding-right: $padding;\n+}\n+\n+.berlin__logo {\n+ padding-left: $padding;\n+}\ndiff --git a/meinberlin/templates/footer.html b/meinberlin/templates/footer.html\nindex ba327ddc68..506015f099 100644\n--- a/meinberlin/templates/footer.html\n+++ b/meinberlin/templates/footer.html\n@@ -3,11 +3,17 @@\n
\n
\n
\n-
\n
\n"}}},{"rowIdx":437,"cells":{"in_source_id":{"kind":"string","value":"pytorch__rl-1536"},"issue":{"kind":"string","value":"[BUG] TruncatedNormal crashing when computing entropy\n## Describe the bug\r\n\r\nCalling `.entropy()` on a `TruncatedNormal` distribution causes the code to crash.\r\n\r\n## To Reproduce\r\n\r\nFirst crash happened using a PPO agent with entropy bonus turned on and actor parametrized with a `TruncatedNormal`.\r\nA simple snippet to reproduce is the following:\r\n\r\n```python\r\nimport torch\r\nfrom torchrl.modules.distributions import IndependentNormal, TruncatedNormal\r\n\r\nif __name__ == '__main__':\r\n\tloc, scale = torch.zeros(1), torch.ones(1)\r\n\td1 = IndependentNormal(loc, scale)\r\n\tprint(d1.entropy())\r\n\t\r\n\td2 = TruncatedNormal(loc, scale)\r\n\tprint(d2.entropy())\r\n```\r\n\r\n```bash\r\ntensor(1.4189)\r\nTraceback (most recent call last):\r\n File \"/home/diego/Desktop/test.py\", line 10, in \r\n print(d2.entropy())\r\n File \"/home/diego/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/distributions/independent.py\", line 103, in entropy\r\n entropy = self.base_dist.entropy()\r\nTypeError: 'Tensor' object is not callable\r\n\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe entropy value should be returned.\r\n\r\n## System info\r\n* Python 3.10.12\r\n* torch 2.0.1\r\n\r\n```python\r\nimport torchrl, numpy, sys\r\nprint(torchrl.__version__, numpy.__version__, sys.version, sys.platform)\r\n```\r\n```\r\n0.1.1 1.25.1 3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0] linux\r\n```\r\n## Reason and Possible fixes\r\n\r\nIn the `TruncatedStandardNormal` class, the `self._entropy` attribute is a constant tensor computed at initialization. For some reason, calling `TruncatedStandardNormal.entropy` returns the `self._entropy` attribute, rather than the `entropy()` property:\r\n\r\n```python\r\nimport torch\r\nfrom torchrl.modules.distributions.truncated_normal import TruncatedStandardNormal\r\nloc, scale = torch.zeros(1), torch.ones(1)\r\nprint(TruncatedStandardNormal(loc, scale).entropy)\r\nprint(TruncatedStandardNormal(loc, scale).entropy())\r\n```\r\n\r\n```bash\r\ntensor([-0.0104])\r\nTraceback (most recent call last):\r\n File \"/home/diego/Desktop/test.py\", line 5, in \r\n print(TruncatedStandardNormal(loc, scale).entropy())\r\nTypeError: 'Tensor' object is not callable\r\n\r\n```\r\n\r\n## Checklist\r\n\r\n- [x] I have checked that there is no similar issue in the repo (**required**)\r\n- [x] I have read the [documentation](https://github.com/pytorch/rl/tree/main/docs/) (**required**)\r\n- [x] I have provided a minimal working example to reproduce the bug (**required**)\r\n\n"},"before_files":{"kind":"list like","value":[{"content":"# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# from https://github.com/toshas/torch_truncnorm\n\nimport math\nfrom numbers import Number\n\nimport torch\nfrom torch.distributions import constraints, Distribution\nfrom torch.distributions.utils import broadcast_all\n\nCONST_SQRT_2 = math.sqrt(2)\nCONST_INV_SQRT_2PI = 1 / math.sqrt(2 * math.pi)\nCONST_INV_SQRT_2 = 1 / math.sqrt(2)\nCONST_LOG_INV_SQRT_2PI = math.log(CONST_INV_SQRT_2PI)\nCONST_LOG_SQRT_2PI_E = 0.5 * math.log(2 * math.pi * math.e)\n\n\nclass TruncatedStandardNormal(Distribution):\n \"\"\"Truncated Standard Normal distribution.\n\n Source: https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \"\"\"\n\n arg_constraints = {\n \"a\": constraints.real,\n \"b\": constraints.real,\n }\n has_rsample = True\n eps = 1e-6\n\n def __init__(self, a, b, validate_args=None):\n self.a, self.b = broadcast_all(a, b)\n if isinstance(a, Number) and isinstance(b, Number):\n batch_shape = torch.Size()\n else:\n batch_shape = self.a.size()\n super(TruncatedStandardNormal, self).__init__(\n batch_shape, validate_args=validate_args\n )\n if self.a.dtype != self.b.dtype:\n raise ValueError(\"Truncation bounds types are different\")\n if any(\n (self.a >= self.b)\n .view(\n -1,\n )\n .tolist()\n ):\n raise ValueError(\"Incorrect truncation range\")\n eps = self.eps\n self._dtype_min_gt_0 = eps\n self._dtype_max_lt_1 = 1 - eps\n self._little_phi_a = self._little_phi(self.a)\n self._little_phi_b = self._little_phi(self.b)\n self._big_phi_a = self._big_phi(self.a)\n self._big_phi_b = self._big_phi(self.b)\n self._Z = (self._big_phi_b - self._big_phi_a).clamp(eps, 1 - eps)\n self._log_Z = self._Z.log()\n little_phi_coeff_a = torch.nan_to_num(self.a, nan=math.nan)\n little_phi_coeff_b = torch.nan_to_num(self.b, nan=math.nan)\n self._lpbb_m_lpaa_d_Z = (\n self._little_phi_b * little_phi_coeff_b\n - self._little_phi_a * little_phi_coeff_a\n ) / self._Z\n self._mean = -(self._little_phi_b - self._little_phi_a) / self._Z\n self._variance = (\n 1\n - self._lpbb_m_lpaa_d_Z\n - ((self._little_phi_b - self._little_phi_a) / self._Z) ** 2\n )\n self._entropy = CONST_LOG_SQRT_2PI_E + self._log_Z - 0.5 * self._lpbb_m_lpaa_d_Z\n\n @constraints.dependent_property\n def support(self):\n return constraints.interval(self.a, self.b)\n\n @property\n def mean(self):\n return self._mean\n\n @property\n def variance(self):\n return self._variance\n\n @property\n def entropy(self):\n return self._entropy\n\n @property\n def auc(self):\n return self._Z\n\n @staticmethod\n def _little_phi(x):\n return (-(x**2) * 0.5).exp() * CONST_INV_SQRT_2PI\n\n def _big_phi(self, x):\n phi = 0.5 * (1 + (x * CONST_INV_SQRT_2).erf())\n return phi.clamp(self.eps, 1 - self.eps)\n\n @staticmethod\n def _inv_big_phi(x):\n return CONST_SQRT_2 * (2 * x - 1).erfinv()\n\n def cdf(self, value):\n if self._validate_args:\n self._validate_sample(value)\n return ((self._big_phi(value) - self._big_phi_a) / self._Z).clamp(0, 1)\n\n def icdf(self, value):\n y = self._big_phi_a + value * self._Z\n y = y.clamp(self.eps, 1 - self.eps)\n return self._inv_big_phi(y)\n\n def log_prob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n return CONST_LOG_INV_SQRT_2PI - self._log_Z - (value**2) * 0.5\n\n def rsample(self, sample_shape=None):\n if sample_shape is None:\n sample_shape = torch.Size([])\n shape = self._extended_shape(sample_shape)\n p = torch.empty(shape, 
device=self.a.device).uniform_(\n self._dtype_min_gt_0, self._dtype_max_lt_1\n )\n return self.icdf(p)\n\n\nclass TruncatedNormal(TruncatedStandardNormal):\n \"\"\"Truncated Normal distribution.\n\n https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \"\"\"\n\n has_rsample = True\n\n def __init__(self, loc, scale, a, b, validate_args=None):\n scale = scale.clamp_min(self.eps)\n self.loc, self.scale, a, b = broadcast_all(loc, scale, a, b)\n self._non_std_a = a\n self._non_std_b = b\n a = (a - self.loc) / self.scale\n b = (b - self.loc) / self.scale\n super(TruncatedNormal, self).__init__(a, b, validate_args=validate_args)\n self._log_scale = self.scale.log()\n self._mean = self._mean * self.scale + self.loc\n self._variance = self._variance * self.scale**2\n self._entropy += self._log_scale\n\n def _to_std_rv(self, value):\n return (value - self.loc) / self.scale\n\n def _from_std_rv(self, value):\n return value * self.scale + self.loc\n\n def cdf(self, value):\n return super(TruncatedNormal, self).cdf(self._to_std_rv(value))\n\n def icdf(self, value):\n sample = self._from_std_rv(super().icdf(value))\n\n # clamp data but keep gradients\n sample_clip = torch.stack(\n [sample.detach(), self._non_std_a.detach().expand_as(sample)], 0\n ).max(0)[0]\n sample_clip = torch.stack(\n [sample_clip, self._non_std_b.detach().expand_as(sample)], 0\n ).min(0)[0]\n sample.data.copy_(sample_clip)\n return sample\n\n def log_prob(self, value):\n value = self._to_std_rv(value)\n return super(TruncatedNormal, self).log_prob(value) - self._log_scale\n","path":"torchrl/modules/distributions/truncated_normal.py"}],"string":"[\n {\n \"content\": \"# Copyright (c) Meta Platforms, Inc. and affiliates.\\n#\\n# This source code is licensed under the MIT license found in the\\n# LICENSE file in the root directory of this source tree.\\n\\n\\n# from https://github.com/toshas/torch_truncnorm\\n\\nimport math\\nfrom numbers import Number\\n\\nimport torch\\nfrom torch.distributions import constraints, Distribution\\nfrom torch.distributions.utils import broadcast_all\\n\\nCONST_SQRT_2 = math.sqrt(2)\\nCONST_INV_SQRT_2PI = 1 / math.sqrt(2 * math.pi)\\nCONST_INV_SQRT_2 = 1 / math.sqrt(2)\\nCONST_LOG_INV_SQRT_2PI = math.log(CONST_INV_SQRT_2PI)\\nCONST_LOG_SQRT_2PI_E = 0.5 * math.log(2 * math.pi * math.e)\\n\\n\\nclass TruncatedStandardNormal(Distribution):\\n \\\"\\\"\\\"Truncated Standard Normal distribution.\\n\\n Source: https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\\n \\\"\\\"\\\"\\n\\n arg_constraints = {\\n \\\"a\\\": constraints.real,\\n \\\"b\\\": constraints.real,\\n }\\n has_rsample = True\\n eps = 1e-6\\n\\n def __init__(self, a, b, validate_args=None):\\n self.a, self.b = broadcast_all(a, b)\\n if isinstance(a, Number) and isinstance(b, Number):\\n batch_shape = torch.Size()\\n else:\\n batch_shape = self.a.size()\\n super(TruncatedStandardNormal, self).__init__(\\n batch_shape, validate_args=validate_args\\n )\\n if self.a.dtype != self.b.dtype:\\n raise ValueError(\\\"Truncation bounds types are different\\\")\\n if any(\\n (self.a >= self.b)\\n .view(\\n -1,\\n )\\n .tolist()\\n ):\\n raise ValueError(\\\"Incorrect truncation range\\\")\\n eps = self.eps\\n self._dtype_min_gt_0 = eps\\n self._dtype_max_lt_1 = 1 - eps\\n self._little_phi_a = self._little_phi(self.a)\\n self._little_phi_b = self._little_phi(self.b)\\n self._big_phi_a = self._big_phi(self.a)\\n self._big_phi_b = self._big_phi(self.b)\\n self._Z = (self._big_phi_b - self._big_phi_a).clamp(eps, 1 - 
eps)\\n self._log_Z = self._Z.log()\\n little_phi_coeff_a = torch.nan_to_num(self.a, nan=math.nan)\\n little_phi_coeff_b = torch.nan_to_num(self.b, nan=math.nan)\\n self._lpbb_m_lpaa_d_Z = (\\n self._little_phi_b * little_phi_coeff_b\\n - self._little_phi_a * little_phi_coeff_a\\n ) / self._Z\\n self._mean = -(self._little_phi_b - self._little_phi_a) / self._Z\\n self._variance = (\\n 1\\n - self._lpbb_m_lpaa_d_Z\\n - ((self._little_phi_b - self._little_phi_a) / self._Z) ** 2\\n )\\n self._entropy = CONST_LOG_SQRT_2PI_E + self._log_Z - 0.5 * self._lpbb_m_lpaa_d_Z\\n\\n @constraints.dependent_property\\n def support(self):\\n return constraints.interval(self.a, self.b)\\n\\n @property\\n def mean(self):\\n return self._mean\\n\\n @property\\n def variance(self):\\n return self._variance\\n\\n @property\\n def entropy(self):\\n return self._entropy\\n\\n @property\\n def auc(self):\\n return self._Z\\n\\n @staticmethod\\n def _little_phi(x):\\n return (-(x**2) * 0.5).exp() * CONST_INV_SQRT_2PI\\n\\n def _big_phi(self, x):\\n phi = 0.5 * (1 + (x * CONST_INV_SQRT_2).erf())\\n return phi.clamp(self.eps, 1 - self.eps)\\n\\n @staticmethod\\n def _inv_big_phi(x):\\n return CONST_SQRT_2 * (2 * x - 1).erfinv()\\n\\n def cdf(self, value):\\n if self._validate_args:\\n self._validate_sample(value)\\n return ((self._big_phi(value) - self._big_phi_a) / self._Z).clamp(0, 1)\\n\\n def icdf(self, value):\\n y = self._big_phi_a + value * self._Z\\n y = y.clamp(self.eps, 1 - self.eps)\\n return self._inv_big_phi(y)\\n\\n def log_prob(self, value):\\n if self._validate_args:\\n self._validate_sample(value)\\n return CONST_LOG_INV_SQRT_2PI - self._log_Z - (value**2) * 0.5\\n\\n def rsample(self, sample_shape=None):\\n if sample_shape is None:\\n sample_shape = torch.Size([])\\n shape = self._extended_shape(sample_shape)\\n p = torch.empty(shape, device=self.a.device).uniform_(\\n self._dtype_min_gt_0, self._dtype_max_lt_1\\n )\\n return self.icdf(p)\\n\\n\\nclass TruncatedNormal(TruncatedStandardNormal):\\n \\\"\\\"\\\"Truncated Normal distribution.\\n\\n https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\\n \\\"\\\"\\\"\\n\\n has_rsample = True\\n\\n def __init__(self, loc, scale, a, b, validate_args=None):\\n scale = scale.clamp_min(self.eps)\\n self.loc, self.scale, a, b = broadcast_all(loc, scale, a, b)\\n self._non_std_a = a\\n self._non_std_b = b\\n a = (a - self.loc) / self.scale\\n b = (b - self.loc) / self.scale\\n super(TruncatedNormal, self).__init__(a, b, validate_args=validate_args)\\n self._log_scale = self.scale.log()\\n self._mean = self._mean * self.scale + self.loc\\n self._variance = self._variance * self.scale**2\\n self._entropy += self._log_scale\\n\\n def _to_std_rv(self, value):\\n return (value - self.loc) / self.scale\\n\\n def _from_std_rv(self, value):\\n return value * self.scale + self.loc\\n\\n def cdf(self, value):\\n return super(TruncatedNormal, self).cdf(self._to_std_rv(value))\\n\\n def icdf(self, value):\\n sample = self._from_std_rv(super().icdf(value))\\n\\n # clamp data but keep gradients\\n sample_clip = torch.stack(\\n [sample.detach(), self._non_std_a.detach().expand_as(sample)], 0\\n ).max(0)[0]\\n sample_clip = torch.stack(\\n [sample_clip, self._non_std_b.detach().expand_as(sample)], 0\\n ).min(0)[0]\\n sample.data.copy_(sample_clip)\\n return sample\\n\\n def log_prob(self, value):\\n value = self._to_std_rv(value)\\n return super(TruncatedNormal, self).log_prob(value) - self._log_scale\\n\",\n \"path\": 
\"torchrl/modules/distributions/truncated_normal.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# from https://github.com/toshas/torch_truncnorm\n\nimport math\nfrom numbers import Number\n\nimport torch\nfrom torch.distributions import constraints, Distribution\nfrom torch.distributions.utils import broadcast_all\n\nCONST_SQRT_2 = math.sqrt(2)\nCONST_INV_SQRT_2PI = 1 / math.sqrt(2 * math.pi)\nCONST_INV_SQRT_2 = 1 / math.sqrt(2)\nCONST_LOG_INV_SQRT_2PI = math.log(CONST_INV_SQRT_2PI)\nCONST_LOG_SQRT_2PI_E = 0.5 * math.log(2 * math.pi * math.e)\n\n\nclass TruncatedStandardNormal(Distribution):\n \"\"\"Truncated Standard Normal distribution.\n\n Source: https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \"\"\"\n\n arg_constraints = {\n \"a\": constraints.real,\n \"b\": constraints.real,\n }\n has_rsample = True\n eps = 1e-6\n\n def __init__(self, a, b, validate_args=None):\n self.a, self.b = broadcast_all(a, b)\n if isinstance(a, Number) and isinstance(b, Number):\n batch_shape = torch.Size()\n else:\n batch_shape = self.a.size()\n super(TruncatedStandardNormal, self).__init__(\n batch_shape, validate_args=validate_args\n )\n if self.a.dtype != self.b.dtype:\n raise ValueError(\"Truncation bounds types are different\")\n if any(\n (self.a >= self.b)\n .view(\n -1,\n )\n .tolist()\n ):\n raise ValueError(\"Incorrect truncation range\")\n eps = self.eps\n self._dtype_min_gt_0 = eps\n self._dtype_max_lt_1 = 1 - eps\n self._little_phi_a = self._little_phi(self.a)\n self._little_phi_b = self._little_phi(self.b)\n self._big_phi_a = self._big_phi(self.a)\n self._big_phi_b = self._big_phi(self.b)\n self._Z = (self._big_phi_b - self._big_phi_a).clamp(eps, 1 - eps)\n self._log_Z = self._Z.log()\n little_phi_coeff_a = torch.nan_to_num(self.a, nan=math.nan)\n little_phi_coeff_b = torch.nan_to_num(self.b, nan=math.nan)\n self._lpbb_m_lpaa_d_Z = (\n self._little_phi_b * little_phi_coeff_b\n - self._little_phi_a * little_phi_coeff_a\n ) / self._Z\n self._mean = -(self._little_phi_b - self._little_phi_a) / self._Z\n self._variance = (\n 1\n - self._lpbb_m_lpaa_d_Z\n - ((self._little_phi_b - self._little_phi_a) / self._Z) ** 2\n )\n self._entropy = CONST_LOG_SQRT_2PI_E + self._log_Z - 0.5 * self._lpbb_m_lpaa_d_Z\n\n @constraints.dependent_property\n def support(self):\n return constraints.interval(self.a, self.b)\n\n @property\n def mean(self):\n return self._mean\n\n @property\n def variance(self):\n return self._variance\n\n def entropy(self):\n return self._entropy\n\n @property\n def auc(self):\n return self._Z\n\n @staticmethod\n def _little_phi(x):\n return (-(x**2) * 0.5).exp() * CONST_INV_SQRT_2PI\n\n def _big_phi(self, x):\n phi = 0.5 * (1 + (x * CONST_INV_SQRT_2).erf())\n return phi.clamp(self.eps, 1 - self.eps)\n\n @staticmethod\n def _inv_big_phi(x):\n return CONST_SQRT_2 * (2 * x - 1).erfinv()\n\n def cdf(self, value):\n if self._validate_args:\n self._validate_sample(value)\n return ((self._big_phi(value) - self._big_phi_a) / self._Z).clamp(0, 1)\n\n def icdf(self, value):\n y = self._big_phi_a + value * self._Z\n y = y.clamp(self.eps, 1 - self.eps)\n return self._inv_big_phi(y)\n\n def log_prob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n return CONST_LOG_INV_SQRT_2PI - self._log_Z - (value**2) * 0.5\n\n def rsample(self, 
sample_shape=None):\n if sample_shape is None:\n sample_shape = torch.Size([])\n shape = self._extended_shape(sample_shape)\n p = torch.empty(shape, device=self.a.device).uniform_(\n self._dtype_min_gt_0, self._dtype_max_lt_1\n )\n return self.icdf(p)\n\n\nclass TruncatedNormal(TruncatedStandardNormal):\n \"\"\"Truncated Normal distribution.\n\n https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \"\"\"\n\n has_rsample = True\n\n def __init__(self, loc, scale, a, b, validate_args=None):\n scale = scale.clamp_min(self.eps)\n self.loc, self.scale, a, b = broadcast_all(loc, scale, a, b)\n self._non_std_a = a\n self._non_std_b = b\n a = (a - self.loc) / self.scale\n b = (b - self.loc) / self.scale\n super(TruncatedNormal, self).__init__(a, b, validate_args=validate_args)\n self._log_scale = self.scale.log()\n self._mean = self._mean * self.scale + self.loc\n self._variance = self._variance * self.scale**2\n self._entropy += self._log_scale\n\n def _to_std_rv(self, value):\n return (value - self.loc) / self.scale\n\n def _from_std_rv(self, value):\n return value * self.scale + self.loc\n\n def cdf(self, value):\n return super(TruncatedNormal, self).cdf(self._to_std_rv(value))\n\n def icdf(self, value):\n sample = self._from_std_rv(super().icdf(value))\n\n # clamp data but keep gradients\n sample_clip = torch.stack(\n [sample.detach(), self._non_std_a.detach().expand_as(sample)], 0\n ).max(0)[0]\n sample_clip = torch.stack(\n [sample_clip, self._non_std_b.detach().expand_as(sample)], 0\n ).min(0)[0]\n sample.data.copy_(sample_clip)\n return sample\n\n def log_prob(self, value):\n value = self._to_std_rv(value)\n return super(TruncatedNormal, self).log_prob(value) - self._log_scale\n","path":"torchrl/modules/distributions/truncated_normal.py"}],"string":"[\n {\n \"content\": \"# Copyright (c) Meta Platforms, Inc. 
and affiliates.\\n#\\n# This source code is licensed under the MIT license found in the\\n# LICENSE file in the root directory of this source tree.\\n\\n\\n# from https://github.com/toshas/torch_truncnorm\\n\\nimport math\\nfrom numbers import Number\\n\\nimport torch\\nfrom torch.distributions import constraints, Distribution\\nfrom torch.distributions.utils import broadcast_all\\n\\nCONST_SQRT_2 = math.sqrt(2)\\nCONST_INV_SQRT_2PI = 1 / math.sqrt(2 * math.pi)\\nCONST_INV_SQRT_2 = 1 / math.sqrt(2)\\nCONST_LOG_INV_SQRT_2PI = math.log(CONST_INV_SQRT_2PI)\\nCONST_LOG_SQRT_2PI_E = 0.5 * math.log(2 * math.pi * math.e)\\n\\n\\nclass TruncatedStandardNormal(Distribution):\\n \\\"\\\"\\\"Truncated Standard Normal distribution.\\n\\n Source: https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\\n \\\"\\\"\\\"\\n\\n arg_constraints = {\\n \\\"a\\\": constraints.real,\\n \\\"b\\\": constraints.real,\\n }\\n has_rsample = True\\n eps = 1e-6\\n\\n def __init__(self, a, b, validate_args=None):\\n self.a, self.b = broadcast_all(a, b)\\n if isinstance(a, Number) and isinstance(b, Number):\\n batch_shape = torch.Size()\\n else:\\n batch_shape = self.a.size()\\n super(TruncatedStandardNormal, self).__init__(\\n batch_shape, validate_args=validate_args\\n )\\n if self.a.dtype != self.b.dtype:\\n raise ValueError(\\\"Truncation bounds types are different\\\")\\n if any(\\n (self.a >= self.b)\\n .view(\\n -1,\\n )\\n .tolist()\\n ):\\n raise ValueError(\\\"Incorrect truncation range\\\")\\n eps = self.eps\\n self._dtype_min_gt_0 = eps\\n self._dtype_max_lt_1 = 1 - eps\\n self._little_phi_a = self._little_phi(self.a)\\n self._little_phi_b = self._little_phi(self.b)\\n self._big_phi_a = self._big_phi(self.a)\\n self._big_phi_b = self._big_phi(self.b)\\n self._Z = (self._big_phi_b - self._big_phi_a).clamp(eps, 1 - eps)\\n self._log_Z = self._Z.log()\\n little_phi_coeff_a = torch.nan_to_num(self.a, nan=math.nan)\\n little_phi_coeff_b = torch.nan_to_num(self.b, nan=math.nan)\\n self._lpbb_m_lpaa_d_Z = (\\n self._little_phi_b * little_phi_coeff_b\\n - self._little_phi_a * little_phi_coeff_a\\n ) / self._Z\\n self._mean = -(self._little_phi_b - self._little_phi_a) / self._Z\\n self._variance = (\\n 1\\n - self._lpbb_m_lpaa_d_Z\\n - ((self._little_phi_b - self._little_phi_a) / self._Z) ** 2\\n )\\n self._entropy = CONST_LOG_SQRT_2PI_E + self._log_Z - 0.5 * self._lpbb_m_lpaa_d_Z\\n\\n @constraints.dependent_property\\n def support(self):\\n return constraints.interval(self.a, self.b)\\n\\n @property\\n def mean(self):\\n return self._mean\\n\\n @property\\n def variance(self):\\n return self._variance\\n\\n def entropy(self):\\n return self._entropy\\n\\n @property\\n def auc(self):\\n return self._Z\\n\\n @staticmethod\\n def _little_phi(x):\\n return (-(x**2) * 0.5).exp() * CONST_INV_SQRT_2PI\\n\\n def _big_phi(self, x):\\n phi = 0.5 * (1 + (x * CONST_INV_SQRT_2).erf())\\n return phi.clamp(self.eps, 1 - self.eps)\\n\\n @staticmethod\\n def _inv_big_phi(x):\\n return CONST_SQRT_2 * (2 * x - 1).erfinv()\\n\\n def cdf(self, value):\\n if self._validate_args:\\n self._validate_sample(value)\\n return ((self._big_phi(value) - self._big_phi_a) / self._Z).clamp(0, 1)\\n\\n def icdf(self, value):\\n y = self._big_phi_a + value * self._Z\\n y = y.clamp(self.eps, 1 - self.eps)\\n return self._inv_big_phi(y)\\n\\n def log_prob(self, value):\\n if self._validate_args:\\n self._validate_sample(value)\\n return CONST_LOG_INV_SQRT_2PI - self._log_Z - (value**2) * 0.5\\n\\n def rsample(self, 
sample_shape=None):\\n if sample_shape is None:\\n sample_shape = torch.Size([])\\n shape = self._extended_shape(sample_shape)\\n p = torch.empty(shape, device=self.a.device).uniform_(\\n self._dtype_min_gt_0, self._dtype_max_lt_1\\n )\\n return self.icdf(p)\\n\\n\\nclass TruncatedNormal(TruncatedStandardNormal):\\n \\\"\\\"\\\"Truncated Normal distribution.\\n\\n https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\\n \\\"\\\"\\\"\\n\\n has_rsample = True\\n\\n def __init__(self, loc, scale, a, b, validate_args=None):\\n scale = scale.clamp_min(self.eps)\\n self.loc, self.scale, a, b = broadcast_all(loc, scale, a, b)\\n self._non_std_a = a\\n self._non_std_b = b\\n a = (a - self.loc) / self.scale\\n b = (b - self.loc) / self.scale\\n super(TruncatedNormal, self).__init__(a, b, validate_args=validate_args)\\n self._log_scale = self.scale.log()\\n self._mean = self._mean * self.scale + self.loc\\n self._variance = self._variance * self.scale**2\\n self._entropy += self._log_scale\\n\\n def _to_std_rv(self, value):\\n return (value - self.loc) / self.scale\\n\\n def _from_std_rv(self, value):\\n return value * self.scale + self.loc\\n\\n def cdf(self, value):\\n return super(TruncatedNormal, self).cdf(self._to_std_rv(value))\\n\\n def icdf(self, value):\\n sample = self._from_std_rv(super().icdf(value))\\n\\n # clamp data but keep gradients\\n sample_clip = torch.stack(\\n [sample.detach(), self._non_std_a.detach().expand_as(sample)], 0\\n ).max(0)[0]\\n sample_clip = torch.stack(\\n [sample_clip, self._non_std_b.detach().expand_as(sample)], 0\\n ).min(0)[0]\\n sample.data.copy_(sample_clip)\\n return sample\\n\\n def log_prob(self, value):\\n value = self._to_std_rv(value)\\n return super(TruncatedNormal, self).log_prob(value) - self._log_scale\\n\",\n \"path\": \"torchrl/modules/distributions/truncated_normal.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/test/test_distributions.py b/test/test_distributions.py\nindex c804dcf7e53..30bb0288dd4 100644\n--- a/test/test_distributions.py\n+++ b/test/test_distributions.py\n@@ -30,40 +30,46 @@\n \n @pytest.mark.skipif(torch.__version__ < \"2.0\", reason=\"torch 2.0 is required\")\n @pytest.mark.parametrize(\"device\", get_default_devices())\n-@pytest.mark.parametrize(\"div_up\", [1, 2])\n-@pytest.mark.parametrize(\"div_down\", [1, 2])\n-def test_delta(device, div_up, div_down):\n- x = torch.randn(1000000, 4, device=device, dtype=torch.double)\n- d = Delta(x)\n- assert d.log_prob(d.mode).shape == x.shape[:-1]\n- assert (d.log_prob(d.mode) == float(\"inf\")).all()\n-\n- x = torch.randn(1000000, 4, device=device, dtype=torch.double)\n- d = TanhDelta(x, -1 / div_down, 1.0 / div_up, atol=1e-4, rtol=1e-4)\n- xinv = d.transforms[0].inv(d.mode)\n- assert d.base_dist._is_equal(xinv).all()\n- assert d.log_prob(d.mode).shape == x.shape[:-1]\n- assert (d.log_prob(d.mode) == float(\"inf\")).all()\n-\n- x = torch.randn(1000000, 4, device=device, dtype=torch.double)\n- d = TanhDelta(\n- x,\n- -torch.ones_like(x) / div_down,\n- torch.ones_like(x) / div_up,\n- atol=1e-4,\n- rtol=1e-4,\n- )\n- xinv = d.transforms[0].inv(d.mode)\n- assert d.base_dist._is_equal(xinv).all()\n- assert d.log_prob(d.mode).shape == x.shape[:-1]\n- assert (d.log_prob(d.mode) == float(\"inf\")).all()\n+class TestDelta:\n+ def test_delta_logprob(self, device):\n+ x = torch.randn(1000000, 4, device=device, dtype=torch.double)\n+ d = Delta(x)\n+ assert d.log_prob(d.mode).shape == x.shape[:-1]\n+ assert (d.log_prob(d.mode) == float(\"inf\")).all()\n+\n+ 
@pytest.mark.parametrize(\"div_up\", [1, 2])\n+ @pytest.mark.parametrize(\"div_down\", [1, 2])\n+ def test_tanhdelta_logprob(self, device, div_up, div_down):\n+ x = torch.randn(1000000, 4, device=device, dtype=torch.double)\n+ d = TanhDelta(x, -1 / div_down, 1.0 / div_up, atol=1e-4, rtol=1e-4)\n+ xinv = d.transforms[0].inv(d.mode)\n+ assert d.base_dist._is_equal(xinv).all()\n+ assert d.log_prob(d.mode).shape == x.shape[:-1]\n+ assert (d.log_prob(d.mode) == float(\"inf\")).all()\n+\n+ @pytest.mark.parametrize(\"div_up\", [1, 2])\n+ @pytest.mark.parametrize(\"div_down\", [1, 2])\n+ def test_tanhdelta_inv(self, device, div_up, div_down):\n+ x = torch.randn(1000000, 4, device=device, dtype=torch.double)\n+ d = TanhDelta(\n+ x,\n+ -torch.ones_like(x) / div_down,\n+ torch.ones_like(x) / div_up,\n+ atol=1e-4,\n+ rtol=1e-4,\n+ )\n+ xinv = d.transforms[0].inv(d.mode)\n+ assert d.base_dist._is_equal(xinv).all()\n+ assert d.log_prob(d.mode).shape == x.shape[:-1]\n+ assert (d.log_prob(d.mode) == float(\"inf\")).all()\n \n- x = torch.randn(1000000, 4, device=device)\n- d = TanhDelta(x, -torch.ones_like(x), torch.ones_like(x), atol=1e-4, rtol=1e-4)\n- xinv = d.transforms[0].inv(d.mode)\n- assert d.base_dist._is_equal(xinv).all()\n- assert d.log_prob(d.mode).shape == x.shape[:-1]\n- assert (d.log_prob(d.mode) == float(\"inf\")).all()\n+ def test_tanhdelta_inv_ones(self, device):\n+ x = torch.randn(1000000, 4, device=device)\n+ d = TanhDelta(x, -torch.ones_like(x), torch.ones_like(x), atol=1e-4, rtol=1e-4)\n+ xinv = d.transforms[0].inv(d.mode)\n+ assert d.base_dist._is_equal(xinv).all()\n+ assert d.log_prob(d.mode).shape == x.shape[:-1]\n+ assert (d.log_prob(d.mode) == float(\"inf\")).all()\n \n \n def _map_all(*tensors_or_other, device):\n@@ -74,42 +80,43 @@ def _map_all(*tensors_or_other, device):\n yield t\n \n \n-@pytest.mark.parametrize(\n- \"min\", [-torch.ones(3), -1, 3 * torch.tensor([-1.0, -2.0, -0.5]), -0.1]\n-)\n-@pytest.mark.parametrize(\n- \"max\", [torch.ones(3), 1, 3 * torch.tensor([1.0, 2.0, 0.5]), 0.1]\n-)\n-@pytest.mark.parametrize(\n- \"vecs\",\n- [\n- (torch.tensor([0.1, 10.0, 5.0]), torch.tensor([0.1, 10.0, 5.0])),\n- (torch.zeros(7, 3), torch.ones(7, 3)),\n- ],\n-)\n-@pytest.mark.parametrize(\n- \"upscale\", [torch.ones(3), 1, 3 * torch.tensor([1.0, 2.0, 0.5]), 3]\n-)\n-@pytest.mark.parametrize(\"shape\", [torch.Size([]), torch.Size([3, 4])])\n-@pytest.mark.parametrize(\"device\", get_default_devices())\n-def test_tanhnormal(min, max, vecs, upscale, shape, device):\n- min, max, vecs, upscale, shape = _map_all(\n- min, max, vecs, upscale, shape, device=device\n+class TestTanhNormal:\n+ @pytest.mark.parametrize(\n+ \"min\", [-torch.ones(3), -1, 3 * torch.tensor([-1.0, -2.0, -0.5]), -0.1]\n )\n- torch.manual_seed(0)\n- d = TanhNormal(\n- *vecs,\n- upscale=upscale,\n- min=min,\n- max=max,\n+ @pytest.mark.parametrize(\n+ \"max\", [torch.ones(3), 1, 3 * torch.tensor([1.0, 2.0, 0.5]), 0.1]\n )\n- for _ in range(100):\n- a = d.rsample(shape)\n- assert a.shape[: len(shape)] == shape\n- assert (a >= d.min).all()\n- assert (a <= d.max).all()\n- lp = d.log_prob(a)\n- assert torch.isfinite(lp).all()\n+ @pytest.mark.parametrize(\n+ \"vecs\",\n+ [\n+ (torch.tensor([0.1, 10.0, 5.0]), torch.tensor([0.1, 10.0, 5.0])),\n+ (torch.zeros(7, 3), torch.ones(7, 3)),\n+ ],\n+ )\n+ @pytest.mark.parametrize(\n+ \"upscale\", [torch.ones(3), 1, 3 * torch.tensor([1.0, 2.0, 0.5]), 3]\n+ )\n+ @pytest.mark.parametrize(\"shape\", [torch.Size([]), torch.Size([3, 4])])\n+ @pytest.mark.parametrize(\"device\", 
get_default_devices())\n+ def test_tanhnormal(self, min, max, vecs, upscale, shape, device):\n+ min, max, vecs, upscale, shape = _map_all(\n+ min, max, vecs, upscale, shape, device=device\n+ )\n+ torch.manual_seed(0)\n+ d = TanhNormal(\n+ *vecs,\n+ upscale=upscale,\n+ min=min,\n+ max=max,\n+ )\n+ for _ in range(100):\n+ a = d.rsample(shape)\n+ assert a.shape[: len(shape)] == shape\n+ assert (a >= d.min).all()\n+ assert (a <= d.max).all()\n+ lp = d.log_prob(a)\n+ assert torch.isfinite(lp).all()\n \n \n @pytest.mark.parametrize(\n@@ -130,24 +137,40 @@ def test_tanhnormal(min, max, vecs, upscale, shape, device):\n )\n @pytest.mark.parametrize(\"shape\", [torch.Size([]), torch.Size([3, 4])])\n @pytest.mark.parametrize(\"device\", get_default_devices())\n-def test_truncnormal(min, max, vecs, upscale, shape, device):\n- torch.manual_seed(0)\n- min, max, vecs, upscale, shape = _map_all(\n- min, max, vecs, upscale, shape, device=device\n- )\n- d = TruncatedNormal(\n- *vecs,\n- upscale=upscale,\n- min=min,\n- max=max,\n- )\n- for _ in range(100):\n- a = d.rsample(shape)\n- assert a.shape[: len(shape)] == shape\n- assert (a >= d.min).all()\n- assert (a <= d.max).all()\n- lp = d.log_prob(a)\n- assert torch.isfinite(lp).all()\n+class TestTruncatedNormal:\n+ def test_truncnormal(self, min, max, vecs, upscale, shape, device):\n+ torch.manual_seed(0)\n+ min, max, vecs, upscale, shape = _map_all(\n+ min, max, vecs, upscale, shape, device=device\n+ )\n+ d = TruncatedNormal(\n+ *vecs,\n+ upscale=upscale,\n+ min=min,\n+ max=max,\n+ )\n+ for _ in range(100):\n+ a = d.rsample(shape)\n+ assert a.shape[: len(shape)] == shape\n+ assert (a >= d.min).all()\n+ assert (a <= d.max).all()\n+ lp = d.log_prob(a)\n+ assert torch.isfinite(lp).all()\n+\n+ def test_truncnormal_mode(self, min, max, vecs, upscale, shape, device):\n+ torch.manual_seed(0)\n+ min, max, vecs, upscale, shape = _map_all(\n+ min, max, vecs, upscale, shape, device=device\n+ )\n+ d = TruncatedNormal(\n+ *vecs,\n+ upscale=upscale,\n+ min=min,\n+ max=max,\n+ )\n+ assert d.mode is not None\n+ assert d.entropy() is not None\n+ assert d.mean is not None\n \n \n @pytest.mark.parametrize(\ndiff --git a/torchrl/modules/distributions/truncated_normal.py b/torchrl/modules/distributions/truncated_normal.py\nindex 1dfde393709..59b95658ea5 100644\n--- a/torchrl/modules/distributions/truncated_normal.py\n+++ b/torchrl/modules/distributions/truncated_normal.py\n@@ -87,7 +87,6 @@ def mean(self):\n def variance(self):\n return self._variance\n \n- @property\n def entropy(self):\n return self._entropy\n \n"}}},{"rowIdx":438,"cells":{"in_source_id":{"kind":"string","value":"conda__conda-build-3082"},"issue":{"kind":"string","value":"Build variants does not work with source repo revision variations\nWith a `meta.yaml` that contains:\r\n```\r\nsource:\r\n svn_url: an_url\r\n svn_rev: {{ version }}\r\n```\r\nand a `conda_build_config.yaml` with:\r\n```\r\nversion:\r\n - 1\r\n - 2\r\n```\r\nBoth packages are built from the same sources of the first version, the repo is only checked out once.\r\n\r\nAm I doing it properly or is it a bug?\r\n\n"},"before_files":{"kind":"list like","value":[{"content":"from __future__ import absolute_import, division, print_function\n\nimport contextlib\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport warnings\nfrom glob import glob\nfrom os.path import join, normpath\n\n# noqa here because PY3 is used only on windows, and trips up flake8 otherwise.\nfrom 
.conda_interface import text_type, PY3 # noqa\nfrom .conda_interface import (CondaError, LinkError, LockError, NoPackagesFoundError,\n PaddingError, UnsatisfiableError)\nfrom .conda_interface import display_actions, execute_actions, execute_plan, install_actions\nfrom .conda_interface import memoized\nfrom .conda_interface import package_cache, TemporaryDirectory\nfrom .conda_interface import pkgs_dirs, root_dir, symlink_conda\n\nfrom conda_build import utils\nfrom conda_build.exceptions import DependencyNeedsBuildingError\nfrom conda_build.features import feature_list\nfrom conda_build.index import get_build_index\nfrom conda_build.os_utils import external\nfrom conda_build.utils import ensure_list, prepend_bin_path\nfrom conda_build.variants import get_default_variant\n\n\n# these are things that we provide env vars for more explicitly. This list disables the\n# pass-through of variant values to env vars for these keys.\nLANGUAGES = ('PERL', 'LUA', 'R', \"NUMPY\", 'PYTHON')\n\n\ndef get_perl_ver(config):\n return '.'.join(config.variant.get('perl', get_default_variant(config)['perl']).split('.')[:2])\n\n\ndef get_lua_ver(config):\n return '.'.join(config.variant.get('lua', get_default_variant(config)['lua']).split('.')[:2])\n\n\ndef get_py_ver(config):\n py = config.variant.get('python', get_default_variant(config)['python'])\n if not hasattr(py, 'split'):\n py = py[0]\n return '.'.join(py.split('.')[:2])\n\n\ndef get_r_ver(config):\n return '.'.join(config.variant.get('r_base',\n get_default_variant(config)['r_base']).split('.')[:3])\n\n\ndef get_npy_ver(config):\n conda_npy = ''.join(str(config.variant.get('numpy') or\n get_default_variant(config)['numpy']).split('.'))\n # Convert int -> string, e.g.\n # 17 -> '1.7'\n # 110 -> '1.10'\n return conda_npy[0] + '.' + conda_npy[1:]\n\n\ndef get_lua_include_dir(config):\n return join(config.host_prefix, \"include\")\n\n\n@memoized\ndef verify_git_repo(git_exe, git_dir, git_url, git_commits_since_tag, debug=False,\n expected_rev='HEAD'):\n env = os.environ.copy()\n log = utils.get_logger(__name__)\n\n if debug:\n stderr = None\n else:\n FNULL = open(os.devnull, 'w')\n stderr = FNULL\n\n if not expected_rev:\n return False\n\n OK = True\n\n env['GIT_DIR'] = git_dir\n try:\n # Verify current commit (minus our locally applied patches) matches expected commit\n current_commit = utils.check_output_env([git_exe,\n \"log\",\n \"-n1\",\n \"--format=%H\",\n \"HEAD\" + \"^\" * git_commits_since_tag],\n env=env, stderr=stderr)\n current_commit = current_commit.decode('utf-8')\n expected_tag_commit = utils.check_output_env([git_exe, \"log\", \"-n1\", \"--format=%H\",\n expected_rev],\n env=env, stderr=stderr)\n expected_tag_commit = expected_tag_commit.decode('utf-8')\n\n if current_commit != expected_tag_commit:\n return False\n\n # Verify correct remote url. 
Need to find the git cache directory,\n # and check the remote from there.\n cache_details = utils.check_output_env([git_exe, \"remote\", \"-v\"], env=env,\n stderr=stderr)\n cache_details = cache_details.decode('utf-8')\n cache_dir = cache_details.split('\\n')[0].split()[1]\n\n if not isinstance(cache_dir, str):\n # On Windows, subprocess env can't handle unicode.\n cache_dir = cache_dir.encode(sys.getfilesystemencoding() or 'utf-8')\n\n try:\n remote_details = utils.check_output_env([git_exe, \"--git-dir\", cache_dir,\n \"remote\", \"-v\"],\n env=env, stderr=stderr)\n except subprocess.CalledProcessError:\n if sys.platform == 'win32' and cache_dir.startswith('/'):\n cache_dir = utils.convert_unix_path_to_win(cache_dir)\n remote_details = utils.check_output_env([git_exe, \"--git-dir\", cache_dir,\n \"remote\", \"-v\"],\n env=env, stderr=stderr)\n remote_details = remote_details.decode('utf-8')\n remote_url = remote_details.split('\\n')[0].split()[1]\n\n # on windows, remote URL comes back to us as cygwin or msys format. Python doesn't\n # know how to normalize it. Need to convert it to a windows path.\n if sys.platform == 'win32' and remote_url.startswith('/'):\n remote_url = utils.convert_unix_path_to_win(git_url)\n\n if os.path.exists(remote_url):\n # Local filepaths are allowed, but make sure we normalize them\n remote_url = normpath(remote_url)\n\n # If the current source directory in conda-bld/work doesn't match the user's\n # metadata git_url or git_rev, then we aren't looking at the right source.\n if not os.path.isdir(remote_url) and remote_url.lower() != git_url.lower():\n log.debug(\"remote does not match git_url\")\n log.debug(\"Remote: \" + remote_url.lower())\n log.debug(\"git_url: \" + git_url.lower())\n OK = False\n except subprocess.CalledProcessError as error:\n log.debug(\"Error obtaining git information in verify_git_repo. Error was: \")\n log.debug(str(error))\n OK = False\n finally:\n if not debug:\n FNULL.close()\n return OK\n\n\n@memoized\ndef get_git_info(git_exe, repo, debug):\n \"\"\"\n Given a repo to a git repo, return a dictionary of:\n GIT_DESCRIBE_TAG\n GIT_DESCRIBE_NUMBER\n GIT_DESCRIBE_HASH\n GIT_FULL_HASH\n GIT_BUILD_STR\n from the output of git describe.\n :return:\n \"\"\"\n d = {}\n log = utils.get_logger(__name__)\n\n if debug:\n stderr = None\n else:\n FNULL = open(os.devnull, 'w')\n stderr = FNULL\n\n # grab information from describe\n env = os.environ.copy()\n env['GIT_DIR'] = repo\n keys = [\"GIT_DESCRIBE_TAG\", \"GIT_DESCRIBE_NUMBER\", \"GIT_DESCRIBE_HASH\"]\n\n try:\n output = utils.check_output_env([git_exe, \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n env=env, cwd=os.path.dirname(repo),\n stderr=stderr).splitlines()[0]\n output = output.decode('utf-8')\n parts = output.rsplit('-', 2)\n if len(parts) == 3:\n d.update(dict(zip(keys, parts)))\n except subprocess.CalledProcessError:\n msg = (\n \"Failed to obtain git tag information.\\n\"\n \"Consider using annotated tags if you are not already \"\n \"as they are more reliable when used with git describe.\"\n )\n log.debug(msg)\n\n try:\n # get the _full_ hash of the current HEAD\n output = utils.check_output_env([git_exe, \"rev-parse\", \"HEAD\"],\n env=env, cwd=os.path.dirname(repo),\n stderr=stderr).splitlines()[0]\n output = output.decode('utf-8')\n\n d['GIT_FULL_HASH'] = output\n except subprocess.CalledProcessError as error:\n log.debug(\"Error obtaining git commit information. 
Error was: \")\n log.debug(str(error))\n\n # set up the build string\n if \"GIT_DESCRIBE_NUMBER\" in d and \"GIT_DESCRIBE_HASH\" in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[\"GIT_DESCRIBE_NUMBER\"],\n d[\"GIT_DESCRIBE_HASH\"])\n\n # issues on Windows with the next line of the command prompt being recorded here.\n assert not any(\"\\n\" in value for value in d.values())\n return d\n\n\ndef get_hg_build_info(repo):\n env = os.environ.copy()\n env['HG_DIR'] = repo\n env = {str(key): str(value) for key, value in env.items()}\n\n d = {}\n cmd = [\"hg\", \"log\", \"--template\",\n \"{rev}|{node|short}|{latesttag}|{latesttagdistance}|{branch}\",\n \"--rev\", \".\"]\n output = utils.check_output_env(cmd, env=env, cwd=os.path.dirname(repo))\n output = output.decode('utf-8')\n rev, short_id, tag, distance, branch = output.split('|')\n if tag != 'null':\n d['HG_LATEST_TAG'] = tag\n if branch == \"\":\n branch = 'default'\n d['HG_BRANCH'] = branch\n d['HG_NUM_ID'] = rev\n d['HG_LATEST_TAG_DISTANCE'] = distance\n d['HG_SHORT_ID'] = short_id\n d['HG_BUILD_STR'] = '{}_{}'.format(d['HG_NUM_ID'], d['HG_SHORT_ID'])\n return d\n\n\ndef get_dict(m, prefix=None, for_env=True, skip_build_id=False, escape_backslash=False):\n if not prefix:\n prefix = m.config.host_prefix\n\n # conda-build specific vars\n d = conda_build_vars(prefix, m.config)\n\n # languages\n d.update(python_vars(m, prefix, escape_backslash))\n d.update(perl_vars(m, prefix, escape_backslash))\n d.update(lua_vars(m, prefix, escape_backslash))\n d.update(r_vars(m, prefix, escape_backslash))\n\n if m:\n d.update(meta_vars(m, skip_build_id=skip_build_id))\n\n # system\n d.update(system_vars(d, m, prefix))\n\n # features\n d.update({feat.upper(): str(int(value)) for feat, value in\n feature_list})\n\n for k, v in m.config.variant.items():\n if not for_env or (k.upper() not in d and k.upper() not in LANGUAGES):\n d[k] = v\n return d\n\n\ndef conda_build_vars(prefix, config):\n src_dir = config.test_dir if os.path.basename(prefix)[:2] == '_t' else config.work_dir\n return {\n 'CONDA_BUILD': '1',\n 'PYTHONNOUSERSITE': '1',\n 'CONDA_DEFAULT_ENV': config.host_prefix,\n 'ARCH': str(config.host_arch),\n # This is the one that is most important for where people put artifacts that get bundled.\n # It is fed from our function argument, and can be any of:\n # 1. Build prefix - when host requirements are not explicitly set,\n # then prefix = build prefix = host prefix\n # 2. Host prefix - when host requirements are explicitly set, prefix = host prefix\n # 3. Test prefix - during test runs, this points at the test prefix\n 'PREFIX': prefix,\n # This is for things that are specifically build tools. 
Things that run on the build\n # platform, but probably should not be linked against, since they may not run on the\n # destination host platform\n # It can be equivalent to config.host_prefix if the host section is not explicitly set.\n 'BUILD_PREFIX': config.build_prefix,\n 'SYS_PREFIX': sys.prefix,\n 'SYS_PYTHON': sys.executable,\n 'SUBDIR': config.host_subdir,\n 'SRC_DIR': src_dir,\n 'HTTPS_PROXY': os.getenv('HTTPS_PROXY', ''),\n 'HTTP_PROXY': os.getenv('HTTP_PROXY', ''),\n 'REQUESTS_CA_BUNDLE': os.getenv('REQUESTS_CA_BUNDLE', ''),\n 'DIRTY': '1' if config.dirty else '',\n 'ROOT': root_dir,\n }\n\n\ndef python_vars(metadata, prefix, escape_backslash):\n py_ver = get_py_ver(metadata.config)\n stdlib_dir = utils.get_stdlib_dir(prefix, py_ver)\n sp_dir = utils.get_site_packages(prefix, py_ver)\n\n if utils.on_win and escape_backslash:\n stdlib_dir = stdlib_dir.replace('\\\\', '\\\\\\\\')\n sp_dir = sp_dir.replace('\\\\', '\\\\\\\\')\n\n vars_ = {\n 'CONDA_PY': ''.join(py_ver.split('.')[:2]),\n 'PY3K': str(int(int(py_ver[0]) >= 3)),\n 'PY_VER': py_ver,\n 'STDLIB_DIR': stdlib_dir,\n 'SP_DIR': sp_dir,\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'python' in deps or metadata.name(fail_ok=True) == 'python':\n python_bin = metadata.config.python_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n python_bin = python_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n # host prefix is always fine, because it is the same as build when is_cross is False\n 'PYTHON': python_bin,\n })\n\n np_ver = metadata.config.variant.get('numpy', get_default_variant(metadata.config)['numpy'])\n vars_['NPY_VER'] = '.'.join(np_ver.split('.')[:2])\n vars_['CONDA_NPY'] = ''.join(np_ver.split('.')[:2])\n vars_['NPY_DISTUTILS_APPEND_FLAGS'] = '1'\n return vars_\n\n\ndef perl_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'PERL_VER': get_perl_ver(metadata.config),\n 'CONDA_PERL': get_perl_ver(metadata.config),\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'perl' in deps or metadata.name(fail_ok=True) == 'perl':\n perl_bin = metadata.config.perl_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n perl_bin = perl_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n # host prefix is always fine, because it is the same as build when is_cross is False\n 'PERL': perl_bin,\n })\n return vars_\n\n\ndef lua_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'LUA_VER': get_lua_ver(metadata.config),\n 'CONDA_LUA': get_lua_ver(metadata.config),\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'lua' in deps:\n lua_bin = metadata.config.lua_bin(prefix, metadata.config.host_subdir)\n lua_include_dir = get_lua_include_dir(metadata.config)\n\n if utils.on_win and escape_backslash:\n lua_bin = lua_bin.replace('\\\\', '\\\\\\\\')\n lua_include_dir = lua_include_dir.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n 'LUA': lua_bin,\n 'LUA_INCLUDE_DIR': lua_include_dir,\n })\n return vars_\n\n\ndef r_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'R_VER': get_r_ver(metadata.config),\n 'CONDA_R': get_r_ver(metadata.config),\n }\n\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'r-base' in deps or 
'mro-base' in deps or metadata.name(fail_ok=True) in (\n 'r-base', 'mro-base'):\n r_bin = metadata.config.r_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n r_bin = r_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n 'R': r_bin,\n })\n return vars_\n\n\ndef meta_vars(meta, skip_build_id=False):\n d = {}\n for var_name in ensure_list(meta.get_value('build/script_env', [])):\n value = os.getenv(var_name)\n if value is None:\n warnings.warn(\n \"The environment variable '%s' is undefined.\" % var_name,\n UserWarning\n )\n else:\n d[var_name] = value\n warnings.warn(\n \"The environment variable '%s' is being passed through with value %s. \"\n \"If you are splitting build and test phases with --no-test, please ensure \"\n \"that this value is also set similarly at test time.\" % (var_name, value),\n UserWarning\n )\n\n folder = meta.get_value('source/0/folder', '')\n repo_dir = join(meta.config.work_dir, folder)\n git_dir = join(repo_dir, '.git')\n hg_dir = join(repo_dir, '.hg')\n\n if not isinstance(git_dir, str):\n # On Windows, subprocess env can't handle unicode.\n git_dir = git_dir.encode(sys.getfilesystemencoding() or 'utf-8')\n\n git_exe = external.find_executable('git', meta.config.build_prefix)\n if git_exe and os.path.exists(git_dir):\n # We set all 'source' metavars using the FIRST source entry in meta.yaml.\n git_url = meta.get_value('source/0/git_url')\n\n if os.path.exists(git_url):\n if sys.platform == 'win32':\n git_url = utils.convert_unix_path_to_win(git_url)\n # If git_url is a relative path instead of a url, convert it to an abspath\n git_url = normpath(join(meta.path, git_url))\n\n _x = False\n\n if git_url:\n _x = verify_git_repo(git_exe,\n git_dir,\n git_url,\n meta.config.git_commits_since_tag,\n meta.config.debug,\n meta.get_value('source/0/git_rev', 'HEAD'))\n\n if _x or meta.get_value('source/0/path'):\n d.update(get_git_info(git_exe, git_dir, meta.config.debug))\n\n elif external.find_executable('hg', meta.config.build_prefix) and os.path.exists(hg_dir):\n d.update(get_hg_build_info(hg_dir))\n\n # use `get_value` to prevent early exit while name is still unresolved during rendering\n d['PKG_NAME'] = meta.get_value('package/name')\n d['PKG_VERSION'] = meta.version()\n d['PKG_BUILDNUM'] = str(meta.build_number() or 0)\n if meta.final and not skip_build_id:\n d['PKG_BUILD_STRING'] = str(meta.build_id())\n d['PKG_HASH'] = meta.hash_dependencies()\n else:\n d['PKG_BUILD_STRING'] = 'placeholder'\n d['PKG_HASH'] = '1234567'\n d['RECIPE_DIR'] = (meta.path if meta.path else\n meta.meta.get('extra', {}).get('parent_recipe', {}).get('path', ''))\n return d\n\n\n@memoized\ndef get_cpu_count():\n if sys.platform == \"darwin\":\n # multiprocessing.cpu_count() is not reliable on OSX\n # See issue #645 on github.com/conda/conda-build\n out, _ = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True,\n stdout=subprocess.PIPE).communicate()\n return out.decode('utf-8').strip()\n else:\n try:\n return str(multiprocessing.cpu_count())\n except NotImplementedError:\n return \"1\"\n\n\ndef get_shlib_ext():\n # Return the shared library extension.\n if sys.platform == 'win32':\n return '.dll'\n elif sys.platform == 'darwin':\n return '.dylib'\n elif sys.platform.startswith('linux'):\n return '.so'\n else:\n raise NotImplementedError(sys.platform)\n\n\ndef windows_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n # We have gone for the clang values here.\n win_arch = 'i386' if 
str(m.config.host_arch) == '32' else 'amd64'\n win_msvc = '19.0.0' if PY3 else '15.0.0'\n library_prefix = join(prefix, 'Library')\n drive, tail = m.config.host_prefix.split(':')\n get_default('SCRIPTS', join(prefix, 'Scripts'))\n get_default('LIBRARY_PREFIX', library_prefix)\n get_default('LIBRARY_BIN', join(library_prefix, 'bin'))\n get_default('LIBRARY_INC', join(library_prefix, 'include'))\n get_default('LIBRARY_LIB', join(library_prefix, 'lib'))\n get_default('CYGWIN_PREFIX', ''.join(('/cygdrive/', drive.lower(), tail.replace('\\\\', '/'))))\n # see https://en.wikipedia.org/wiki/Environment_variable#Default_values\n get_default('ALLUSERSPROFILE')\n get_default('APPDATA')\n get_default('CommonProgramFiles')\n get_default('CommonProgramFiles(x86)')\n get_default('CommonProgramW6432')\n get_default('COMPUTERNAME')\n get_default('ComSpec')\n get_default('HOMEDRIVE')\n get_default('HOMEPATH')\n get_default('LOCALAPPDATA')\n get_default('LOGONSERVER')\n get_default('NUMBER_OF_PROCESSORS')\n get_default('PATHEXT')\n get_default('ProgramData')\n get_default('ProgramFiles')\n get_default('ProgramFiles(x86)')\n get_default('ProgramW6432')\n get_default('PROMPT')\n get_default('PSModulePath')\n get_default('PUBLIC')\n get_default('SystemDrive')\n get_default('SystemRoot')\n get_default('TEMP')\n get_default('TMP')\n get_default('USERDOMAIN')\n get_default('USERNAME')\n get_default('USERPROFILE')\n get_default('windir')\n # CPU data, see https://github.com/conda/conda-build/issues/2064\n get_default('PROCESSOR_ARCHITEW6432')\n get_default('PROCESSOR_ARCHITECTURE')\n get_default('PROCESSOR_IDENTIFIER')\n get_default('BUILD', win_arch + '-pc-windows-' + win_msvc)\n for env_var in os.environ.keys():\n if re.match('VS[0-9]{2,3}COMNTOOLS', env_var):\n get_default(env_var)\n\n\ndef unix_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n get_default('HOME', 'UNKNOWN')\n get_default('PKG_CONFIG_PATH', join(prefix, 'lib', 'pkgconfig'))\n get_default('CMAKE_GENERATOR', 'Unix Makefiles')\n get_default('SSL_CERT_FILE')\n\n\ndef osx_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n OSX_ARCH = 'i386' if str(m.config.host_arch) == '32' else 'x86_64'\n # 10.7 install_name_tool -delete_rpath causes broken dylibs, I will revisit this ASAP.\n # rpath = ' -Wl,-rpath,%(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.\n # d['LDFLAGS'] = ldflags + rpath + ' -arch %(OSX_ARCH)s' % d\n get_default('OSX_ARCH', OSX_ARCH)\n get_default('MACOSX_DEPLOYMENT_TARGET', '10.9')\n get_default('BUILD', OSX_ARCH + '-apple-darwin13.4.0')\n\n\n@memoized\ndef _machine_and_architecture():\n return platform.machine(), platform.architecture()\n\n\ndef linux_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n platform_machine, platform_architecture = _machine_and_architecture()\n build_arch = platform_machine\n # Python reports x86_64 when running a i686 Python binary on a 64-bit CPU\n # unless run through linux32. Issue a warning when we detect this.\n if build_arch == 'x86_64' and platform_architecture[0] == '32bit':\n print(\"Warning: You are running 32-bit Python on a 64-bit linux installation\")\n print(\" but have not launched it via linux32. Various qeuries *will*\")\n print(\" give unexpected results (uname -m, platform.machine() etc)\")\n build_arch = 'i686'\n # the GNU triplet is powerpc, not ppc. 
This matters.\n if build_arch.startswith('ppc'):\n build_arch = build_arch.replace('ppc', 'powerpc')\n if build_arch.startswith('powerpc'):\n build_distro = 'cos7'\n else:\n build_distro = 'cos6'\n # There is also QEMU_SET_ENV, but that needs to be\n # filtered so it only contains the result of `linux_vars`\n # which, before this change was empty, and after it only\n # contains other QEMU env vars.\n get_default('CFLAGS')\n get_default('CXXFLAGS')\n get_default('LDFLAGS')\n get_default('QEMU_LD_PREFIX')\n get_default('QEMU_UNAME')\n get_default('DEJAGNU')\n get_default('DISPLAY')\n get_default('LD_RUN_PATH', prefix + '/lib')\n get_default('BUILD', build_arch + '-conda_' + build_distro + '-linux-gnu')\n\n\ndef set_from_os_or_variant(out_dict, key, variant, default):\n value = os.getenv(key)\n if not value:\n value = variant.get(key, default)\n if value:\n out_dict[key] = value\n\n\n@memoized\ndef system_vars(env_dict, m, prefix):\n d = dict()\n # note the dictionary is passed in here - variables are set in that dict if they are non-null\n get_default = lambda key, default='': set_from_os_or_variant(d, key, m.config.variant, default)\n\n get_default('CPU_COUNT', get_cpu_count())\n get_default('LANG')\n get_default('LC_ALL')\n get_default('MAKEFLAGS')\n d['SHLIB_EXT'] = get_shlib_ext()\n d['PATH'] = os.environ.copy()['PATH']\n\n if not m.config.activate:\n d = prepend_bin_path(d, m.config.host_prefix)\n\n if sys.platform == 'win32':\n windows_vars(m, get_default, prefix)\n else:\n unix_vars(m, get_default, prefix)\n\n if sys.platform == 'darwin':\n osx_vars(m, get_default, prefix)\n elif sys.platform.startswith('linux'):\n linux_vars(m, get_default, prefix)\n\n return d\n\n\nclass InvalidEnvironment(Exception):\n pass\n\n\n# Stripped-down Environment class from conda-tools ( https://github.com/groutr/conda-tools )\n# Vendored here to avoid the whole dependency for just this bit.\ndef _load_json(path):\n with open(path, 'r') as fin:\n x = json.load(fin)\n return x\n\n\ndef _load_all_json(path):\n \"\"\"\n Load all json files in a directory. 
Return dictionary with filenames mapped to json\n dictionaries.\n \"\"\"\n root, _, files = next(utils.walk(path))\n result = {}\n for f in files:\n if f.endswith('.json'):\n result[f] = _load_json(join(root, f))\n return result\n\n\nclass Environment(object):\n def __init__(self, path):\n \"\"\"\n Initialize an Environment object.\n\n To reflect changes in the underlying environment, a new Environment object should be\n created.\n \"\"\"\n self.path = path\n self._meta = join(path, 'conda-meta')\n if os.path.isdir(path) and os.path.isdir(self._meta):\n self._packages = {}\n else:\n raise InvalidEnvironment('Unable to load environment {}'.format(path))\n\n def _read_package_json(self):\n if not self._packages:\n self._packages = _load_all_json(self._meta)\n\n def package_specs(self):\n \"\"\"\n List all package specs in the environment.\n \"\"\"\n self._read_package_json()\n json_objs = self._packages.values()\n specs = []\n for i in json_objs:\n p, v, b = i['name'], i['version'], i['build']\n specs.append('{} {} {}'.format(p, v, b))\n return specs\n\n\ncached_actions = {}\nlast_index_ts = 0\n\n\ndef get_install_actions(prefix, specs, env, retries=0, subdir=None,\n verbose=True, debug=False, locking=True,\n bldpkgs_dirs=None, timeout=90, disable_pip=False,\n max_env_retry=3, output_folder=None, channel_urls=None):\n global cached_actions\n global last_index_ts\n actions = {}\n log = utils.get_logger(__name__)\n conda_log_level = logging.WARN\n specs = list(specs)\n if verbose:\n capture = contextlib.contextmanager(lambda: (yield))\n elif debug:\n capture = contextlib.contextmanager(lambda: (yield))\n conda_log_level = logging.DEBUG\n else:\n capture = utils.capture\n for feature, value in feature_list:\n if value:\n specs.append('%s@' % feature)\n\n bldpkgs_dirs = ensure_list(bldpkgs_dirs)\n\n index, index_ts = get_build_index(subdir, list(bldpkgs_dirs)[0], output_folder=output_folder,\n channel_urls=channel_urls, debug=debug, verbose=verbose,\n locking=locking, timeout=timeout)\n specs = tuple(utils.ensure_valid_spec(spec) for spec in specs if not str(spec).endswith('@'))\n\n if ((specs, env, subdir, channel_urls, disable_pip) in cached_actions and\n last_index_ts >= index_ts):\n actions = cached_actions[(specs, env, subdir, channel_urls, disable_pip)].copy()\n if \"PREFIX\" in actions:\n actions['PREFIX'] = prefix\n elif specs:\n # this is hiding output like:\n # Fetching package metadata ...........\n # Solving package specifications: ..........\n with utils.LoggingContext(conda_log_level):\n with capture():\n try:\n actions = install_actions(prefix, index, specs, force=True)\n except (NoPackagesFoundError, UnsatisfiableError) as exc:\n raise DependencyNeedsBuildingError(exc, subdir=subdir)\n except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,\n CondaError, AssertionError) as exc:\n if 'lock' in str(exc):\n log.warn(\"failed to get install actions, retrying. exception was: %s\",\n str(exc))\n elif ('requires a minimum conda version' in str(exc) or\n 'link a source that does not' in str(exc) or\n isinstance(exc, AssertionError)):\n locks = utils.get_conda_operation_locks(locking, bldpkgs_dirs, timeout)\n with utils.try_acquire_locks(locks, timeout=timeout):\n pkg_dir = str(exc)\n folder = 0\n while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:\n pkg_dir = os.path.dirname(pkg_dir)\n folder += 1\n log.warn(\"I think conda ended up with a partial extraction for %s. 
\"\n                                     \"Removing the folder and retrying\", pkg_dir)\n                            if pkg_dir in pkgs_dirs and os.path.isdir(pkg_dir):\n                                utils.rm_rf(pkg_dir)\n                        if retries < max_env_retry:\n                            log.warn(\"failed to get install actions, retrying. exception was: %s\",\n                                     str(exc))\n                            actions = get_install_actions(prefix, tuple(specs), env,\n                                                          retries=retries + 1,\n                                                          subdir=subdir,\n                                                          verbose=verbose,\n                                                          debug=debug,\n                                                          locking=locking,\n                                                          bldpkgs_dirs=tuple(bldpkgs_dirs),\n                                                          timeout=timeout,\n                                                          disable_pip=disable_pip,\n                                                          max_env_retry=max_env_retry,\n                                                          output_folder=output_folder,\n                                                          channel_urls=tuple(channel_urls))\n                        else:\n                            log.error(\"Failed to get install actions, max retries exceeded.\")\n                            raise\n    if disable_pip:\n        for pkg in ('pip', 'setuptools', 'wheel'):\n            # specs are the raw specifications, not the conda-derived actual specs\n            # We're testing that pip etc. are manually specified\n            if not any(re.match('^%s(?:$|[\\s=].*)' % pkg, str(dep)) for dep in specs):\n                actions['LINK'] = [spec for spec in actions['LINK'] if spec.name != pkg]\n    utils.trim_empty_keys(actions)\n    cached_actions[(specs, env, subdir, channel_urls, disable_pip)] = actions.copy()\n    last_index_ts = index_ts\n    return actions\n\n\ndef create_env(prefix, specs_or_actions, env, config, subdir, clear_cache=True, retry=0,\n               locks=None, is_cross=False, is_conda=False):\n    '''\n    Create a conda environment for the given prefix and specs.\n    '''\n    if config.debug:\n        external_logger_context = utils.LoggingContext(logging.DEBUG)\n    else:\n        external_logger_context = utils.LoggingContext(logging.WARN)\n\n    with external_logger_context:\n        log = utils.get_logger(__name__)\n\n        # if os.path.isdir(prefix):\n        #     utils.rm_rf(prefix)\n\n        if specs_or_actions:  # Don't waste time if there is nothing to do\n            log.debug(\"Creating environment in %s\", prefix)\n            log.debug(str(specs_or_actions))\n\n            with utils.path_prepended(prefix):\n                if not locks:\n                    locks = utils.get_conda_operation_locks(config)\n                try:\n                    with utils.try_acquire_locks(locks, timeout=config.timeout):\n                        # input is a list - it's specs in MatchSpec format\n                        if not hasattr(specs_or_actions, 'keys'):\n                            specs = list(set(specs_or_actions))\n                            actions = get_install_actions(prefix, tuple(specs), env,\n                                                          subdir=subdir,\n                                                          verbose=config.verbose,\n                                                          debug=config.debug,\n                                                          locking=config.locking,\n                                                          bldpkgs_dirs=tuple(config.bldpkgs_dirs),\n                                                          timeout=config.timeout,\n                                                          disable_pip=config.disable_pip,\n                                                          max_env_retry=config.max_env_retry,\n                                                          output_folder=config.output_folder,\n                                                          channel_urls=tuple(config.channel_urls))\n                        else:\n                            actions = specs_or_actions\n                        index, index_ts = get_build_index(subdir=subdir,\n                                                          bldpkgs_dir=config.bldpkgs_dir,\n                                                          output_folder=config.output_folder,\n                                                          channel_urls=config.channel_urls,\n                                                          debug=config.debug,\n                                                          verbose=config.verbose,\n                                                          locking=config.locking,\n                                                          timeout=config.timeout)\n                        utils.trim_empty_keys(actions)\n                        display_actions(actions, index)\n                        if utils.on_win:\n                            for k, v in os.environ.items():\n                                os.environ[k] = str(v)\n                        execute_actions(actions, index, verbose=config.debug)\n                except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,\n                        CondaError) as exc:\n                    if ((\"too short in\" in str(exc) or\n                         re.search('post-link failed for: (?:[a-zA-Z]*::)?openssl', str(exc)) or\n                         isinstance(exc, PaddingError)) and\n                            config.prefix_length > 80):\n                        if config.prefix_length_fallback:\n                            log.warn(\"Build prefix failed with prefix length %d\",\n                                     config.prefix_length)\n                            log.warn(\"Error was: \")\n                            log.warn(str(exc))\n                            log.warn(\"One or more of your package dependencies needs to be rebuilt \"\n                                     \"with a longer prefix 
length.\")\n log.warn(\"Falling back to legacy prefix length of 80 characters.\")\n log.warn(\"Your package will not install into prefixes > 80 characters.\")\n config.prefix_length = 80\n\n host = '_h_env' in prefix\n # Set this here and use to create environ\n # Setting this here is important because we use it below (symlink)\n prefix = config.host_prefix if host else config.build_prefix\n actions['PREFIX'] = prefix\n\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, is_cross=is_cross)\n else:\n raise\n elif 'lock' in str(exc):\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n elif ('requires a minimum conda version' in str(exc) or\n 'link a source that does not' in str(exc)):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n pkg_dir = str(exc)\n folder = 0\n while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:\n pkg_dir = os.path.dirname(pkg_dir)\n folder += 1\n log.warn(\"I think conda ended up with a partial extraction for %s. \"\n \"Removing the folder and retrying\", pkg_dir)\n if os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n else:\n log.error(\"Failed to create env, max retries exceeded.\")\n raise\n else:\n raise\n # HACK: some of the time, conda screws up somehow and incomplete packages result.\n # Just retry.\n except (AssertionError, IOError, ValueError, RuntimeError, LockError) as exc:\n if isinstance(exc, AssertionError):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n pkg_dir = os.path.dirname(os.path.dirname(str(exc)))\n log.warn(\"I think conda ended up with a partial extraction for %s. \"\n \"Removing the folder and retrying\", pkg_dir)\n if os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n else:\n log.error(\"Failed to create env, max retries exceeded.\")\n raise\n\n if not is_conda:\n # Symlinking conda is critical here to make sure that activate scripts are not\n # accidentally included in packages.\n if utils.on_win:\n shell = \"cmd.exe\"\n else:\n shell = \"bash\"\n symlink_conda(prefix, sys.prefix, shell)\n\n\ndef clean_pkg_cache(dist, config):\n locks = []\n\n conda_log_level = logging.WARN\n if config.debug:\n conda_log_level = logging.DEBUG\n\n _pkgs_dirs = pkgs_dirs[:1]\n if config.locking:\n locks = [utils.get_lock(folder, timeout=config.timeout) for folder in _pkgs_dirs]\n with utils.LoggingContext(conda_log_level):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n rmplan = [\n 'RM_EXTRACTED {0} local::{0}'.format(dist),\n 'RM_FETCHED {0} local::{0}'.format(dist),\n ]\n execute_plan(rmplan)\n\n # Conda does not seem to do a complete cleanup sometimes. 
This is supplemental.\n    # Conda's cleanup is still necessary - it keeps track of its own in-memory\n    # list of downloaded things.\n    for folder in pkgs_dirs:\n        try:\n            assert not os.path.exists(os.path.join(folder, dist))\n            assert not os.path.exists(os.path.join(folder, dist + '.tar.bz2'))\n            for pkg_id in [dist, 'local::' + dist]:\n                assert pkg_id not in package_cache()\n        except AssertionError:\n            log = utils.get_logger(__name__)\n            log.debug(\"Conda caching error: %s package remains in cache after removal\",\n                      dist)\n            log.debug(\"manually removing to compensate\")\n            cache = package_cache()\n            keys = [key for key in cache.keys() if dist in key]\n            for pkg_id in keys:\n                if pkg_id in cache:\n                    del cache[pkg_id]\n            for entry in glob(os.path.join(folder, dist + '*')):\n                utils.rm_rf(entry)\n\n\ndef get_pinned_deps(m, section):\n    with TemporaryDirectory(prefix='_') as tmpdir:\n        actions = get_install_actions(tmpdir,\n                                      tuple(m.ms_depends(section)), section,\n                                      subdir=m.config.target_subdir,\n                                      debug=m.config.debug,\n                                      verbose=m.config.verbose,\n                                      locking=m.config.locking,\n                                      bldpkgs_dirs=tuple(m.config.bldpkgs_dirs),\n                                      timeout=m.config.timeout,\n                                      disable_pip=m.config.disable_pip,\n                                      max_env_retry=m.config.max_env_retry,\n                                      output_folder=m.config.output_folder,\n                                      channel_urls=tuple(m.config.channel_urls))\n    runtime_deps = [' '.join(link.dist_name.rsplit('-', 2)) for link in actions.get('LINK', [])]\n    return runtime_deps\n","path":"conda_build/environ.py"}]},"after_files":{"kind":"list like","value":[{"content":"from __future__ import absolute_import, division, print_function\n\nimport contextlib\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport warnings\nfrom glob import glob\nfrom os.path import join, normpath\n\n# noqa here because PY3 is used only on windows, and trips up flake8 otherwise.\nfrom .conda_interface import text_type, PY3  # noqa\nfrom .conda_interface import (CondaError, LinkError, LockError, NoPackagesFoundError,\n                              PaddingError, UnsatisfiableError)\nfrom .conda_interface import display_actions, execute_actions, execute_plan, install_actions\nfrom .conda_interface import memoized\nfrom .conda_interface import package_cache, TemporaryDirectory\nfrom .conda_interface import pkgs_dirs, root_dir, symlink_conda\n\nfrom conda_build import utils\nfrom 
conda_build.exceptions import DependencyNeedsBuildingError\nfrom conda_build.features import feature_list\nfrom conda_build.index import get_build_index\nfrom conda_build.os_utils import external\nfrom conda_build.utils import ensure_list, prepend_bin_path\nfrom conda_build.variants import get_default_variant\n\n\n# these are things that we provide env vars for more explicitly. This list disables the\n# pass-through of variant values to env vars for these keys.\nLANGUAGES = ('PERL', 'LUA', 'R', \"NUMPY\", 'PYTHON')\n\n\ndef get_perl_ver(config):\n return '.'.join(config.variant.get('perl', get_default_variant(config)['perl']).split('.')[:2])\n\n\ndef get_lua_ver(config):\n return '.'.join(config.variant.get('lua', get_default_variant(config)['lua']).split('.')[:2])\n\n\ndef get_py_ver(config):\n py = config.variant.get('python', get_default_variant(config)['python'])\n if not hasattr(py, 'split'):\n py = py[0]\n return '.'.join(py.split('.')[:2])\n\n\ndef get_r_ver(config):\n return '.'.join(config.variant.get('r_base',\n get_default_variant(config)['r_base']).split('.')[:3])\n\n\ndef get_npy_ver(config):\n conda_npy = ''.join(str(config.variant.get('numpy') or\n get_default_variant(config)['numpy']).split('.'))\n # Convert int -> string, e.g.\n # 17 -> '1.7'\n # 110 -> '1.10'\n return conda_npy[0] + '.' + conda_npy[1:]\n\n\ndef get_lua_include_dir(config):\n return join(config.host_prefix, \"include\")\n\n\n@memoized\ndef verify_git_repo(git_exe, git_dir, git_url, git_commits_since_tag, debug=False,\n expected_rev='HEAD'):\n env = os.environ.copy()\n log = utils.get_logger(__name__)\n\n if debug:\n stderr = None\n else:\n FNULL = open(os.devnull, 'w')\n stderr = FNULL\n\n if not expected_rev:\n return False\n\n OK = True\n\n env['GIT_DIR'] = git_dir\n try:\n # Verify current commit (minus our locally applied patches) matches expected commit\n current_commit = utils.check_output_env([git_exe,\n \"log\",\n \"-n1\",\n \"--format=%H\",\n \"HEAD\" + \"^\" * git_commits_since_tag],\n env=env, stderr=stderr)\n current_commit = current_commit.decode('utf-8')\n expected_tag_commit = utils.check_output_env([git_exe, \"log\", \"-n1\", \"--format=%H\",\n expected_rev],\n env=env, stderr=stderr)\n expected_tag_commit = expected_tag_commit.decode('utf-8')\n\n if current_commit != expected_tag_commit:\n return False\n\n # Verify correct remote url. Need to find the git cache directory,\n # and check the remote from there.\n cache_details = utils.check_output_env([git_exe, \"remote\", \"-v\"], env=env,\n stderr=stderr)\n cache_details = cache_details.decode('utf-8')\n cache_dir = cache_details.split('\\n')[0].split()[1]\n\n if not isinstance(cache_dir, str):\n # On Windows, subprocess env can't handle unicode.\n cache_dir = cache_dir.encode(sys.getfilesystemencoding() or 'utf-8')\n\n try:\n remote_details = utils.check_output_env([git_exe, \"--git-dir\", cache_dir,\n \"remote\", \"-v\"],\n env=env, stderr=stderr)\n except subprocess.CalledProcessError:\n if sys.platform == 'win32' and cache_dir.startswith('/'):\n cache_dir = utils.convert_unix_path_to_win(cache_dir)\n remote_details = utils.check_output_env([git_exe, \"--git-dir\", cache_dir,\n \"remote\", \"-v\"],\n env=env, stderr=stderr)\n remote_details = remote_details.decode('utf-8')\n remote_url = remote_details.split('\\n')[0].split()[1]\n\n # on windows, remote URL comes back to us as cygwin or msys format. Python doesn't\n # know how to normalize it. 
Need to convert it to a windows path.\n        if sys.platform == 'win32' and remote_url.startswith('/'):\n            remote_url = utils.convert_unix_path_to_win(git_url)\n\n        if os.path.exists(remote_url):\n            # Local filepaths are allowed, but make sure we normalize them\n            remote_url = normpath(remote_url)\n\n        # If the current source directory in conda-bld/work doesn't match the user's\n        # metadata git_url or git_rev, then we aren't looking at the right source.\n        if not os.path.isdir(remote_url) and remote_url.lower() != git_url.lower():\n            log.debug(\"remote does not match git_url\")\n            log.debug(\"Remote: \" + remote_url.lower())\n            log.debug(\"git_url: \" + git_url.lower())\n            OK = False\n    except subprocess.CalledProcessError as error:\n        log.debug(\"Error obtaining git information in verify_git_repo. Error was: \")\n        log.debug(str(error))\n        OK = False\n    finally:\n        if not debug:\n            FNULL.close()\n    return OK\n\n\ndef get_git_info(git_exe, repo, debug):\n    \"\"\"\n    Given a path to a git repo, return a dictionary of:\n      GIT_DESCRIBE_TAG\n      GIT_DESCRIBE_NUMBER\n      GIT_DESCRIBE_HASH\n      GIT_FULL_HASH\n      GIT_BUILD_STR\n    from the output of git describe.\n    :return:\n    \"\"\"\n    d = {}\n    log = utils.get_logger(__name__)\n\n    if debug:\n        stderr = None\n    else:\n        FNULL = open(os.devnull, 'w')\n        stderr = FNULL\n\n    # grab information from describe\n    env = os.environ.copy()\n    env['GIT_DIR'] = repo\n    keys = [\"GIT_DESCRIBE_TAG\", \"GIT_DESCRIBE_NUMBER\", \"GIT_DESCRIBE_HASH\"]\n\n    try:\n        output = utils.check_output_env([git_exe, \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n                                        env=env, cwd=os.path.dirname(repo),\n                                        stderr=stderr).splitlines()[0]\n        output = output.decode('utf-8')\n        parts = output.rsplit('-', 2)\n        if len(parts) == 3:\n            d.update(dict(zip(keys, parts)))\n    except subprocess.CalledProcessError:\n        msg = (\n            \"Failed to obtain git tag information.\\n\"\n            \"Consider using annotated tags if you are not already \"\n            \"as they are more reliable when used with git describe.\"\n        )\n        log.debug(msg)\n\n    try:\n        # get the _full_ hash of the current HEAD\n        output = utils.check_output_env([git_exe, \"rev-parse\", \"HEAD\"],\n                                        env=env, cwd=os.path.dirname(repo),\n                                        stderr=stderr).splitlines()[0]\n        output = output.decode('utf-8')\n\n        d['GIT_FULL_HASH'] = output\n    except subprocess.CalledProcessError as error:\n        log.debug(\"Error obtaining git commit information. 
Error was: \")\n log.debug(str(error))\n\n # set up the build string\n if \"GIT_DESCRIBE_NUMBER\" in d and \"GIT_DESCRIBE_HASH\" in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[\"GIT_DESCRIBE_NUMBER\"],\n d[\"GIT_DESCRIBE_HASH\"])\n\n # issues on Windows with the next line of the command prompt being recorded here.\n assert not any(\"\\n\" in value for value in d.values())\n return d\n\n\ndef get_hg_build_info(repo):\n env = os.environ.copy()\n env['HG_DIR'] = repo\n env = {str(key): str(value) for key, value in env.items()}\n\n d = {}\n cmd = [\"hg\", \"log\", \"--template\",\n \"{rev}|{node|short}|{latesttag}|{latesttagdistance}|{branch}\",\n \"--rev\", \".\"]\n output = utils.check_output_env(cmd, env=env, cwd=os.path.dirname(repo))\n output = output.decode('utf-8')\n rev, short_id, tag, distance, branch = output.split('|')\n if tag != 'null':\n d['HG_LATEST_TAG'] = tag\n if branch == \"\":\n branch = 'default'\n d['HG_BRANCH'] = branch\n d['HG_NUM_ID'] = rev\n d['HG_LATEST_TAG_DISTANCE'] = distance\n d['HG_SHORT_ID'] = short_id\n d['HG_BUILD_STR'] = '{}_{}'.format(d['HG_NUM_ID'], d['HG_SHORT_ID'])\n return d\n\n\ndef get_dict(m, prefix=None, for_env=True, skip_build_id=False, escape_backslash=False):\n if not prefix:\n prefix = m.config.host_prefix\n\n # conda-build specific vars\n d = conda_build_vars(prefix, m.config)\n\n # languages\n d.update(python_vars(m, prefix, escape_backslash))\n d.update(perl_vars(m, prefix, escape_backslash))\n d.update(lua_vars(m, prefix, escape_backslash))\n d.update(r_vars(m, prefix, escape_backslash))\n\n if m:\n d.update(meta_vars(m, skip_build_id=skip_build_id))\n\n # system\n d.update(system_vars(d, m, prefix))\n\n # features\n d.update({feat.upper(): str(int(value)) for feat, value in\n feature_list})\n\n for k, v in m.config.variant.items():\n if not for_env or (k.upper() not in d and k.upper() not in LANGUAGES):\n d[k] = v\n return d\n\n\ndef conda_build_vars(prefix, config):\n src_dir = config.test_dir if os.path.basename(prefix)[:2] == '_t' else config.work_dir\n return {\n 'CONDA_BUILD': '1',\n 'PYTHONNOUSERSITE': '1',\n 'CONDA_DEFAULT_ENV': config.host_prefix,\n 'ARCH': str(config.host_arch),\n # This is the one that is most important for where people put artifacts that get bundled.\n # It is fed from our function argument, and can be any of:\n # 1. Build prefix - when host requirements are not explicitly set,\n # then prefix = build prefix = host prefix\n # 2. Host prefix - when host requirements are explicitly set, prefix = host prefix\n # 3. Test prefix - during test runs, this points at the test prefix\n 'PREFIX': prefix,\n # This is for things that are specifically build tools. 
Things that run on the build\n # platform, but probably should not be linked against, since they may not run on the\n # destination host platform\n # It can be equivalent to config.host_prefix if the host section is not explicitly set.\n 'BUILD_PREFIX': config.build_prefix,\n 'SYS_PREFIX': sys.prefix,\n 'SYS_PYTHON': sys.executable,\n 'SUBDIR': config.host_subdir,\n 'SRC_DIR': src_dir,\n 'HTTPS_PROXY': os.getenv('HTTPS_PROXY', ''),\n 'HTTP_PROXY': os.getenv('HTTP_PROXY', ''),\n 'REQUESTS_CA_BUNDLE': os.getenv('REQUESTS_CA_BUNDLE', ''),\n 'DIRTY': '1' if config.dirty else '',\n 'ROOT': root_dir,\n }\n\n\ndef python_vars(metadata, prefix, escape_backslash):\n py_ver = get_py_ver(metadata.config)\n stdlib_dir = utils.get_stdlib_dir(prefix, py_ver)\n sp_dir = utils.get_site_packages(prefix, py_ver)\n\n if utils.on_win and escape_backslash:\n stdlib_dir = stdlib_dir.replace('\\\\', '\\\\\\\\')\n sp_dir = sp_dir.replace('\\\\', '\\\\\\\\')\n\n vars_ = {\n 'CONDA_PY': ''.join(py_ver.split('.')[:2]),\n 'PY3K': str(int(int(py_ver[0]) >= 3)),\n 'PY_VER': py_ver,\n 'STDLIB_DIR': stdlib_dir,\n 'SP_DIR': sp_dir,\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'python' in deps or metadata.name(fail_ok=True) == 'python':\n python_bin = metadata.config.python_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n python_bin = python_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n # host prefix is always fine, because it is the same as build when is_cross is False\n 'PYTHON': python_bin,\n })\n\n np_ver = metadata.config.variant.get('numpy', get_default_variant(metadata.config)['numpy'])\n vars_['NPY_VER'] = '.'.join(np_ver.split('.')[:2])\n vars_['CONDA_NPY'] = ''.join(np_ver.split('.')[:2])\n vars_['NPY_DISTUTILS_APPEND_FLAGS'] = '1'\n return vars_\n\n\ndef perl_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'PERL_VER': get_perl_ver(metadata.config),\n 'CONDA_PERL': get_perl_ver(metadata.config),\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'perl' in deps or metadata.name(fail_ok=True) == 'perl':\n perl_bin = metadata.config.perl_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n perl_bin = perl_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n # host prefix is always fine, because it is the same as build when is_cross is False\n 'PERL': perl_bin,\n })\n return vars_\n\n\ndef lua_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'LUA_VER': get_lua_ver(metadata.config),\n 'CONDA_LUA': get_lua_ver(metadata.config),\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'lua' in deps:\n lua_bin = metadata.config.lua_bin(prefix, metadata.config.host_subdir)\n lua_include_dir = get_lua_include_dir(metadata.config)\n\n if utils.on_win and escape_backslash:\n lua_bin = lua_bin.replace('\\\\', '\\\\\\\\')\n lua_include_dir = lua_include_dir.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n 'LUA': lua_bin,\n 'LUA_INCLUDE_DIR': lua_include_dir,\n })\n return vars_\n\n\ndef r_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'R_VER': get_r_ver(metadata.config),\n 'CONDA_R': get_r_ver(metadata.config),\n }\n\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'r-base' in deps or 
'mro-base' in deps or metadata.name(fail_ok=True) in (\n 'r-base', 'mro-base'):\n r_bin = metadata.config.r_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n r_bin = r_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n 'R': r_bin,\n })\n return vars_\n\n\ndef meta_vars(meta, skip_build_id=False):\n d = {}\n for var_name in ensure_list(meta.get_value('build/script_env', [])):\n value = os.getenv(var_name)\n if value is None:\n warnings.warn(\n \"The environment variable '%s' is undefined.\" % var_name,\n UserWarning\n )\n else:\n d[var_name] = value\n warnings.warn(\n \"The environment variable '%s' is being passed through with value %s. \"\n \"If you are splitting build and test phases with --no-test, please ensure \"\n \"that this value is also set similarly at test time.\" % (var_name, value),\n UserWarning\n )\n\n folder = meta.get_value('source/0/folder', '')\n repo_dir = join(meta.config.work_dir, folder)\n git_dir = join(repo_dir, '.git')\n hg_dir = join(repo_dir, '.hg')\n\n if not isinstance(git_dir, str):\n # On Windows, subprocess env can't handle unicode.\n git_dir = git_dir.encode(sys.getfilesystemencoding() or 'utf-8')\n\n git_exe = external.find_executable('git', meta.config.build_prefix)\n if git_exe and os.path.exists(git_dir):\n # We set all 'source' metavars using the FIRST source entry in meta.yaml.\n git_url = meta.get_value('source/0/git_url')\n\n if os.path.exists(git_url):\n if sys.platform == 'win32':\n git_url = utils.convert_unix_path_to_win(git_url)\n # If git_url is a relative path instead of a url, convert it to an abspath\n git_url = normpath(join(meta.path, git_url))\n\n _x = False\n\n if git_url:\n _x = verify_git_repo(git_exe,\n git_dir,\n git_url,\n meta.config.git_commits_since_tag,\n meta.config.debug,\n meta.get_value('source/0/git_rev', 'HEAD'))\n\n if _x or meta.get_value('source/0/path'):\n d.update(get_git_info(git_exe, git_dir, meta.config.debug))\n\n elif external.find_executable('hg', meta.config.build_prefix) and os.path.exists(hg_dir):\n d.update(get_hg_build_info(hg_dir))\n\n # use `get_value` to prevent early exit while name is still unresolved during rendering\n d['PKG_NAME'] = meta.get_value('package/name')\n d['PKG_VERSION'] = meta.version()\n d['PKG_BUILDNUM'] = str(meta.build_number() or 0)\n if meta.final and not skip_build_id:\n d['PKG_BUILD_STRING'] = str(meta.build_id())\n d['PKG_HASH'] = meta.hash_dependencies()\n else:\n d['PKG_BUILD_STRING'] = 'placeholder'\n d['PKG_HASH'] = '1234567'\n d['RECIPE_DIR'] = (meta.path if meta.path else\n meta.meta.get('extra', {}).get('parent_recipe', {}).get('path', ''))\n return d\n\n\n@memoized\ndef get_cpu_count():\n if sys.platform == \"darwin\":\n # multiprocessing.cpu_count() is not reliable on OSX\n # See issue #645 on github.com/conda/conda-build\n out, _ = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True,\n stdout=subprocess.PIPE).communicate()\n return out.decode('utf-8').strip()\n else:\n try:\n return str(multiprocessing.cpu_count())\n except NotImplementedError:\n return \"1\"\n\n\ndef get_shlib_ext():\n # Return the shared library extension.\n if sys.platform == 'win32':\n return '.dll'\n elif sys.platform == 'darwin':\n return '.dylib'\n elif sys.platform.startswith('linux'):\n return '.so'\n else:\n raise NotImplementedError(sys.platform)\n\n\ndef windows_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n # We have gone for the clang values here.\n win_arch = 'i386' if 
str(m.config.host_arch) == '32' else 'amd64'\n win_msvc = '19.0.0' if PY3 else '15.0.0'\n library_prefix = join(prefix, 'Library')\n drive, tail = m.config.host_prefix.split(':')\n get_default('SCRIPTS', join(prefix, 'Scripts'))\n get_default('LIBRARY_PREFIX', library_prefix)\n get_default('LIBRARY_BIN', join(library_prefix, 'bin'))\n get_default('LIBRARY_INC', join(library_prefix, 'include'))\n get_default('LIBRARY_LIB', join(library_prefix, 'lib'))\n get_default('CYGWIN_PREFIX', ''.join(('/cygdrive/', drive.lower(), tail.replace('\\\\', '/'))))\n # see https://en.wikipedia.org/wiki/Environment_variable#Default_values\n get_default('ALLUSERSPROFILE')\n get_default('APPDATA')\n get_default('CommonProgramFiles')\n get_default('CommonProgramFiles(x86)')\n get_default('CommonProgramW6432')\n get_default('COMPUTERNAME')\n get_default('ComSpec')\n get_default('HOMEDRIVE')\n get_default('HOMEPATH')\n get_default('LOCALAPPDATA')\n get_default('LOGONSERVER')\n get_default('NUMBER_OF_PROCESSORS')\n get_default('PATHEXT')\n get_default('ProgramData')\n get_default('ProgramFiles')\n get_default('ProgramFiles(x86)')\n get_default('ProgramW6432')\n get_default('PROMPT')\n get_default('PSModulePath')\n get_default('PUBLIC')\n get_default('SystemDrive')\n get_default('SystemRoot')\n get_default('TEMP')\n get_default('TMP')\n get_default('USERDOMAIN')\n get_default('USERNAME')\n get_default('USERPROFILE')\n get_default('windir')\n # CPU data, see https://github.com/conda/conda-build/issues/2064\n get_default('PROCESSOR_ARCHITEW6432')\n get_default('PROCESSOR_ARCHITECTURE')\n get_default('PROCESSOR_IDENTIFIER')\n get_default('BUILD', win_arch + '-pc-windows-' + win_msvc)\n for env_var in os.environ.keys():\n if re.match('VS[0-9]{2,3}COMNTOOLS', env_var):\n get_default(env_var)\n\n\ndef unix_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n get_default('HOME', 'UNKNOWN')\n get_default('PKG_CONFIG_PATH', join(prefix, 'lib', 'pkgconfig'))\n get_default('CMAKE_GENERATOR', 'Unix Makefiles')\n get_default('SSL_CERT_FILE')\n\n\ndef osx_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n OSX_ARCH = 'i386' if str(m.config.host_arch) == '32' else 'x86_64'\n # 10.7 install_name_tool -delete_rpath causes broken dylibs, I will revisit this ASAP.\n # rpath = ' -Wl,-rpath,%(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.\n # d['LDFLAGS'] = ldflags + rpath + ' -arch %(OSX_ARCH)s' % d\n get_default('OSX_ARCH', OSX_ARCH)\n get_default('MACOSX_DEPLOYMENT_TARGET', '10.9')\n get_default('BUILD', OSX_ARCH + '-apple-darwin13.4.0')\n\n\n@memoized\ndef _machine_and_architecture():\n return platform.machine(), platform.architecture()\n\n\ndef linux_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n platform_machine, platform_architecture = _machine_and_architecture()\n build_arch = platform_machine\n # Python reports x86_64 when running an i686 Python binary on a 64-bit CPU\n # unless run through linux32. Issue a warning when we detect this.\n if build_arch == 'x86_64' and platform_architecture[0] == '32bit':\n print(\"Warning: You are running 32-bit Python on a 64-bit linux installation\")\n print(\" but have not launched it via linux32. Various queries *will*\")\n print(\" give unexpected results (uname -m, platform.machine() etc)\")\n build_arch = 'i686'\n # the GNU triplet is powerpc, not ppc. 
This matters.\n if build_arch.startswith('ppc'):\n build_arch = build_arch.replace('ppc', 'powerpc')\n if build_arch.startswith('powerpc'):\n build_distro = 'cos7'\n else:\n build_distro = 'cos6'\n # There is also QEMU_SET_ENV, but that needs to be\n # filtered so it only contains the result of `linux_vars`\n # which, before this change was empty, and after it only\n # contains other QEMU env vars.\n get_default('CFLAGS')\n get_default('CXXFLAGS')\n get_default('LDFLAGS')\n get_default('QEMU_LD_PREFIX')\n get_default('QEMU_UNAME')\n get_default('DEJAGNU')\n get_default('DISPLAY')\n get_default('LD_RUN_PATH', prefix + '/lib')\n get_default('BUILD', build_arch + '-conda_' + build_distro + '-linux-gnu')\n\n\ndef set_from_os_or_variant(out_dict, key, variant, default):\n value = os.getenv(key)\n if not value:\n value = variant.get(key, default)\n if value:\n out_dict[key] = value\n\n\n@memoized\ndef system_vars(env_dict, m, prefix):\n d = dict()\n # note the dictionary is passed in here - variables are set in that dict if they are non-null\n get_default = lambda key, default='': set_from_os_or_variant(d, key, m.config.variant, default)\n\n get_default('CPU_COUNT', get_cpu_count())\n get_default('LANG')\n get_default('LC_ALL')\n get_default('MAKEFLAGS')\n d['SHLIB_EXT'] = get_shlib_ext()\n d['PATH'] = os.environ.copy()['PATH']\n\n if not m.config.activate:\n d = prepend_bin_path(d, m.config.host_prefix)\n\n if sys.platform == 'win32':\n windows_vars(m, get_default, prefix)\n else:\n unix_vars(m, get_default, prefix)\n\n if sys.platform == 'darwin':\n osx_vars(m, get_default, prefix)\n elif sys.platform.startswith('linux'):\n linux_vars(m, get_default, prefix)\n\n return d\n\n\nclass InvalidEnvironment(Exception):\n pass\n\n\n# Stripped-down Environment class from conda-tools ( https://github.com/groutr/conda-tools )\n# Vendored here to avoid the whole dependency for just this bit.\ndef _load_json(path):\n with open(path, 'r') as fin:\n x = json.load(fin)\n return x\n\n\ndef _load_all_json(path):\n \"\"\"\n Load all json files in a directory. 
Return dictionary with filenames mapped to json\n dictionaries.\n \"\"\"\n root, _, files = next(utils.walk(path))\n result = {}\n for f in files:\n if f.endswith('.json'):\n result[f] = _load_json(join(root, f))\n return result\n\n\nclass Environment(object):\n def __init__(self, path):\n \"\"\"\n Initialize an Environment object.\n\n To reflect changes in the underlying environment, a new Environment object should be\n created.\n \"\"\"\n self.path = path\n self._meta = join(path, 'conda-meta')\n if os.path.isdir(path) and os.path.isdir(self._meta):\n self._packages = {}\n else:\n raise InvalidEnvironment('Unable to load environment {}'.format(path))\n\n def _read_package_json(self):\n if not self._packages:\n self._packages = _load_all_json(self._meta)\n\n def package_specs(self):\n \"\"\"\n List all package specs in the environment.\n \"\"\"\n self._read_package_json()\n json_objs = self._packages.values()\n specs = []\n for i in json_objs:\n p, v, b = i['name'], i['version'], i['build']\n specs.append('{} {} {}'.format(p, v, b))\n return specs\n\n\ncached_actions = {}\nlast_index_ts = 0\n\n\ndef get_install_actions(prefix, specs, env, retries=0, subdir=None,\n verbose=True, debug=False, locking=True,\n bldpkgs_dirs=None, timeout=90, disable_pip=False,\n max_env_retry=3, output_folder=None, channel_urls=None):\n global cached_actions\n global last_index_ts\n actions = {}\n log = utils.get_logger(__name__)\n conda_log_level = logging.WARN\n specs = list(specs)\n if verbose:\n capture = contextlib.contextmanager(lambda: (yield))\n elif debug:\n capture = contextlib.contextmanager(lambda: (yield))\n conda_log_level = logging.DEBUG\n else:\n capture = utils.capture\n for feature, value in feature_list:\n if value:\n specs.append('%s@' % feature)\n\n bldpkgs_dirs = ensure_list(bldpkgs_dirs)\n\n index, index_ts = get_build_index(subdir, list(bldpkgs_dirs)[0], output_folder=output_folder,\n channel_urls=channel_urls, debug=debug, verbose=verbose,\n locking=locking, timeout=timeout)\n specs = tuple(utils.ensure_valid_spec(spec) for spec in specs if not str(spec).endswith('@'))\n\n if ((specs, env, subdir, channel_urls, disable_pip) in cached_actions and\n last_index_ts >= index_ts):\n actions = cached_actions[(specs, env, subdir, channel_urls, disable_pip)].copy()\n if \"PREFIX\" in actions:\n actions['PREFIX'] = prefix\n elif specs:\n # this is hiding output like:\n # Fetching package metadata ...........\n # Solving package specifications: ..........\n with utils.LoggingContext(conda_log_level):\n with capture():\n try:\n actions = install_actions(prefix, index, specs, force=True)\n except (NoPackagesFoundError, UnsatisfiableError) as exc:\n raise DependencyNeedsBuildingError(exc, subdir=subdir)\n except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,\n CondaError, AssertionError) as exc:\n if 'lock' in str(exc):\n log.warn(\"failed to get install actions, retrying. exception was: %s\",\n str(exc))\n elif ('requires a minimum conda version' in str(exc) or\n 'link a source that does not' in str(exc) or\n isinstance(exc, AssertionError)):\n locks = utils.get_conda_operation_locks(locking, bldpkgs_dirs, timeout)\n with utils.try_acquire_locks(locks, timeout=timeout):\n pkg_dir = str(exc)\n folder = 0\n while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:\n pkg_dir = os.path.dirname(pkg_dir)\n folder += 1\n log.warn(\"I think conda ended up with a partial extraction for %s. 
\"\n \"Removing the folder and retrying\", pkg_dir)\n if pkg_dir in pkgs_dirs and os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retries < max_env_retry:\n log.warn(\"failed to get install actions, retrying. exception was: %s\",\n str(exc))\n actions = get_install_actions(prefix, tuple(specs), env,\n retries=retries + 1,\n subdir=subdir,\n verbose=verbose,\n debug=debug,\n locking=locking,\n bldpkgs_dirs=tuple(bldpkgs_dirs),\n timeout=timeout,\n disable_pip=disable_pip,\n max_env_retry=max_env_retry,\n output_folder=output_folder,\n channel_urls=tuple(channel_urls))\n else:\n log.error(\"Failed to get install actions, max retries exceeded.\")\n raise\n if disable_pip:\n for pkg in ('pip', 'setuptools', 'wheel'):\n # specs are the raw specifications, not the conda-derived actual specs\n # We're testing that pip etc. are manually specified\n if not any(re.match('^%s(?:$|[\s=].*)' % pkg, str(dep)) for dep in specs):\n actions['LINK'] = [spec for spec in actions['LINK'] if spec.name != pkg]\n utils.trim_empty_keys(actions)\n cached_actions[(specs, env, subdir, channel_urls, disable_pip)] = actions.copy()\n last_index_ts = index_ts\n return actions\n\n\ndef create_env(prefix, specs_or_actions, env, config, subdir, clear_cache=True, retry=0,\n locks=None, is_cross=False, is_conda=False):\n '''\n Create a conda environment for the given prefix and specs.\n '''\n if config.debug:\n external_logger_context = utils.LoggingContext(logging.DEBUG)\n else:\n external_logger_context = utils.LoggingContext(logging.WARN)\n\n with external_logger_context:\n log = utils.get_logger(__name__)\n\n # if os.path.isdir(prefix):\n # utils.rm_rf(prefix)\n\n if specs_or_actions: # Don't waste time if there is nothing to do\n log.debug(\"Creating environment in %s\", prefix)\n log.debug(str(specs_or_actions))\n\n with utils.path_prepended(prefix):\n if not locks:\n locks = utils.get_conda_operation_locks(config)\n try:\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n # input is a list - it's specs in MatchSpec format\n if not hasattr(specs_or_actions, 'keys'):\n specs = list(set(specs_or_actions))\n actions = get_install_actions(prefix, tuple(specs), env,\n subdir=subdir,\n verbose=config.verbose,\n debug=config.debug,\n locking=config.locking,\n bldpkgs_dirs=tuple(config.bldpkgs_dirs),\n timeout=config.timeout,\n disable_pip=config.disable_pip,\n max_env_retry=config.max_env_retry,\n output_folder=config.output_folder,\n channel_urls=tuple(config.channel_urls))\n else:\n actions = specs_or_actions\n index, index_ts = get_build_index(subdir=subdir,\n bldpkgs_dir=config.bldpkgs_dir,\n output_folder=config.output_folder,\n channel_urls=config.channel_urls,\n debug=config.debug,\n verbose=config.verbose,\n locking=config.locking,\n timeout=config.timeout)\n utils.trim_empty_keys(actions)\n display_actions(actions, index)\n if utils.on_win:\n for k, v in os.environ.items():\n os.environ[k] = str(v)\n execute_actions(actions, index, verbose=config.debug)\n except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,\n CondaError) as exc:\n if ((\"too short in\" in str(exc) or\n re.search('post-link failed for: (?:[a-zA-Z]*::)?openssl', str(exc)) or\n isinstance(exc, PaddingError)) and\n config.prefix_length > 80):\n if config.prefix_length_fallback:\n log.warn(\"Build prefix failed with prefix length %d\",\n config.prefix_length)\n log.warn(\"Error was: \")\n log.warn(str(exc))\n log.warn(\"One or more of your package dependencies needs to be rebuilt \"\n \"with a longer prefix 
length.\")\n log.warn(\"Falling back to legacy prefix length of 80 characters.\")\n log.warn(\"Your package will not install into prefixes > 80 characters.\")\n config.prefix_length = 80\n\n host = '_h_env' in prefix\n # Set this here and use to create environ\n # Setting this here is important because we use it below (symlink)\n prefix = config.host_prefix if host else config.build_prefix\n actions['PREFIX'] = prefix\n\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, is_cross=is_cross)\n else:\n raise\n elif 'lock' in str(exc):\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n elif ('requires a minimum conda version' in str(exc) or\n 'link a source that does not' in str(exc)):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n pkg_dir = str(exc)\n folder = 0\n while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:\n pkg_dir = os.path.dirname(pkg_dir)\n folder += 1\n log.warn(\"I think conda ended up with a partial extraction for %s. \"\n \"Removing the folder and retrying\", pkg_dir)\n if os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n else:\n log.error(\"Failed to create env, max retries exceeded.\")\n raise\n else:\n raise\n # HACK: some of the time, conda screws up somehow and incomplete packages result.\n # Just retry.\n except (AssertionError, IOError, ValueError, RuntimeError, LockError) as exc:\n if isinstance(exc, AssertionError):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n pkg_dir = os.path.dirname(os.path.dirname(str(exc)))\n log.warn(\"I think conda ended up with a partial extraction for %s. \"\n \"Removing the folder and retrying\", pkg_dir)\n if os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n else:\n log.error(\"Failed to create env, max retries exceeded.\")\n raise\n\n if not is_conda:\n # Symlinking conda is critical here to make sure that activate scripts are not\n # accidentally included in packages.\n if utils.on_win:\n shell = \"cmd.exe\"\n else:\n shell = \"bash\"\n symlink_conda(prefix, sys.prefix, shell)\n\n\ndef clean_pkg_cache(dist, config):\n locks = []\n\n conda_log_level = logging.WARN\n if config.debug:\n conda_log_level = logging.DEBUG\n\n _pkgs_dirs = pkgs_dirs[:1]\n if config.locking:\n locks = [utils.get_lock(folder, timeout=config.timeout) for folder in _pkgs_dirs]\n with utils.LoggingContext(conda_log_level):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n rmplan = [\n 'RM_EXTRACTED {0} local::{0}'.format(dist),\n 'RM_FETCHED {0} local::{0}'.format(dist),\n ]\n execute_plan(rmplan)\n\n # Conda does not seem to do a complete cleanup sometimes. 
This is supplemental.\n # Conda's cleanup is still necessary - it keeps track of its own in-memory\n # list of downloaded things.\n for folder in pkgs_dirs:\n try:\n assert not os.path.exists(os.path.join(folder, dist))\n assert not os.path.exists(os.path.join(folder, dist + '.tar.bz2'))\n for pkg_id in [dist, 'local::' + dist]:\n assert pkg_id not in package_cache()\n except AssertionError:\n log = utils.get_logger(__name__)\n log.debug(\"Conda caching error: %s package remains in cache after removal\",\n dist)\n log.debug(\"manually removing to compensate\")\n cache = package_cache()\n keys = [key for key in cache.keys() if dist in key]\n for pkg_id in keys:\n if pkg_id in cache:\n del cache[pkg_id]\n for entry in glob(os.path.join(folder, dist + '*')):\n utils.rm_rf(entry)\n\n\ndef get_pinned_deps(m, section):\n with TemporaryDirectory(prefix='_') as tmpdir:\n actions = get_install_actions(tmpdir,\n tuple(m.ms_depends(section)), section,\n subdir=m.config.target_subdir,\n debug=m.config.debug,\n verbose=m.config.verbose,\n locking=m.config.locking,\n bldpkgs_dirs=tuple(m.config.bldpkgs_dirs),\n timeout=m.config.timeout,\n disable_pip=m.config.disable_pip,\n max_env_retry=m.config.max_env_retry,\n output_folder=m.config.output_folder,\n channel_urls=tuple(m.config.channel_urls))\n runtime_deps = [' '.join(link.dist_name.rsplit('-', 2)) for link in actions.get('LINK', [])]\n return runtime_deps\n","path":"conda_build/environ.py"}]},
This list disables the\\n# pass-through of variant values to env vars for these keys.\\nLANGUAGES = ('PERL', 'LUA', 'R', \\\"NUMPY\\\", 'PYTHON')\\n\\n\\ndef get_perl_ver(config):\\n return '.'.join(config.variant.get('perl', get_default_variant(config)['perl']).split('.')[:2])\\n\\n\\ndef get_lua_ver(config):\\n return '.'.join(config.variant.get('lua', get_default_variant(config)['lua']).split('.')[:2])\\n\\n\\ndef get_py_ver(config):\\n py = config.variant.get('python', get_default_variant(config)['python'])\\n if not hasattr(py, 'split'):\\n py = py[0]\\n return '.'.join(py.split('.')[:2])\\n\\n\\ndef get_r_ver(config):\\n return '.'.join(config.variant.get('r_base',\\n get_default_variant(config)['r_base']).split('.')[:3])\\n\\n\\ndef get_npy_ver(config):\\n conda_npy = ''.join(str(config.variant.get('numpy') or\\n get_default_variant(config)['numpy']).split('.'))\\n # Convert int -> string, e.g.\\n # 17 -> '1.7'\\n # 110 -> '1.10'\\n return conda_npy[0] + '.' + conda_npy[1:]\\n\\n\\ndef get_lua_include_dir(config):\\n return join(config.host_prefix, \\\"include\\\")\\n\\n\\n@memoized\\ndef verify_git_repo(git_exe, git_dir, git_url, git_commits_since_tag, debug=False,\\n expected_rev='HEAD'):\\n env = os.environ.copy()\\n log = utils.get_logger(__name__)\\n\\n if debug:\\n stderr = None\\n else:\\n FNULL = open(os.devnull, 'w')\\n stderr = FNULL\\n\\n if not expected_rev:\\n return False\\n\\n OK = True\\n\\n env['GIT_DIR'] = git_dir\\n try:\\n # Verify current commit (minus our locally applied patches) matches expected commit\\n current_commit = utils.check_output_env([git_exe,\\n \\\"log\\\",\\n \\\"-n1\\\",\\n \\\"--format=%H\\\",\\n \\\"HEAD\\\" + \\\"^\\\" * git_commits_since_tag],\\n env=env, stderr=stderr)\\n current_commit = current_commit.decode('utf-8')\\n expected_tag_commit = utils.check_output_env([git_exe, \\\"log\\\", \\\"-n1\\\", \\\"--format=%H\\\",\\n expected_rev],\\n env=env, stderr=stderr)\\n expected_tag_commit = expected_tag_commit.decode('utf-8')\\n\\n if current_commit != expected_tag_commit:\\n return False\\n\\n # Verify correct remote url. Need to find the git cache directory,\\n # and check the remote from there.\\n cache_details = utils.check_output_env([git_exe, \\\"remote\\\", \\\"-v\\\"], env=env,\\n stderr=stderr)\\n cache_details = cache_details.decode('utf-8')\\n cache_dir = cache_details.split('\\\\n')[0].split()[1]\\n\\n if not isinstance(cache_dir, str):\\n # On Windows, subprocess env can't handle unicode.\\n cache_dir = cache_dir.encode(sys.getfilesystemencoding() or 'utf-8')\\n\\n try:\\n remote_details = utils.check_output_env([git_exe, \\\"--git-dir\\\", cache_dir,\\n \\\"remote\\\", \\\"-v\\\"],\\n env=env, stderr=stderr)\\n except subprocess.CalledProcessError:\\n if sys.platform == 'win32' and cache_dir.startswith('/'):\\n cache_dir = utils.convert_unix_path_to_win(cache_dir)\\n remote_details = utils.check_output_env([git_exe, \\\"--git-dir\\\", cache_dir,\\n \\\"remote\\\", \\\"-v\\\"],\\n env=env, stderr=stderr)\\n remote_details = remote_details.decode('utf-8')\\n remote_url = remote_details.split('\\\\n')[0].split()[1]\\n\\n # on windows, remote URL comes back to us as cygwin or msys format. Python doesn't\\n # know how to normalize it. 
Need to convert it to a windows path.\\n if sys.platform == 'win32' and remote_url.startswith('/'):\\n remote_url = utils.convert_unix_path_to_win(git_url)\\n\\n if os.path.exists(remote_url):\\n # Local filepaths are allowed, but make sure we normalize them\\n remote_url = normpath(remote_url)\\n\\n # If the current source directory in conda-bld/work doesn't match the user's\\n # metadata git_url or git_rev, then we aren't looking at the right source.\\n if not os.path.isdir(remote_url) and remote_url.lower() != git_url.lower():\\n log.debug(\\\"remote does not match git_url\\\")\\n log.debug(\\\"Remote: \\\" + remote_url.lower())\\n log.debug(\\\"git_url: \\\" + git_url.lower())\\n OK = False\\n except subprocess.CalledProcessError as error:\\n log.debug(\\\"Error obtaining git information in verify_git_repo. Error was: \\\")\\n log.debug(str(error))\\n OK = False\\n finally:\\n if not debug:\\n FNULL.close()\\n return OK\\n\\n\\ndef get_git_info(git_exe, repo, debug):\\n \\\"\\\"\\\"\\n Given a repo to a git repo, return a dictionary of:\\n GIT_DESCRIBE_TAG\\n GIT_DESCRIBE_NUMBER\\n GIT_DESCRIBE_HASH\\n GIT_FULL_HASH\\n GIT_BUILD_STR\\n from the output of git describe.\\n :return:\\n \\\"\\\"\\\"\\n d = {}\\n log = utils.get_logger(__name__)\\n\\n if debug:\\n stderr = None\\n else:\\n FNULL = open(os.devnull, 'w')\\n stderr = FNULL\\n\\n # grab information from describe\\n env = os.environ.copy()\\n env['GIT_DIR'] = repo\\n keys = [\\\"GIT_DESCRIBE_TAG\\\", \\\"GIT_DESCRIBE_NUMBER\\\", \\\"GIT_DESCRIBE_HASH\\\"]\\n\\n try:\\n output = utils.check_output_env([git_exe, \\\"describe\\\", \\\"--tags\\\", \\\"--long\\\", \\\"HEAD\\\"],\\n env=env, cwd=os.path.dirname(repo),\\n stderr=stderr).splitlines()[0]\\n output = output.decode('utf-8')\\n parts = output.rsplit('-', 2)\\n if len(parts) == 3:\\n d.update(dict(zip(keys, parts)))\\n except subprocess.CalledProcessError:\\n msg = (\\n \\\"Failed to obtain git tag information.\\\\n\\\"\\n \\\"Consider using annotated tags if you are not already \\\"\\n \\\"as they are more reliable when used with git describe.\\\"\\n )\\n log.debug(msg)\\n\\n try:\\n # get the _full_ hash of the current HEAD\\n output = utils.check_output_env([git_exe, \\\"rev-parse\\\", \\\"HEAD\\\"],\\n env=env, cwd=os.path.dirname(repo),\\n stderr=stderr).splitlines()[0]\\n output = output.decode('utf-8')\\n\\n d['GIT_FULL_HASH'] = output\\n except subprocess.CalledProcessError as error:\\n log.debug(\\\"Error obtaining git commit information. 
Error was: \\\")\\n log.debug(str(error))\\n\\n # set up the build string\\n if \\\"GIT_DESCRIBE_NUMBER\\\" in d and \\\"GIT_DESCRIBE_HASH\\\" in d:\\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[\\\"GIT_DESCRIBE_NUMBER\\\"],\\n d[\\\"GIT_DESCRIBE_HASH\\\"])\\n\\n # issues on Windows with the next line of the command prompt being recorded here.\\n assert not any(\\\"\\\\n\\\" in value for value in d.values())\\n return d\\n\\n\\ndef get_hg_build_info(repo):\\n env = os.environ.copy()\\n env['HG_DIR'] = repo\\n env = {str(key): str(value) for key, value in env.items()}\\n\\n d = {}\\n cmd = [\\\"hg\\\", \\\"log\\\", \\\"--template\\\",\\n \\\"{rev}|{node|short}|{latesttag}|{latesttagdistance}|{branch}\\\",\\n \\\"--rev\\\", \\\".\\\"]\\n output = utils.check_output_env(cmd, env=env, cwd=os.path.dirname(repo))\\n output = output.decode('utf-8')\\n rev, short_id, tag, distance, branch = output.split('|')\\n if tag != 'null':\\n d['HG_LATEST_TAG'] = tag\\n if branch == \\\"\\\":\\n branch = 'default'\\n d['HG_BRANCH'] = branch\\n d['HG_NUM_ID'] = rev\\n d['HG_LATEST_TAG_DISTANCE'] = distance\\n d['HG_SHORT_ID'] = short_id\\n d['HG_BUILD_STR'] = '{}_{}'.format(d['HG_NUM_ID'], d['HG_SHORT_ID'])\\n return d\\n\\n\\ndef get_dict(m, prefix=None, for_env=True, skip_build_id=False, escape_backslash=False):\\n if not prefix:\\n prefix = m.config.host_prefix\\n\\n # conda-build specific vars\\n d = conda_build_vars(prefix, m.config)\\n\\n # languages\\n d.update(python_vars(m, prefix, escape_backslash))\\n d.update(perl_vars(m, prefix, escape_backslash))\\n d.update(lua_vars(m, prefix, escape_backslash))\\n d.update(r_vars(m, prefix, escape_backslash))\\n\\n if m:\\n d.update(meta_vars(m, skip_build_id=skip_build_id))\\n\\n # system\\n d.update(system_vars(d, m, prefix))\\n\\n # features\\n d.update({feat.upper(): str(int(value)) for feat, value in\\n feature_list})\\n\\n for k, v in m.config.variant.items():\\n if not for_env or (k.upper() not in d and k.upper() not in LANGUAGES):\\n d[k] = v\\n return d\\n\\n\\ndef conda_build_vars(prefix, config):\\n src_dir = config.test_dir if os.path.basename(prefix)[:2] == '_t' else config.work_dir\\n return {\\n 'CONDA_BUILD': '1',\\n 'PYTHONNOUSERSITE': '1',\\n 'CONDA_DEFAULT_ENV': config.host_prefix,\\n 'ARCH': str(config.host_arch),\\n # This is the one that is most important for where people put artifacts that get bundled.\\n # It is fed from our function argument, and can be any of:\\n # 1. Build prefix - when host requirements are not explicitly set,\\n # then prefix = build prefix = host prefix\\n # 2. Host prefix - when host requirements are explicitly set, prefix = host prefix\\n # 3. Test prefix - during test runs, this points at the test prefix\\n 'PREFIX': prefix,\\n # This is for things that are specifically build tools. 
Things that run on the build\\n # platform, but probably should not be linked against, since they may not run on the\\n # destination host platform\\n # It can be equivalent to config.host_prefix if the host section is not explicitly set.\\n 'BUILD_PREFIX': config.build_prefix,\\n 'SYS_PREFIX': sys.prefix,\\n 'SYS_PYTHON': sys.executable,\\n 'SUBDIR': config.host_subdir,\\n 'SRC_DIR': src_dir,\\n 'HTTPS_PROXY': os.getenv('HTTPS_PROXY', ''),\\n 'HTTP_PROXY': os.getenv('HTTP_PROXY', ''),\\n 'REQUESTS_CA_BUNDLE': os.getenv('REQUESTS_CA_BUNDLE', ''),\\n 'DIRTY': '1' if config.dirty else '',\\n 'ROOT': root_dir,\\n }\\n\\n\\ndef python_vars(metadata, prefix, escape_backslash):\\n py_ver = get_py_ver(metadata.config)\\n stdlib_dir = utils.get_stdlib_dir(prefix, py_ver)\\n sp_dir = utils.get_site_packages(prefix, py_ver)\\n\\n if utils.on_win and escape_backslash:\\n stdlib_dir = stdlib_dir.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\\n sp_dir = sp_dir.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\\n\\n vars_ = {\\n 'CONDA_PY': ''.join(py_ver.split('.')[:2]),\\n 'PY3K': str(int(int(py_ver[0]) >= 3)),\\n 'PY_VER': py_ver,\\n 'STDLIB_DIR': stdlib_dir,\\n 'SP_DIR': sp_dir,\\n }\\n build_or_host = 'host' if metadata.is_cross else 'build'\\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\\n if 'python' in deps or metadata.name(fail_ok=True) == 'python':\\n python_bin = metadata.config.python_bin(prefix, metadata.config.host_subdir)\\n\\n if utils.on_win and escape_backslash:\\n python_bin = python_bin.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\\n\\n vars_.update({\\n # host prefix is always fine, because it is the same as build when is_cross is False\\n 'PYTHON': python_bin,\\n })\\n\\n np_ver = metadata.config.variant.get('numpy', get_default_variant(metadata.config)['numpy'])\\n vars_['NPY_VER'] = '.'.join(np_ver.split('.')[:2])\\n vars_['CONDA_NPY'] = ''.join(np_ver.split('.')[:2])\\n vars_['NPY_DISTUTILS_APPEND_FLAGS'] = '1'\\n return vars_\\n\\n\\ndef perl_vars(metadata, prefix, escape_backslash):\\n vars_ = {\\n 'PERL_VER': get_perl_ver(metadata.config),\\n 'CONDA_PERL': get_perl_ver(metadata.config),\\n }\\n build_or_host = 'host' if metadata.is_cross else 'build'\\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\\n if 'perl' in deps or metadata.name(fail_ok=True) == 'perl':\\n perl_bin = metadata.config.perl_bin(prefix, metadata.config.host_subdir)\\n\\n if utils.on_win and escape_backslash:\\n perl_bin = perl_bin.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\\n\\n vars_.update({\\n # host prefix is always fine, because it is the same as build when is_cross is False\\n 'PERL': perl_bin,\\n })\\n return vars_\\n\\n\\ndef lua_vars(metadata, prefix, escape_backslash):\\n vars_ = {\\n 'LUA_VER': get_lua_ver(metadata.config),\\n 'CONDA_LUA': get_lua_ver(metadata.config),\\n }\\n build_or_host = 'host' if metadata.is_cross else 'build'\\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\\n if 'lua' in deps:\\n lua_bin = metadata.config.lua_bin(prefix, metadata.config.host_subdir)\\n lua_include_dir = get_lua_include_dir(metadata.config)\\n\\n if utils.on_win and escape_backslash:\\n lua_bin = lua_bin.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\\n lua_include_dir = lua_include_dir.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\\n\\n vars_.update({\\n 'LUA': lua_bin,\\n 'LUA_INCLUDE_DIR': lua_include_dir,\\n })\\n return vars_\\n\\n\\ndef r_vars(metadata, prefix, escape_backslash):\\n vars_ = {\\n 'R_VER': get_r_ver(metadata.config),\\n 'CONDA_R': 
get_r_ver(metadata.config),\\n }\\n\\n build_or_host = 'host' if metadata.is_cross else 'build'\\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\\n if 'r-base' in deps or 'mro-base' in deps or metadata.name(fail_ok=True) in (\\n 'r-base', 'mro-base'):\\n r_bin = metadata.config.r_bin(prefix, metadata.config.host_subdir)\\n\\n if utils.on_win and escape_backslash:\\n r_bin = r_bin.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\\n\\n vars_.update({\\n 'R': r_bin,\\n })\\n return vars_\\n\\n\\ndef meta_vars(meta, skip_build_id=False):\\n d = {}\\n for var_name in ensure_list(meta.get_value('build/script_env', [])):\\n value = os.getenv(var_name)\\n if value is None:\\n warnings.warn(\\n \\\"The environment variable '%s' is undefined.\\\" % var_name,\\n UserWarning\\n )\\n else:\\n d[var_name] = value\\n warnings.warn(\\n \\\"The environment variable '%s' is being passed through with value %s. \\\"\\n \\\"If you are splitting build and test phases with --no-test, please ensure \\\"\\n \\\"that this value is also set similarly at test time.\\\" % (var_name, value),\\n UserWarning\\n )\\n\\n folder = meta.get_value('source/0/folder', '')\\n repo_dir = join(meta.config.work_dir, folder)\\n git_dir = join(repo_dir, '.git')\\n hg_dir = join(repo_dir, '.hg')\\n\\n if not isinstance(git_dir, str):\\n # On Windows, subprocess env can't handle unicode.\\n git_dir = git_dir.encode(sys.getfilesystemencoding() or 'utf-8')\\n\\n git_exe = external.find_executable('git', meta.config.build_prefix)\\n if git_exe and os.path.exists(git_dir):\\n # We set all 'source' metavars using the FIRST source entry in meta.yaml.\\n git_url = meta.get_value('source/0/git_url')\\n\\n if os.path.exists(git_url):\\n if sys.platform == 'win32':\\n git_url = utils.convert_unix_path_to_win(git_url)\\n # If git_url is a relative path instead of a url, convert it to an abspath\\n git_url = normpath(join(meta.path, git_url))\\n\\n _x = False\\n\\n if git_url:\\n _x = verify_git_repo(git_exe,\\n git_dir,\\n git_url,\\n meta.config.git_commits_since_tag,\\n meta.config.debug,\\n meta.get_value('source/0/git_rev', 'HEAD'))\\n\\n if _x or meta.get_value('source/0/path'):\\n d.update(get_git_info(git_exe, git_dir, meta.config.debug))\\n\\n elif external.find_executable('hg', meta.config.build_prefix) and os.path.exists(hg_dir):\\n d.update(get_hg_build_info(hg_dir))\\n\\n # use `get_value` to prevent early exit while name is still unresolved during rendering\\n d['PKG_NAME'] = meta.get_value('package/name')\\n d['PKG_VERSION'] = meta.version()\\n d['PKG_BUILDNUM'] = str(meta.build_number() or 0)\\n if meta.final and not skip_build_id:\\n d['PKG_BUILD_STRING'] = str(meta.build_id())\\n d['PKG_HASH'] = meta.hash_dependencies()\\n else:\\n d['PKG_BUILD_STRING'] = 'placeholder'\\n d['PKG_HASH'] = '1234567'\\n d['RECIPE_DIR'] = (meta.path if meta.path else\\n meta.meta.get('extra', {}).get('parent_recipe', {}).get('path', ''))\\n return d\\n\\n\\n@memoized\\ndef get_cpu_count():\\n if sys.platform == \\\"darwin\\\":\\n # multiprocessing.cpu_count() is not reliable on OSX\\n # See issue #645 on github.com/conda/conda-build\\n out, _ = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True,\\n stdout=subprocess.PIPE).communicate()\\n return out.decode('utf-8').strip()\\n else:\\n try:\\n return str(multiprocessing.cpu_count())\\n except NotImplementedError:\\n return \\\"1\\\"\\n\\n\\ndef get_shlib_ext():\\n # Return the shared library extension.\\n if sys.platform == 'win32':\\n return '.dll'\\n elif sys.platform == 'darwin':\\n 
return '.dylib'\\n elif sys.platform.startswith('linux'):\\n return '.so'\\n else:\\n raise NotImplementedError(sys.platform)\\n\\n\\ndef windows_vars(m, get_default, prefix):\\n \\\"\\\"\\\"This is setting variables on a dict that is part of the get_default function\\\"\\\"\\\"\\n # We have gone for the clang values here.\\n win_arch = 'i386' if str(m.config.host_arch) == '32' else 'amd64'\\n win_msvc = '19.0.0' if PY3 else '15.0.0'\\n library_prefix = join(prefix, 'Library')\\n drive, tail = m.config.host_prefix.split(':')\\n get_default('SCRIPTS', join(prefix, 'Scripts'))\\n get_default('LIBRARY_PREFIX', library_prefix)\\n get_default('LIBRARY_BIN', join(library_prefix, 'bin'))\\n get_default('LIBRARY_INC', join(library_prefix, 'include'))\\n get_default('LIBRARY_LIB', join(library_prefix, 'lib'))\\n get_default('CYGWIN_PREFIX', ''.join(('/cygdrive/', drive.lower(), tail.replace('\\\\\\\\', '/'))))\\n # see https://en.wikipedia.org/wiki/Environment_variable#Default_values\\n get_default('ALLUSERSPROFILE')\\n get_default('APPDATA')\\n get_default('CommonProgramFiles')\\n get_default('CommonProgramFiles(x86)')\\n get_default('CommonProgramW6432')\\n get_default('COMPUTERNAME')\\n get_default('ComSpec')\\n get_default('HOMEDRIVE')\\n get_default('HOMEPATH')\\n get_default('LOCALAPPDATA')\\n get_default('LOGONSERVER')\\n get_default('NUMBER_OF_PROCESSORS')\\n get_default('PATHEXT')\\n get_default('ProgramData')\\n get_default('ProgramFiles')\\n get_default('ProgramFiles(x86)')\\n get_default('ProgramW6432')\\n get_default('PROMPT')\\n get_default('PSModulePath')\\n get_default('PUBLIC')\\n get_default('SystemDrive')\\n get_default('SystemRoot')\\n get_default('TEMP')\\n get_default('TMP')\\n get_default('USERDOMAIN')\\n get_default('USERNAME')\\n get_default('USERPROFILE')\\n get_default('windir')\\n # CPU data, see https://github.com/conda/conda-build/issues/2064\\n get_default('PROCESSOR_ARCHITEW6432')\\n get_default('PROCESSOR_ARCHITECTURE')\\n get_default('PROCESSOR_IDENTIFIER')\\n get_default('BUILD', win_arch + '-pc-windows-' + win_msvc)\\n for env_var in os.environ.keys():\\n if re.match('VS[0-9]{2,3}COMNTOOLS', env_var):\\n get_default(env_var)\\n\\n\\ndef unix_vars(m, get_default, prefix):\\n \\\"\\\"\\\"This is setting variables on a dict that is part of the get_default function\\\"\\\"\\\"\\n get_default('HOME', 'UNKNOWN')\\n get_default('PKG_CONFIG_PATH', join(prefix, 'lib', 'pkgconfig'))\\n get_default('CMAKE_GENERATOR', 'Unix Makefiles')\\n get_default('SSL_CERT_FILE')\\n\\n\\ndef osx_vars(m, get_default, prefix):\\n \\\"\\\"\\\"This is setting variables on a dict that is part of the get_default function\\\"\\\"\\\"\\n OSX_ARCH = 'i386' if str(m.config.host_arch) == '32' else 'x86_64'\\n # 10.7 install_name_tool -delete_rpath causes broken dylibs, I will revisit this ASAP.\\n # rpath = ' -Wl,-rpath,%(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.\\n # d['LDFLAGS'] = ldflags + rpath + ' -arch %(OSX_ARCH)s' % d\\n get_default('OSX_ARCH', OSX_ARCH)\\n get_default('MACOSX_DEPLOYMENT_TARGET', '10.9')\\n get_default('BUILD', OSX_ARCH + '-apple-darwin13.4.0')\\n\\n\\n@memoized\\ndef _machine_and_architecture():\\n return platform.machine(), platform.architecture()\\n\\n\\ndef linux_vars(m, get_default, prefix):\\n \\\"\\\"\\\"This is setting variables on a dict that is part of the get_default function\\\"\\\"\\\"\\n platform_machine, platform_architecture = _machine_and_architecture()\\n build_arch = platform_machine\\n # Python reports x86_64 when running a i686 Python 
binary on a 64-bit CPU\\n # unless run through linux32. Issue a warning when we detect this.\\n if build_arch == 'x86_64' and platform_architecture[0] == '32bit':\\n print(\\\"Warning: You are running 32-bit Python on a 64-bit linux installation\\\")\\n print(\\\" but have not launched it via linux32. Various qeuries *will*\\\")\\n print(\\\" give unexpected results (uname -m, platform.machine() etc)\\\")\\n build_arch = 'i686'\\n # the GNU triplet is powerpc, not ppc. This matters.\\n if build_arch.startswith('ppc'):\\n build_arch = build_arch.replace('ppc', 'powerpc')\\n if build_arch.startswith('powerpc'):\\n build_distro = 'cos7'\\n else:\\n build_distro = 'cos6'\\n # There is also QEMU_SET_ENV, but that needs to be\\n # filtered so it only contains the result of `linux_vars`\\n # which, before this change was empty, and after it only\\n # contains other QEMU env vars.\\n get_default('CFLAGS')\\n get_default('CXXFLAGS')\\n get_default('LDFLAGS')\\n get_default('QEMU_LD_PREFIX')\\n get_default('QEMU_UNAME')\\n get_default('DEJAGNU')\\n get_default('DISPLAY')\\n get_default('LD_RUN_PATH', prefix + '/lib')\\n get_default('BUILD', build_arch + '-conda_' + build_distro + '-linux-gnu')\\n\\n\\ndef set_from_os_or_variant(out_dict, key, variant, default):\\n value = os.getenv(key)\\n if not value:\\n value = variant.get(key, default)\\n if value:\\n out_dict[key] = value\\n\\n\\n@memoized\\ndef system_vars(env_dict, m, prefix):\\n d = dict()\\n # note the dictionary is passed in here - variables are set in that dict if they are non-null\\n get_default = lambda key, default='': set_from_os_or_variant(d, key, m.config.variant, default)\\n\\n get_default('CPU_COUNT', get_cpu_count())\\n get_default('LANG')\\n get_default('LC_ALL')\\n get_default('MAKEFLAGS')\\n d['SHLIB_EXT'] = get_shlib_ext()\\n d['PATH'] = os.environ.copy()['PATH']\\n\\n if not m.config.activate:\\n d = prepend_bin_path(d, m.config.host_prefix)\\n\\n if sys.platform == 'win32':\\n windows_vars(m, get_default, prefix)\\n else:\\n unix_vars(m, get_default, prefix)\\n\\n if sys.platform == 'darwin':\\n osx_vars(m, get_default, prefix)\\n elif sys.platform.startswith('linux'):\\n linux_vars(m, get_default, prefix)\\n\\n return d\\n\\n\\nclass InvalidEnvironment(Exception):\\n pass\\n\\n\\n# Stripped-down Environment class from conda-tools ( https://github.com/groutr/conda-tools )\\n# Vendored here to avoid the whole dependency for just this bit.\\ndef _load_json(path):\\n with open(path, 'r') as fin:\\n x = json.load(fin)\\n return x\\n\\n\\ndef _load_all_json(path):\\n \\\"\\\"\\\"\\n Load all json files in a directory. 
Return dictionary with filenames mapped to json\\n dictionaries.\\n \\\"\\\"\\\"\\n root, _, files = next(utils.walk(path))\\n result = {}\\n for f in files:\\n if f.endswith('.json'):\\n result[f] = _load_json(join(root, f))\\n return result\\n\\n\\nclass Environment(object):\\n def __init__(self, path):\\n \\\"\\\"\\\"\\n Initialize an Environment object.\\n\\n To reflect changes in the underlying environment, a new Environment object should be\\n created.\\n \\\"\\\"\\\"\\n self.path = path\\n self._meta = join(path, 'conda-meta')\\n if os.path.isdir(path) and os.path.isdir(self._meta):\\n self._packages = {}\\n else:\\n raise InvalidEnvironment('Unable to load environment {}'.format(path))\\n\\n def _read_package_json(self):\\n if not self._packages:\\n self._packages = _load_all_json(self._meta)\\n\\n def package_specs(self):\\n \\\"\\\"\\\"\\n List all package specs in the environment.\\n \\\"\\\"\\\"\\n self._read_package_json()\\n json_objs = self._packages.values()\\n specs = []\\n for i in json_objs:\\n p, v, b = i['name'], i['version'], i['build']\\n specs.append('{} {} {}'.format(p, v, b))\\n return specs\\n\\n\\ncached_actions = {}\\nlast_index_ts = 0\\n\\n\\ndef get_install_actions(prefix, specs, env, retries=0, subdir=None,\\n verbose=True, debug=False, locking=True,\\n bldpkgs_dirs=None, timeout=90, disable_pip=False,\\n max_env_retry=3, output_folder=None, channel_urls=None):\\n global cached_actions\\n global last_index_ts\\n actions = {}\\n log = utils.get_logger(__name__)\\n conda_log_level = logging.WARN\\n specs = list(specs)\\n if verbose:\\n capture = contextlib.contextmanager(lambda: (yield))\\n elif debug:\\n capture = contextlib.contextmanager(lambda: (yield))\\n conda_log_level = logging.DEBUG\\n else:\\n capture = utils.capture\\n for feature, value in feature_list:\\n if value:\\n specs.append('%s@' % feature)\\n\\n bldpkgs_dirs = ensure_list(bldpkgs_dirs)\\n\\n index, index_ts = get_build_index(subdir, list(bldpkgs_dirs)[0], output_folder=output_folder,\\n channel_urls=channel_urls, debug=debug, verbose=verbose,\\n locking=locking, timeout=timeout)\\n specs = tuple(utils.ensure_valid_spec(spec) for spec in specs if not str(spec).endswith('@'))\\n\\n if ((specs, env, subdir, channel_urls, disable_pip) in cached_actions and\\n last_index_ts >= index_ts):\\n actions = cached_actions[(specs, env, subdir, channel_urls, disable_pip)].copy()\\n if \\\"PREFIX\\\" in actions:\\n actions['PREFIX'] = prefix\\n elif specs:\\n # this is hiding output like:\\n # Fetching package metadata ...........\\n # Solving package specifications: ..........\\n with utils.LoggingContext(conda_log_level):\\n with capture():\\n try:\\n actions = install_actions(prefix, index, specs, force=True)\\n except (NoPackagesFoundError, UnsatisfiableError) as exc:\\n raise DependencyNeedsBuildingError(exc, subdir=subdir)\\n except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,\\n CondaError, AssertionError) as exc:\\n if 'lock' in str(exc):\\n log.warn(\\\"failed to get install actions, retrying. 
exception was: %s\\\",\\n str(exc))\\n elif ('requires a minimum conda version' in str(exc) or\\n 'link a source that does not' in str(exc) or\\n isinstance(exc, AssertionError)):\\n locks = utils.get_conda_operation_locks(locking, bldpkgs_dirs, timeout)\\n with utils.try_acquire_locks(locks, timeout=timeout):\\n pkg_dir = str(exc)\\n folder = 0\\n while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:\\n pkg_dir = os.path.dirname(pkg_dir)\\n folder += 1\\n log.warn(\\\"I think conda ended up with a partial extraction for %s. \\\"\\n \\\"Removing the folder and retrying\\\", pkg_dir)\\n if pkg_dir in pkgs_dirs and os.path.isdir(pkg_dir):\\n utils.rm_rf(pkg_dir)\\n if retries < max_env_retry:\\n log.warn(\\\"failed to get install actions, retrying. exception was: %s\\\",\\n str(exc))\\n actions = get_install_actions(prefix, tuple(specs), env,\\n retries=retries + 1,\\n subdir=subdir,\\n verbose=verbose,\\n debug=debug,\\n locking=locking,\\n bldpkgs_dirs=tuple(bldpkgs_dirs),\\n timeout=timeout,\\n disable_pip=disable_pip,\\n max_env_retry=max_env_retry,\\n output_folder=output_folder,\\n channel_urls=tuple(channel_urls))\\n else:\\n log.error(\\\"Failed to get install actions, max retries exceeded.\\\")\\n raise\\n if disable_pip:\\n for pkg in ('pip', 'setuptools', 'wheel'):\\n # specs are the raw specifications, not the conda-derived actual specs\\n # We're testing that pip etc. are manually specified\\n if not any(re.match('^%s(?:$|[\\\\s=].*)' % pkg, str(dep)) for dep in specs):\\n actions['LINK'] = [spec for spec in actions['LINK'] if spec.name != pkg]\\n utils.trim_empty_keys(actions)\\n cached_actions[(specs, env, subdir, channel_urls, disable_pip)] = actions.copy()\\n last_index_ts = index_ts\\n return actions\\n\\n\\ndef create_env(prefix, specs_or_actions, env, config, subdir, clear_cache=True, retry=0,\\n locks=None, is_cross=False, is_conda=False):\\n '''\\n Create a conda envrionment for the given prefix and specs.\\n '''\\n if config.debug:\\n external_logger_context = utils.LoggingContext(logging.DEBUG)\\n else:\\n external_logger_context = utils.LoggingContext(logging.WARN)\\n\\n with external_logger_context:\\n log = utils.get_logger(__name__)\\n\\n # if os.path.isdir(prefix):\\n # utils.rm_rf(prefix)\\n\\n if specs_or_actions: # Don't waste time if there is nothing to do\\n log.debug(\\\"Creating environment in %s\\\", prefix)\\n log.debug(str(specs_or_actions))\\n\\n with utils.path_prepended(prefix):\\n if not locks:\\n locks = utils.get_conda_operation_locks(config)\\n try:\\n with utils.try_acquire_locks(locks, timeout=config.timeout):\\n # input is a list - it's specs in MatchSpec format\\n if not hasattr(specs_or_actions, 'keys'):\\n specs = list(set(specs_or_actions))\\n actions = get_install_actions(prefix, tuple(specs), env,\\n subdir=subdir,\\n verbose=config.verbose,\\n debug=config.debug,\\n locking=config.locking,\\n bldpkgs_dirs=tuple(config.bldpkgs_dirs),\\n timeout=config.timeout,\\n disable_pip=config.disable_pip,\\n max_env_retry=config.max_env_retry,\\n output_folder=config.output_folder,\\n channel_urls=tuple(config.channel_urls))\\n else:\\n actions = specs_or_actions\\n index, index_ts = get_build_index(subdir=subdir,\\n bldpkgs_dir=config.bldpkgs_dir,\\n output_folder=config.output_folder,\\n channel_urls=config.channel_urls,\\n debug=config.debug,\\n verbose=config.verbose,\\n locking=config.locking,\\n timeout=config.timeout)\\n utils.trim_empty_keys(actions)\\n display_actions(actions, index)\\n if utils.on_win:\\n for k, v in 
os.environ.items():\\n os.environ[k] = str(v)\\n execute_actions(actions, index, verbose=config.debug)\\n except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,\\n CondaError) as exc:\\n if ((\\\"too short in\\\" in str(exc) or\\n re.search('post-link failed for: (?:[a-zA-Z]*::)?openssl', str(exc)) or\\n isinstance(exc, PaddingError)) and\\n config.prefix_length > 80):\\n if config.prefix_length_fallback:\\n log.warn(\\\"Build prefix failed with prefix length %d\\\",\\n config.prefix_length)\\n log.warn(\\\"Error was: \\\")\\n log.warn(str(exc))\\n log.warn(\\\"One or more of your package dependencies needs to be rebuilt \\\"\\n \\\"with a longer prefix length.\\\")\\n log.warn(\\\"Falling back to legacy prefix length of 80 characters.\\\")\\n log.warn(\\\"Your package will not install into prefixes > 80 characters.\\\")\\n config.prefix_length = 80\\n\\n host = '_h_env' in prefix\\n # Set this here and use to create environ\\n # Setting this here is important because we use it below (symlink)\\n prefix = config.host_prefix if host else config.build_prefix\\n actions['PREFIX'] = prefix\\n\\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\\n clear_cache=clear_cache, is_cross=is_cross)\\n else:\\n raise\\n elif 'lock' in str(exc):\\n if retry < config.max_env_retry:\\n log.warn(\\\"failed to create env, retrying. exception was: %s\\\", str(exc))\\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\\n elif ('requires a minimum conda version' in str(exc) or\\n 'link a source that does not' in str(exc)):\\n with utils.try_acquire_locks(locks, timeout=config.timeout):\\n pkg_dir = str(exc)\\n folder = 0\\n while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:\\n pkg_dir = os.path.dirname(pkg_dir)\\n folder += 1\\n log.warn(\\\"I think conda ended up with a partial extraction for %s. \\\"\\n \\\"Removing the folder and retrying\\\", pkg_dir)\\n if os.path.isdir(pkg_dir):\\n utils.rm_rf(pkg_dir)\\n if retry < config.max_env_retry:\\n log.warn(\\\"failed to create env, retrying. exception was: %s\\\", str(exc))\\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\\n else:\\n log.error(\\\"Failed to create env, max retries exceeded.\\\")\\n raise\\n else:\\n raise\\n # HACK: some of the time, conda screws up somehow and incomplete packages result.\\n # Just retry.\\n except (AssertionError, IOError, ValueError, RuntimeError, LockError) as exc:\\n if isinstance(exc, AssertionError):\\n with utils.try_acquire_locks(locks, timeout=config.timeout):\\n pkg_dir = os.path.dirname(os.path.dirname(str(exc)))\\n log.warn(\\\"I think conda ended up with a partial extraction for %s. \\\"\\n \\\"Removing the folder and retrying\\\", pkg_dir)\\n if os.path.isdir(pkg_dir):\\n utils.rm_rf(pkg_dir)\\n if retry < config.max_env_retry:\\n log.warn(\\\"failed to create env, retrying. 
exception was: %s\\\", str(exc))\\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\\n else:\\n log.error(\\\"Failed to create env, max retries exceeded.\\\")\\n raise\\n\\n if not is_conda:\\n # Symlinking conda is critical here to make sure that activate scripts are not\\n # accidentally included in packages.\\n if utils.on_win:\\n shell = \\\"cmd.exe\\\"\\n else:\\n shell = \\\"bash\\\"\\n symlink_conda(prefix, sys.prefix, shell)\\n\\n\\ndef clean_pkg_cache(dist, config):\\n locks = []\\n\\n conda_log_level = logging.WARN\\n if config.debug:\\n conda_log_level = logging.DEBUG\\n\\n _pkgs_dirs = pkgs_dirs[:1]\\n if config.locking:\\n locks = [utils.get_lock(folder, timeout=config.timeout) for folder in _pkgs_dirs]\\n with utils.LoggingContext(conda_log_level):\\n with utils.try_acquire_locks(locks, timeout=config.timeout):\\n rmplan = [\\n 'RM_EXTRACTED {0} local::{0}'.format(dist),\\n 'RM_FETCHED {0} local::{0}'.format(dist),\\n ]\\n execute_plan(rmplan)\\n\\n # Conda does not seem to do a complete cleanup sometimes. This is supplemental.\\n # Conda's cleanup is still necessary - it keeps track of its own in-memory\\n # list of downloaded things.\\n for folder in pkgs_dirs:\\n try:\\n assert not os.path.exists(os.path.join(folder, dist))\\n assert not os.path.exists(os.path.join(folder, dist + '.tar.bz2'))\\n for pkg_id in [dist, 'local::' + dist]:\\n assert pkg_id not in package_cache()\\n except AssertionError:\\n log = utils.get_logger(__name__)\\n log.debug(\\\"Conda caching error: %s package remains in cache after removal\\\",\\n dist)\\n log.debug(\\\"manually removing to compensate\\\")\\n cache = package_cache()\\n keys = [key for key in cache.keys() if dist in key]\\n for pkg_id in keys:\\n if pkg_id in cache:\\n del cache[pkg_id]\\n for entry in glob(os.path.join(folder, dist + '*')):\\n utils.rm_rf(entry)\\n\\n\\ndef get_pinned_deps(m, section):\\n with TemporaryDirectory(prefix='_') as tmpdir:\\n actions = get_install_actions(tmpdir,\\n tuple(m.ms_depends(section)), section,\\n subdir=m.config.target_subdir,\\n debug=m.config.debug,\\n verbose=m.config.verbose,\\n locking=m.config.locking,\\n bldpkgs_dirs=tuple(m.config.bldpkgs_dirs),\\n timeout=m.config.timeout,\\n disable_pip=m.config.disable_pip,\\n max_env_retry=m.config.max_env_retry,\\n output_folder=m.config.output_folder,\\n channel_urls=tuple(m.config.channel_urls))\\n runtime_deps = [' '.join(link.dist_name.rsplit('-', 2)) for link in actions.get('LINK', [])]\\n return runtime_deps\\n\",\n \"path\": \"conda_build/environ.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/conda_build/environ.py b/conda_build/environ.py\nindex e67982bfd6..9afe7a026f 100644\n--- a/conda_build/environ.py\n+++ b/conda_build/environ.py\n@@ -154,7 +154,6 @@ def verify_git_repo(git_exe, git_dir, git_url, git_commits_since_tag, debug=Fals\n return OK\n \n \n-@memoized\n def get_git_info(git_exe, repo, debug):\n \"\"\"\n Given a repo to a git repo, return a dictionary of:\ndiff --git a/tests/test-recipes/variants/29_different_git_vars/conda_build_config.yaml b/tests/test-recipes/variants/29_different_git_vars/conda_build_config.yaml\nnew file mode 100644\nindex 0000000000..b6f56ca4bd\n--- /dev/null\n+++ b/tests/test-recipes/variants/29_different_git_vars/conda_build_config.yaml\n@@ -0,0 +1,3 @@\n+git_tag:\n+ - 1.21.11\n+ - 1.20.0\ndiff --git a/tests/test-recipes/variants/29_different_git_vars/meta.yaml 
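The `@memoized` removal in this diff matters because `get_git_info` reads state from the working tree (the currently checked-out tag), not just its arguments, so caching by arguments returns stale values when a second variant checks out a different `git_tag`. A toy illustration, using `functools.lru_cache` as a stand-in for conda-build's own `memoized` decorator:

```python
from functools import lru_cache

checked_out_tag = "1.20.0"

@lru_cache(maxsize=None)
def describe(repo):
    # Depends on external checkout state, so caching on `repo` is unsafe.
    return checked_out_tag

print(describe("recipe"))    # 1.20.0
checked_out_tag = "1.21.11"  # the second variant checks out a new tag
print(describe("recipe"))    # still 1.20.0: the stale hit the diff removes
```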
b/tests/test-recipes/variants/29_different_git_vars/meta.yaml\nnew file mode 100644\nindex 0000000000..0c0231ccb3\n--- /dev/null\n+++ b/tests/test-recipes/variants/29_different_git_vars/meta.yaml\n@@ -0,0 +1,15 @@\n+package:\n+ name: conda-build-test-variant-git\n+ version: {{ GIT_DESCRIBE_TAG }}\n+\n+source:\n+ git_url: https://github.com/conda/conda_build_test_recipe\n+ git_tag: {{ git_tag }}\n+\n+build:\n+ number: {{ GIT_DESCRIBE_NUMBER }}\n+\n+requirements:\n+ build:\n+ - python\n+\ndiff --git a/tests/test_variants.py b/tests/test_variants.py\nindex 58c360c2f4..4b2366ce36 100644\n--- a/tests/test_variants.py\n+++ b/tests/test_variants.py\n@@ -441,3 +441,11 @@ def test_custom_compiler():\n recipe = os.path.join(recipe_dir, '28_custom_compiler')\n ms = api.render(recipe, permit_unsatisfiable_variants=True, finalize=False, bypass_env_check=True)\n assert len(ms) == 3\n+\n+\n+def test_different_git_vars():\n+ recipe = os.path.join(recipe_dir, '29_different_git_vars')\n+ ms = api.render(recipe)\n+ versions = [m[0].version() for m in ms]\n+ assert \"1.20.0\" in versions\n+ assert \"1.21.11\" in versions\n"}}},{"rowIdx":439,"cells":{"in_source_id":{"kind":"string","value":"nltk__nltk-3156"},"issue":{"kind":"string","value":"Class 'CharTokenizer' is missing attribute '_string'\nI think the class `CharTokenizer` is missing the attribute `_string=\"\"`\r\n\r\nhttps://github.com/nltk/nltk/blob/fc53edbf6f0763971afca5855386a2a382da37ac/nltk/tokenize/simple.py#L68-L77\r\n\r\nWithout this attribute, when trying to use the class, I get the following error:\r\n\r\n`TypeError: Can't instantiate abstract class CharTokenizer with abstract method _string`\r\n\r\nExample code:\r\n\r\n```python\r\nfrom nltk.tokenize.simple import CharTokenizer\r\n\r\n\r\ntokenizer = CharTokenizer()\r\n```\r\n\r\nError:\r\n\r\n```bash\r\nTraceback (most recent call last):\r\n File \"/home/francis/.local/share/virtualenvs/cafa-challenge-bUqSu2Tm/lib/python3.10/site-packages/IPython/core/interactiveshell.py\", line 3508, in run_code\r\n exec(code_obj, self.user_global_ns, self.user_ns)\r\n File \"\", line 1, in \r\n tokenizer = CharTokenizer()\r\nTypeError: Can't instantiate abstract class CharTokenizer with abstract method _string\r\n```\n"},"before_files":{"kind":"list like","value":[{"content":"# Natural Language Toolkit: Simple Tokenizers\n#\n# Copyright (C) 2001-2023 NLTK Project\n# Author: Edward Loper \n# Steven Bird \n# URL: \n# For license information, see LICENSE.TXT\n\nr\"\"\"\nSimple Tokenizers\n\nThese tokenizers divide strings into substrings using the string\n``split()`` method.\nWhen tokenizing using a particular delimiter string, use\nthe string ``split()`` method directly, as this is more efficient.\n\nThe simple tokenizers are *not* available as separate functions;\ninstead, you should just use the string ``split()`` method directly:\n\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> s.split() # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',\n 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']\n >>> s.split(' ') # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88\\nin', 'New', 'York.', '',\n 'Please', 'buy', 'me\\ntwo', 'of', 'them.\\n\\nThanks.']\n >>> s.split('\\n') # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. 
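The `TypeError` reported in this issue comes from Python's ABC machinery: the `StringTokenizer` base declares `_string` as an abstract member, and a subclass that never assigns it stays abstract. A condensed reproduction, with class names invented for the sketch (nltk's actual base lives in `nltk.tokenize.api`):

```python
from abc import ABC, abstractmethod

class StringTokenizerSketch(ABC):
    @property
    @abstractmethod
    def _string(self):
        ...

class BrokenCharTokenizer(StringTokenizerSketch):
    pass            # no _string, so the class is still abstract

class FixedCharTokenizer(StringTokenizerSketch):
    _string = None  # any class attribute clears the abstract slot

try:
    BrokenCharTokenizer()
except TypeError as exc:
    print(exc)      # Can't instantiate abstract class BrokenCharTokenizer ...

FixedCharTokenizer()  # instantiates fine, mirroring the fix applied below
```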
Please buy me',\n 'two of them.', '', 'Thanks.']\n\nThe simple tokenizers are mainly useful because they follow the\nstandard ``TokenizerI`` interface, and so can be used with any code\nthat expects a tokenizer. For example, these tokenizers can be used\nto specify the tokenization conventions when building a `CorpusReader`.\n\n\"\"\"\n\nfrom nltk.tokenize.api import StringTokenizer, TokenizerI\nfrom nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize\n\n\nclass SpaceTokenizer(StringTokenizer):\n r\"\"\"Tokenize a string using the space character as a delimiter,\n which is the same as ``s.split(' ')``.\n\n >>> from nltk.tokenize import SpaceTokenizer\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> SpaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88\\nin', 'New', 'York.', '',\n 'Please', 'buy', 'me\\ntwo', 'of', 'them.\\n\\nThanks.']\n \"\"\"\n\n _string = \" \"\n\n\nclass TabTokenizer(StringTokenizer):\n r\"\"\"Tokenize a string use the tab character as a delimiter,\n the same as ``s.split('\\t')``.\n\n >>> from nltk.tokenize import TabTokenizer\n >>> TabTokenizer().tokenize('a\\tb c\\n\\t d')\n ['a', 'b c\\n', ' d']\n \"\"\"\n\n _string = \"\\t\"\n\n\nclass CharTokenizer(StringTokenizer):\n \"\"\"Tokenize a string into individual characters. If this functionality\n is ever required directly, use ``for char in string``.\n \"\"\"\n\n def tokenize(self, s):\n return list(s)\n\n def span_tokenize(self, s):\n yield from enumerate(range(1, len(s) + 1))\n\n\nclass LineTokenizer(TokenizerI):\n r\"\"\"Tokenize a string into its lines, optionally discarding blank lines.\n This is similar to ``s.split('\\n')``.\n\n >>> from nltk.tokenize import LineTokenizer\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. Please buy me',\n 'two of them.', '', 'Thanks.']\n >>> # same as [l for l in s.split('\\n') if l.strip()]:\n >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. Please buy me',\n 'two of them.', 'Thanks.']\n\n :param blanklines: Indicates how blank lines should be handled. 
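The one-liner in `CharTokenizer.span_tokenize` above reads obliquely: pairing `enumerate` with a shifted `range` emits each character's `(start, end)` span. A quick check:

```python
def char_spans(s):
    # enumerate(range(1, len(s) + 1)) pairs index i with end offset i + 1.
    yield from enumerate(range(1, len(s) + 1))

print(list(char_spans("abc")))  # [(0, 1), (1, 2), (2, 3)]
```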
Valid values are:\n\n - ``discard``: strip blank lines out of the token list before returning it.\n A line is considered blank if it contains only whitespace characters.\n - ``keep``: leave all blank lines in the token list.\n - ``discard-eof``: if the string ends with a newline, then do not generate\n a corresponding token ``''`` after that newline.\n \"\"\"\n\n def __init__(self, blanklines=\"discard\"):\n valid_blanklines = (\"discard\", \"keep\", \"discard-eof\")\n if blanklines not in valid_blanklines:\n raise ValueError(\n \"Blank lines must be one of: %s\" % \" \".join(valid_blanklines)\n )\n\n self._blanklines = blanklines\n\n def tokenize(self, s):\n lines = s.splitlines()\n # If requested, strip off blank lines.\n if self._blanklines == \"discard\":\n lines = [l for l in lines if l.rstrip()]\n elif self._blanklines == \"discard-eof\":\n if lines and not lines[-1].strip():\n lines.pop()\n return lines\n\n # discard-eof not implemented\n def span_tokenize(self, s):\n if self._blanklines == \"keep\":\n yield from string_span_tokenize(s, r\"\\n\")\n else:\n yield from regexp_span_tokenize(s, r\"\\n(\\s+\\n)*\")\n\n\n######################################################################\n# { Tokenization Functions\n######################################################################\n# XXX: it is stated in module docs that there is no function versions\n\n\ndef line_tokenize(text, blanklines=\"discard\"):\n return LineTokenizer(blanklines).tokenize(text)\n","path":"nltk/tokenize/simple.py"}],"string":"[\n {\n \"content\": \"# Natural Language Toolkit: Simple Tokenizers\\n#\\n# Copyright (C) 2001-2023 NLTK Project\\n# Author: Edward Loper \\n# Steven Bird \\n# URL: \\n# For license information, see LICENSE.TXT\\n\\nr\\\"\\\"\\\"\\nSimple Tokenizers\\n\\nThese tokenizers divide strings into substrings using the string\\n``split()`` method.\\nWhen tokenizing using a particular delimiter string, use\\nthe string ``split()`` method directly, as this is more efficient.\\n\\nThe simple tokenizers are *not* available as separate functions;\\ninstead, you should just use the string ``split()`` method directly:\\n\\n >>> s = \\\"Good muffins cost $3.88\\\\nin New York. Please buy me\\\\ntwo of them.\\\\n\\\\nThanks.\\\"\\n >>> s.split() # doctest: +NORMALIZE_WHITESPACE\\n ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',\\n 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']\\n >>> s.split(' ') # doctest: +NORMALIZE_WHITESPACE\\n ['Good', 'muffins', 'cost', '$3.88\\\\nin', 'New', 'York.', '',\\n 'Please', 'buy', 'me\\\\ntwo', 'of', 'them.\\\\n\\\\nThanks.']\\n >>> s.split('\\\\n') # doctest: +NORMALIZE_WHITESPACE\\n ['Good muffins cost $3.88', 'in New York. Please buy me',\\n 'two of them.', '', 'Thanks.']\\n\\nThe simple tokenizers are mainly useful because they follow the\\nstandard ``TokenizerI`` interface, and so can be used with any code\\nthat expects a tokenizer. For example, these tokenizers can be used\\nto specify the tokenization conventions when building a `CorpusReader`.\\n\\n\\\"\\\"\\\"\\n\\nfrom nltk.tokenize.api import StringTokenizer, TokenizerI\\nfrom nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize\\n\\n\\nclass SpaceTokenizer(StringTokenizer):\\n r\\\"\\\"\\\"Tokenize a string using the space character as a delimiter,\\n which is the same as ``s.split(' ')``.\\n\\n >>> from nltk.tokenize import SpaceTokenizer\\n >>> s = \\\"Good muffins cost $3.88\\\\nin New York. 
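The three `blanklines` modes of `LineTokenizer` differ only in how empty entries from `splitlines()` are handled. A standalone sketch of the same branching shown above:

```python
def line_tokenize_sketch(s, blanklines="discard"):
    lines = s.splitlines()
    if blanklines == "discard":
        lines = [l for l in lines if l.rstrip()]   # drop every blank line
    elif blanklines == "discard-eof":
        if lines and not lines[-1].strip():
            lines.pop()                            # drop only a trailing blank
    return lines

s = "a\n\nb\n\n"
print(line_tokenize_sketch(s, "keep"))         # ['a', '', 'b', '']
print(line_tokenize_sketch(s, "discard"))      # ['a', 'b']
print(line_tokenize_sketch(s, "discard-eof"))  # ['a', '', 'b']
```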
Please buy me\\\\ntwo of them.\\\\n\\\\nThanks.\\\"\\n >>> SpaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE\\n ['Good', 'muffins', 'cost', '$3.88\\\\nin', 'New', 'York.', '',\\n 'Please', 'buy', 'me\\\\ntwo', 'of', 'them.\\\\n\\\\nThanks.']\\n \\\"\\\"\\\"\\n\\n _string = \\\" \\\"\\n\\n\\nclass TabTokenizer(StringTokenizer):\\n r\\\"\\\"\\\"Tokenize a string use the tab character as a delimiter,\\n the same as ``s.split('\\\\t')``.\\n\\n >>> from nltk.tokenize import TabTokenizer\\n >>> TabTokenizer().tokenize('a\\\\tb c\\\\n\\\\t d')\\n ['a', 'b c\\\\n', ' d']\\n \\\"\\\"\\\"\\n\\n _string = \\\"\\\\t\\\"\\n\\n\\nclass CharTokenizer(StringTokenizer):\\n \\\"\\\"\\\"Tokenize a string into individual characters. If this functionality\\n is ever required directly, use ``for char in string``.\\n \\\"\\\"\\\"\\n\\n def tokenize(self, s):\\n return list(s)\\n\\n def span_tokenize(self, s):\\n yield from enumerate(range(1, len(s) + 1))\\n\\n\\nclass LineTokenizer(TokenizerI):\\n r\\\"\\\"\\\"Tokenize a string into its lines, optionally discarding blank lines.\\n This is similar to ``s.split('\\\\n')``.\\n\\n >>> from nltk.tokenize import LineTokenizer\\n >>> s = \\\"Good muffins cost $3.88\\\\nin New York. Please buy me\\\\ntwo of them.\\\\n\\\\nThanks.\\\"\\n >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\\n ['Good muffins cost $3.88', 'in New York. Please buy me',\\n 'two of them.', '', 'Thanks.']\\n >>> # same as [l for l in s.split('\\\\n') if l.strip()]:\\n >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\\n ['Good muffins cost $3.88', 'in New York. Please buy me',\\n 'two of them.', 'Thanks.']\\n\\n :param blanklines: Indicates how blank lines should be handled. Valid values are:\\n\\n - ``discard``: strip blank lines out of the token list before returning it.\\n A line is considered blank if it contains only whitespace characters.\\n - ``keep``: leave all blank lines in the token list.\\n - ``discard-eof``: if the string ends with a newline, then do not generate\\n a corresponding token ``''`` after that newline.\\n \\\"\\\"\\\"\\n\\n def __init__(self, blanklines=\\\"discard\\\"):\\n valid_blanklines = (\\\"discard\\\", \\\"keep\\\", \\\"discard-eof\\\")\\n if blanklines not in valid_blanklines:\\n raise ValueError(\\n \\\"Blank lines must be one of: %s\\\" % \\\" \\\".join(valid_blanklines)\\n )\\n\\n self._blanklines = blanklines\\n\\n def tokenize(self, s):\\n lines = s.splitlines()\\n # If requested, strip off blank lines.\\n if self._blanklines == \\\"discard\\\":\\n lines = [l for l in lines if l.rstrip()]\\n elif self._blanklines == \\\"discard-eof\\\":\\n if lines and not lines[-1].strip():\\n lines.pop()\\n return lines\\n\\n # discard-eof not implemented\\n def span_tokenize(self, s):\\n if self._blanklines == \\\"keep\\\":\\n yield from string_span_tokenize(s, r\\\"\\\\n\\\")\\n else:\\n yield from regexp_span_tokenize(s, r\\\"\\\\n(\\\\s+\\\\n)*\\\")\\n\\n\\n######################################################################\\n# { Tokenization Functions\\n######################################################################\\n# XXX: it is stated in module docs that there is no function versions\\n\\n\\ndef line_tokenize(text, blanklines=\\\"discard\\\"):\\n return LineTokenizer(blanklines).tokenize(text)\\n\",\n \"path\": \"nltk/tokenize/simple.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# Natural Language Toolkit: Simple Tokenizers\n#\n# Copyright (C) 2001-2023 
NLTK Project\n# Author: Edward Loper \n# Steven Bird \n# URL: \n# For license information, see LICENSE.TXT\n\nr\"\"\"\nSimple Tokenizers\n\nThese tokenizers divide strings into substrings using the string\n``split()`` method.\nWhen tokenizing using a particular delimiter string, use\nthe string ``split()`` method directly, as this is more efficient.\n\nThe simple tokenizers are *not* available as separate functions;\ninstead, you should just use the string ``split()`` method directly:\n\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> s.split() # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',\n 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']\n >>> s.split(' ') # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88\\nin', 'New', 'York.', '',\n 'Please', 'buy', 'me\\ntwo', 'of', 'them.\\n\\nThanks.']\n >>> s.split('\\n') # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. Please buy me',\n 'two of them.', '', 'Thanks.']\n\nThe simple tokenizers are mainly useful because they follow the\nstandard ``TokenizerI`` interface, and so can be used with any code\nthat expects a tokenizer. For example, these tokenizers can be used\nto specify the tokenization conventions when building a `CorpusReader`.\n\n\"\"\"\n\nfrom nltk.tokenize.api import StringTokenizer, TokenizerI\nfrom nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize\n\n\nclass SpaceTokenizer(StringTokenizer):\n r\"\"\"Tokenize a string using the space character as a delimiter,\n which is the same as ``s.split(' ')``.\n\n >>> from nltk.tokenize import SpaceTokenizer\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> SpaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88\\nin', 'New', 'York.', '',\n 'Please', 'buy', 'me\\ntwo', 'of', 'them.\\n\\nThanks.']\n \"\"\"\n\n _string = \" \"\n\n\nclass TabTokenizer(StringTokenizer):\n r\"\"\"Tokenize a string use the tab character as a delimiter,\n the same as ``s.split('\\t')``.\n\n >>> from nltk.tokenize import TabTokenizer\n >>> TabTokenizer().tokenize('a\\tb c\\n\\t d')\n ['a', 'b c\\n', ' d']\n \"\"\"\n\n _string = \"\\t\"\n\n\nclass CharTokenizer(StringTokenizer):\n \"\"\"Tokenize a string into individual characters. If this functionality\n is ever required directly, use ``for char in string``.\n \"\"\"\n\n _string = None\n\n def tokenize(self, s):\n return list(s)\n\n def span_tokenize(self, s):\n yield from enumerate(range(1, len(s) + 1))\n\n\nclass LineTokenizer(TokenizerI):\n r\"\"\"Tokenize a string into its lines, optionally discarding blank lines.\n This is similar to ``s.split('\\n')``.\n\n >>> from nltk.tokenize import LineTokenizer\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. Please buy me',\n 'two of them.', '', 'Thanks.']\n >>> # same as [l for l in s.split('\\n') if l.strip()]:\n >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. Please buy me',\n 'two of them.', 'Thanks.']\n\n :param blanklines: Indicates how blank lines should be handled. 
Valid values are:\n\n - ``discard``: strip blank lines out of the token list before returning it.\n A line is considered blank if it contains only whitespace characters.\n - ``keep``: leave all blank lines in the token list.\n - ``discard-eof``: if the string ends with a newline, then do not generate\n a corresponding token ``''`` after that newline.\n \"\"\"\n\n def __init__(self, blanklines=\"discard\"):\n valid_blanklines = (\"discard\", \"keep\", \"discard-eof\")\n if blanklines not in valid_blanklines:\n raise ValueError(\n \"Blank lines must be one of: %s\" % \" \".join(valid_blanklines)\n )\n\n self._blanklines = blanklines\n\n def tokenize(self, s):\n lines = s.splitlines()\n # If requested, strip off blank lines.\n if self._blanklines == \"discard\":\n lines = [l for l in lines if l.rstrip()]\n elif self._blanklines == \"discard-eof\":\n if lines and not lines[-1].strip():\n lines.pop()\n return lines\n\n # discard-eof not implemented\n def span_tokenize(self, s):\n if self._blanklines == \"keep\":\n yield from string_span_tokenize(s, r\"\\n\")\n else:\n yield from regexp_span_tokenize(s, r\"\\n(\\s+\\n)*\")\n\n\n######################################################################\n# { Tokenization Functions\n######################################################################\n# XXX: it is stated in module docs that there is no function versions\n\n\ndef line_tokenize(text, blanklines=\"discard\"):\n return LineTokenizer(blanklines).tokenize(text)\n","path":"nltk/tokenize/simple.py"}],"string":"[\n {\n \"content\": \"# Natural Language Toolkit: Simple Tokenizers\\n#\\n# Copyright (C) 2001-2023 NLTK Project\\n# Author: Edward Loper \\n# Steven Bird \\n# URL: \\n# For license information, see LICENSE.TXT\\n\\nr\\\"\\\"\\\"\\nSimple Tokenizers\\n\\nThese tokenizers divide strings into substrings using the string\\n``split()`` method.\\nWhen tokenizing using a particular delimiter string, use\\nthe string ``split()`` method directly, as this is more efficient.\\n\\nThe simple tokenizers are *not* available as separate functions;\\ninstead, you should just use the string ``split()`` method directly:\\n\\n >>> s = \\\"Good muffins cost $3.88\\\\nin New York. Please buy me\\\\ntwo of them.\\\\n\\\\nThanks.\\\"\\n >>> s.split() # doctest: +NORMALIZE_WHITESPACE\\n ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',\\n 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']\\n >>> s.split(' ') # doctest: +NORMALIZE_WHITESPACE\\n ['Good', 'muffins', 'cost', '$3.88\\\\nin', 'New', 'York.', '',\\n 'Please', 'buy', 'me\\\\ntwo', 'of', 'them.\\\\n\\\\nThanks.']\\n >>> s.split('\\\\n') # doctest: +NORMALIZE_WHITESPACE\\n ['Good muffins cost $3.88', 'in New York. Please buy me',\\n 'two of them.', '', 'Thanks.']\\n\\nThe simple tokenizers are mainly useful because they follow the\\nstandard ``TokenizerI`` interface, and so can be used with any code\\nthat expects a tokenizer. For example, these tokenizers can be used\\nto specify the tokenization conventions when building a `CorpusReader`.\\n\\n\\\"\\\"\\\"\\n\\nfrom nltk.tokenize.api import StringTokenizer, TokenizerI\\nfrom nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize\\n\\n\\nclass SpaceTokenizer(StringTokenizer):\\n r\\\"\\\"\\\"Tokenize a string using the space character as a delimiter,\\n which is the same as ``s.split(' ')``.\\n\\n >>> from nltk.tokenize import SpaceTokenizer\\n >>> s = \\\"Good muffins cost $3.88\\\\nin New York. 
Please buy me\\\\ntwo of them.\\\\n\\\\nThanks.\\\"\\n >>> SpaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE\\n ['Good', 'muffins', 'cost', '$3.88\\\\nin', 'New', 'York.', '',\\n 'Please', 'buy', 'me\\\\ntwo', 'of', 'them.\\\\n\\\\nThanks.']\\n \\\"\\\"\\\"\\n\\n _string = \\\" \\\"\\n\\n\\nclass TabTokenizer(StringTokenizer):\\n r\\\"\\\"\\\"Tokenize a string use the tab character as a delimiter,\\n the same as ``s.split('\\\\t')``.\\n\\n >>> from nltk.tokenize import TabTokenizer\\n >>> TabTokenizer().tokenize('a\\\\tb c\\\\n\\\\t d')\\n ['a', 'b c\\\\n', ' d']\\n \\\"\\\"\\\"\\n\\n _string = \\\"\\\\t\\\"\\n\\n\\nclass CharTokenizer(StringTokenizer):\\n \\\"\\\"\\\"Tokenize a string into individual characters. If this functionality\\n is ever required directly, use ``for char in string``.\\n \\\"\\\"\\\"\\n\\n _string = None\\n\\n def tokenize(self, s):\\n return list(s)\\n\\n def span_tokenize(self, s):\\n yield from enumerate(range(1, len(s) + 1))\\n\\n\\nclass LineTokenizer(TokenizerI):\\n r\\\"\\\"\\\"Tokenize a string into its lines, optionally discarding blank lines.\\n This is similar to ``s.split('\\\\n')``.\\n\\n >>> from nltk.tokenize import LineTokenizer\\n >>> s = \\\"Good muffins cost $3.88\\\\nin New York. Please buy me\\\\ntwo of them.\\\\n\\\\nThanks.\\\"\\n >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\\n ['Good muffins cost $3.88', 'in New York. Please buy me',\\n 'two of them.', '', 'Thanks.']\\n >>> # same as [l for l in s.split('\\\\n') if l.strip()]:\\n >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\\n ['Good muffins cost $3.88', 'in New York. Please buy me',\\n 'two of them.', 'Thanks.']\\n\\n :param blanklines: Indicates how blank lines should be handled. 
Valid values are:\\n\\n - ``discard``: strip blank lines out of the token list before returning it.\\n A line is considered blank if it contains only whitespace characters.\\n - ``keep``: leave all blank lines in the token list.\\n - ``discard-eof``: if the string ends with a newline, then do not generate\\n a corresponding token ``''`` after that newline.\\n \\\"\\\"\\\"\\n\\n def __init__(self, blanklines=\\\"discard\\\"):\\n valid_blanklines = (\\\"discard\\\", \\\"keep\\\", \\\"discard-eof\\\")\\n if blanklines not in valid_blanklines:\\n raise ValueError(\\n \\\"Blank lines must be one of: %s\\\" % \\\" \\\".join(valid_blanklines)\\n )\\n\\n self._blanklines = blanklines\\n\\n def tokenize(self, s):\\n lines = s.splitlines()\\n # If requested, strip off blank lines.\\n if self._blanklines == \\\"discard\\\":\\n lines = [l for l in lines if l.rstrip()]\\n elif self._blanklines == \\\"discard-eof\\\":\\n if lines and not lines[-1].strip():\\n lines.pop()\\n return lines\\n\\n # discard-eof not implemented\\n def span_tokenize(self, s):\\n if self._blanklines == \\\"keep\\\":\\n yield from string_span_tokenize(s, r\\\"\\\\n\\\")\\n else:\\n yield from regexp_span_tokenize(s, r\\\"\\\\n(\\\\s+\\\\n)*\\\")\\n\\n\\n######################################################################\\n# { Tokenization Functions\\n######################################################################\\n# XXX: it is stated in module docs that there is no function versions\\n\\n\\ndef line_tokenize(text, blanklines=\\\"discard\\\"):\\n return LineTokenizer(blanklines).tokenize(text)\\n\",\n \"path\": \"nltk/tokenize/simple.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/nltk/test/unit/test_tokenize.py b/nltk/test/unit/test_tokenize.py\nindex 7688f52397..662b4562b2 100644\n--- a/nltk/test/unit/test_tokenize.py\n+++ b/nltk/test/unit/test_tokenize.py\n@@ -16,6 +16,7 @@\n sent_tokenize,\n word_tokenize,\n )\n+from nltk.tokenize.simple import CharTokenizer\n \n \n def load_stanford_segmenter():\n@@ -865,3 +866,21 @@ class ExtLangVars(punkt.PunktLanguageVars):\n )\n def test_sent_tokenize(self, sentences: str, expected: List[str]):\n assert sent_tokenize(sentences) == expected\n+\n+ def test_string_tokenizer(self) -> None:\n+ sentence = \"Hello there\"\n+ tokenizer = CharTokenizer()\n+ assert tokenizer.tokenize(sentence) == list(sentence)\n+ assert list(tokenizer.span_tokenize(sentence)) == [\n+ (0, 1),\n+ (1, 2),\n+ (2, 3),\n+ (3, 4),\n+ (4, 5),\n+ (5, 6),\n+ (6, 7),\n+ (7, 8),\n+ (8, 9),\n+ (9, 10),\n+ (10, 11),\n+ ]\ndiff --git a/nltk/tokenize/simple.py b/nltk/tokenize/simple.py\nindex 71a02d3098..54b2bf8440 100644\n--- a/nltk/tokenize/simple.py\n+++ b/nltk/tokenize/simple.py\n@@ -70,6 +70,8 @@ class CharTokenizer(StringTokenizer):\n is ever required directly, use ``for char in string``.\n \"\"\"\n \n+ _string = None\n+\n def tokenize(self, s):\n return list(s)\n \n"}}},{"rowIdx":440,"cells":{"in_source_id":{"kind":"string","value":"wemake-services__wemake-python-styleguide-200"},"issue":{"kind":"string","value":"Feature: allow magic numbers in async functions constructors\nWe check that some magic numbers can be used in function constructors like so:\r\n\r\n```python\r\ndef some_function(price, delta=0.1):\r\n return price * delta\r\n```\r\n\r\nBut, we only allow regular functions, not `async` ones: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/numbers.py#L19-L21\r\n\r\nWhat we need to do is:\r\n1. 
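Default argument values parse identically for `def` and `async def`; the only difference the checker sees is the type of the enclosing function node, which is why the whitelist tuple needs both entries, as shown below (the `ALLOWED_PARENTS` tuple is re-created here for the sketch):

```python
import ast

ALLOWED_PARENTS = (ast.FunctionDef, ast.AsyncFunctionDef, ast.arguments)

tree = ast.parse("async def pay(price, delta=0.1):\n    return price * delta")
func = tree.body[0]
print(type(func).__name__)                # AsyncFunctionDef
print(isinstance(func, ALLOWED_PARENTS))  # True once AsyncFunctionDef is listed
```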
Add `ast.AsyncFunctionDef` to the allowed list\r\n2. Write a unit test for it: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_general/test_magic_numbers.py\n"},"before_files":{"kind":"list like","value":[{"content":"# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n # TODO: make consistent naming rules for class attributes:\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n","path":"wemake_python_styleguide/visitors/ast/numbers.py"}],"string":"[\n {\n \"content\": \"# -*- coding: utf-8 -*-\\n\\nimport ast\\nfrom typing import Optional\\n\\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\\nfrom wemake_python_styleguide.violations.best_practices import (\\n MagicNumberViolation,\\n)\\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\\n\\n\\nclass MagicNumberVisitor(BaseNodeVisitor):\\n \\\"\\\"\\\"Checks magic numbers used in the code.\\\"\\\"\\\"\\n\\n _ALLOWED_PARENTS = (\\n ast.Assign,\\n\\n # Constructor usages:\\n ast.FunctionDef,\\n ast.arguments,\\n\\n # Primitives:\\n ast.List,\\n ast.Dict,\\n ast.Set,\\n ast.Tuple,\\n )\\n\\n # TODO: make consistent naming rules for class attributes:\\n _PROXY_PARENTS = (\\n ast.UnaryOp,\\n )\\n\\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\\n \\\"\\\"\\\"\\n Returns real number's parent.\\n\\n What can go wrong?\\n\\n 1. 
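`_get_real_parent` exists because a negative literal such as `-1` parses as a `UnaryOp` wrapping the number, so the interesting parent sits one level higher. A short check (on Python 3.8+ the literal prints as `Constant`; older interpreters, which this visitor's `visit_Num` targets, print `Num`):

```python
import ast

assign = ast.parse("x = -1").body[0]
unary = assign.value
print(type(assign).__name__)         # Assign, the "real" parent
print(type(unary).__name__)          # UnaryOp, the proxy to skip over
print(type(unary.operand).__name__)  # Constant (Num on older Pythons)
```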
Number can be negative: ``x = -1``,\\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\\n\\n \\\"\\\"\\\"\\n parent = getattr(node, 'parent', None)\\n if isinstance(parent, self._PROXY_PARENTS):\\n return self._get_real_parent(parent)\\n return parent\\n\\n def _check_is_magic(self, node: ast.Num) -> None:\\n parent = self._get_real_parent(node)\\n if isinstance(parent, self._ALLOWED_PARENTS):\\n return\\n\\n if node.n in MAGIC_NUMBERS_WHITELIST:\\n return\\n\\n if isinstance(node.n, int) and node.n <= 10:\\n return\\n\\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\\n\\n def visit_Num(self, node: ast.Num) -> None:\\n \\\"\\\"\\\"\\n Checks numbers not to be magic constants inside the code.\\n\\n Raises:\\n MagicNumberViolation\\n\\n \\\"\\\"\\\"\\n self._check_is_magic(node)\\n self.generic_visit(node)\\n\",\n \"path\": \"wemake_python_styleguide/visitors/ast/numbers.py\"\n }\n]"},"after_files":{"kind":"list like","value":[{"content":"# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.AsyncFunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n","path":"wemake_python_styleguide/visitors/ast/numbers.py"}],"string":"[\n {\n \"content\": \"# -*- coding: utf-8 -*-\\n\\nimport ast\\nfrom typing import Optional\\n\\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\\nfrom wemake_python_styleguide.violations.best_practices import (\\n MagicNumberViolation,\\n)\\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\\n\\n\\nclass MagicNumberVisitor(BaseNodeVisitor):\\n \\\"\\\"\\\"Checks magic numbers used in the code.\\\"\\\"\\\"\\n\\n _ALLOWED_PARENTS = (\\n ast.Assign,\\n\\n # Constructor usages:\\n ast.FunctionDef,\\n ast.AsyncFunctionDef,\\n ast.arguments,\\n\\n # Primitives:\\n ast.List,\\n ast.Dict,\\n ast.Set,\\n ast.Tuple,\\n )\\n\\n _PROXY_PARENTS = (\\n ast.UnaryOp,\\n )\\n\\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\\n \\\"\\\"\\\"\\n Returns real number's parent.\\n\\n What can go wrong?\\n\\n 1. 
Number can be negative: ``x = -1``,\\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\\n\\n \\\"\\\"\\\"\\n parent = getattr(node, 'parent', None)\\n if isinstance(parent, self._PROXY_PARENTS):\\n return self._get_real_parent(parent)\\n return parent\\n\\n def _check_is_magic(self, node: ast.Num) -> None:\\n parent = self._get_real_parent(node)\\n if isinstance(parent, self._ALLOWED_PARENTS):\\n return\\n\\n if node.n in MAGIC_NUMBERS_WHITELIST:\\n return\\n\\n if isinstance(node.n, int) and node.n <= 10:\\n return\\n\\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\\n\\n def visit_Num(self, node: ast.Num) -> None:\\n \\\"\\\"\\\"\\n Checks numbers not to be magic constants inside the code.\\n\\n Raises:\\n MagicNumberViolation\\n\\n \\\"\\\"\\\"\\n self._check_is_magic(node)\\n self.generic_visit(node)\\n\",\n \"path\": \"wemake_python_styleguide/visitors/ast/numbers.py\"\n }\n]"},"pr_diff":{"kind":"string","value":"diff --git a/tests/test_visitors/test_ast/test_general/test_magic_numbers.py b/tests/test_visitors/test_ast/test_general/test_magic_numbers.py\nindex 0b1128a40..98d860c81 100644\n--- a/tests/test_visitors/test_ast/test_general/test_magic_numbers.py\n+++ b/tests/test_visitors/test_ast/test_general/test_magic_numbers.py\n@@ -20,6 +20,11 @@ def function_name(param1, param2={0}):\n return param1 / param2\n \"\"\"\n \n+async_function_definition = \"\"\"\n+async def function_name(param1, param2={0}):\n+ return param1 / param2\n+\"\"\"\n+\n list_definition = '[{0}]'\n dict_definition_key = '{{{0}: \"value\"}}'\n dict_definition_value = '{{\"first\": {0}}}'\n@@ -47,6 +52,7 @@ def function_name(param1, param2={0}):\n assignment,\n assignment_unary,\n function_definition,\n+ async_function_definition,\n list_definition,\n dict_definition_key,\n dict_definition_value,\ndiff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py\nindex 30a8859b9..1bb34a335 100644\n--- a/wemake_python_styleguide/visitors/ast/numbers.py\n+++ b/wemake_python_styleguide/visitors/ast/numbers.py\n@@ -18,6 +18,7 @@ class MagicNumberVisitor(BaseNodeVisitor):\n \n # Constructor usages:\n ast.FunctionDef,\n+ ast.AsyncFunctionDef,\n ast.arguments,\n \n # Primitives:\n"}}},{"rowIdx":441,"cells":{"in_source_id":{"kind":"string","value":"learningequality__kolibri-8895"},"issue":{"kind":"string","value":"Reports - Missing answered question data after upgrade from 0.14.7 to 0.15\n## Observed behavior\r\nThis issue is a follow-up of https://github.com/learningequality/kolibri/pull/8818\r\nWhen I'm logged in as a Coach and I go to the reports immediately after I've upgraded Kolibri from 0.14.7 to 0.15 and I attempt to go through the completed lessons or quizzes I'm not able to see the actual questions, instead I see the following text: No attempts made on this question.\r\n\r\n## Expected behavior\r\nAll the completion details should be displayed.\r\n\r\n## Steps to reproduce the issue\r\n1. Install the a 0.14.7 version of Kolibri from [here](https://learningequality.org/r/kolibri-windows-setup-latest).\r\n2. Setup a facility, create classes, users, lesson etc and complete a lesson and a quiz using exercises such as CK12's 'Make 10 (grids and number bonds)'.\r\n3. Upgrade to the 0.15 version by Installing the following [build.](https://buildkite.com/learningequality/kolibri-python-package/builds/4467).\r\n4. Go to Coach>Reports and observe the reports for the completed lesson and quiz. 
(Note that after restart of Kolibri the issue is observed only for the completed lesson while the quiz data is displayed correctly)\r\n\r\n## Additional information\r\n\r\n![2021-12-07_16-19-30](https://user-images.githubusercontent.com/79847249/145059555-47a4535f-9f9c-4118-b059-05487be78d51.png)\r\n\r\n## Logs and DB files: \r\n[UbuntuDBlogs.zip](https://github.com/learningequality/kolibri/files/7669547/UbuntuDBlogs.zip)\r\n[WindowsDBLogs.zip](https://github.com/learningequality/kolibri/files/7669548/WindowsDBLogs.zip)\r\n\r\n## Usage Details\r\n - OS: Windows 10\r\n - Browser: Chrome\n"},"before_files":{"kind":"list like","value":[{"content":"import logging\nfrom datetime import timedelta\nfrom itertools import groupby\nfrom random import randint\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.http import Http404\nfrom django_filters.rest_framework import CharFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework import FilterSet\nfrom django_filters.rest_framework import UUIDFilter\nfrom le_utils.constants import content_kinds\nfrom le_utils.constants import exercises\nfrom rest_framework import filters\nfrom rest_framework import serializers\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\n\nfrom .models import AttemptLog\nfrom .models import ContentSessionLog\nfrom .models import ContentSummaryLog\nfrom .models import MasteryLog\nfrom kolibri.core.api import ReadOnlyValuesViewset\nfrom kolibri.core.auth.api import KolibriAuthPermissions\nfrom kolibri.core.auth.api import KolibriAuthPermissionsFilter\nfrom kolibri.core.auth.models import dataset_cache\nfrom kolibri.core.content.api import OptionalPageNumberPagination\nfrom kolibri.core.content.models import AssessmentMetaData\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.constants import interaction_types\nfrom kolibri.core.logger.constants.exercise_attempts import MAPPING\nfrom kolibri.core.notifications.api import create_summarylog\nfrom kolibri.core.notifications.api import parse_attemptslog\nfrom kolibri.core.notifications.api import parse_summarylog\nfrom kolibri.core.notifications.api import quiz_answered_notification\nfrom kolibri.core.notifications.api import quiz_completed_notification\nfrom kolibri.core.notifications.api import quiz_started_notification\nfrom kolibri.core.notifications.tasks import wrap_to_save_queue\nfrom kolibri.utils.time_utils import local_now\n\nlogger = logging.getLogger(__name__)\n\n\nclass HexStringUUIDField(serializers.UUIDField):\n def __init__(self, **kwargs):\n self.uuid_format = \"hex\"\n super(HexStringUUIDField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n return super(HexStringUUIDField, self).to_internal_value(data).hex\n\n\nclass StartSessionSerializer(serializers.Serializer):\n lesson_id = HexStringUUIDField(required=False)\n node_id = HexStringUUIDField(required=False)\n # Do this as a special way of handling our coach generated quizzes\n quiz_id = HexStringUUIDField(required=False)\n # A flag to indicate whether to start the session over again\n repeat = serializers.BooleanField(required=False, 
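`HexStringUUIDField` above normalizes whatever UUID representation the client sends into the bare 32-character hex string the log models store. The conversion itself is plain `uuid`; the helper name here is invented for the sketch:

```python
import uuid

def to_hex_id(data):
    # Accept dashed, braced, or hex input; return the canonical hex form.
    return uuid.UUID(str(data)).hex

print(to_hex_id("8bfe7e74-e4e3-4466-8be4-51c0d3f7e346"))
# 8bfe7e74e4e344668be451c0d3f7e346
```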
default=False)\n\n def validate(self, data):\n if \"quiz_id\" in data and (\"lesson_id\" in data or \"node_id\" in data):\n raise ValidationError(\"quiz_id must not be mixed with other context\")\n if \"node_id\" not in data and \"quiz_id\" not in data:\n raise ValidationError(\"node_id is required if not a coach assigned quiz\")\n return data\n\n\nclass InteractionSerializer(serializers.Serializer):\n id = HexStringUUIDField(required=False)\n item = serializers.CharField()\n correct = serializers.FloatField(min_value=0, max_value=1)\n complete = serializers.BooleanField(required=False, default=False)\n time_spent = serializers.FloatField(min_value=0)\n\n answer = serializers.DictField(required=False)\n simple_answer = serializers.CharField(required=False, allow_blank=True)\n error = serializers.BooleanField(required=False, default=False)\n hinted = serializers.BooleanField(required=False, default=False)\n # Whether to replace the current answer with the new answer\n # this is a no-op if the attempt is being created.\n replace = serializers.BooleanField(required=False, default=False)\n\n def validate(self, data):\n if not data[\"error\"] and \"answer\" not in data:\n raise ValidationError(\"Must provide an answer if not an error\")\n return data\n\n\nclass UpdateSessionSerializer(serializers.Serializer):\n progress_delta = serializers.FloatField(min_value=0, max_value=1.0, required=False)\n progress = serializers.FloatField(min_value=0, max_value=1.0, required=False)\n time_spent_delta = serializers.FloatField(min_value=0, required=False)\n extra_fields = serializers.DictField(required=False)\n interactions = InteractionSerializer(required=False, many=True)\n\n def validate(self, data):\n if \"progress_delta\" in data and \"progress\" in data:\n raise ValidationError(\n \"must not pass progress_delta and progress in the same request\"\n )\n return data\n\n\n# The lowest integer that can be encoded\n# in a Django IntegerField across all backends\nMIN_INTEGER = -2147483648\n\n\nattemptlog_fields = [\n \"id\",\n \"correct\",\n \"complete\",\n \"hinted\",\n \"error\",\n \"item\",\n \"answer\",\n \"time_spent\",\n]\n\n\nclass LogContext(object):\n \"\"\"\n Object used to provide a limited dict like interface for encoding the\n context that can be stored in the sessionlog, and which is then\n returned to the frontend as part of the initialization of a content\n session.\n node_id - represents a specific ContentNode in a topic tree, while the\n content_id for that node is recorded directly on the sessionlog.\n quiz_id - represents the id of the Exam Model object that this session\n is regarding (if any).\n lesson_id - represents the id of the lesson this node_id is being engaged\n with from within (if any).\n mastery_level - represents the current 'try' at an assessment, whether an exercise\n a practice quiz or a coach assigned quiz. 
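The `__slots__`-based guards described here mean unknown keys are silently dropped, so only the four whitelisted context values can ever be persisted into `sessionlog.extra_fields`. A condensed re-creation for illustration (the real class also supports item assignment and lookup):

```python
class LogContextSketch:
    __slots__ = ("node_id", "quiz_id", "lesson_id", "mastery_level")

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            if key in self.__slots__:   # anything else is ignored
                setattr(self, key, value)

    def to_dict(self):
        return {s: getattr(self, s) for s in self.__slots__ if hasattr(self, s)}

ctx = LogContextSketch(node_id="abc123", unexpected="dropped")
print(ctx.to_dict())  # {'node_id': 'abc123'}
```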
Different mastery_level values\n indicate a different try at the assessment.\n\n This is used to encode the values that are sent when initializing a session\n (see its use in the _get_context method below)\n and then also used to hold the values from an existing sessionlog when\n updating a session (see _update_session method).\n \"\"\"\n\n __slots__ = \"node_id\", \"quiz_id\", \"lesson_id\", \"mastery_level\"\n\n def __init__(self, **kwargs):\n for key, value in kwargs.items():\n self[key] = value\n\n def __setitem__(self, key, value):\n if key not in self.__slots__:\n return\n setattr(self, key, value)\n\n def __getitem__(self, key):\n if key not in self.__slots__:\n return\n return getattr(self, key, None)\n\n def __contains__(self, key):\n return key in self.__slots__ and hasattr(self, key)\n\n def to_dict(self):\n \"\"\"\n Provide a dictionary of the keys stored in the context object.\n Used to serialize for inclusion in an API Response.\n \"\"\"\n output = {}\n for slot in self.__slots__:\n if hasattr(self, slot):\n output[slot] = getattr(self, slot)\n return output\n\n\nclass ProgressTrackingViewSet(viewsets.GenericViewSet):\n def _precache_dataset_id(self, user):\n if user is None or user.is_anonymous():\n return\n key = ContentSessionLog.get_related_dataset_cache_key(\n user.id, user._meta.db_table\n )\n dataset_cache.set(key, user.dataset_id)\n\n def _check_quiz_permissions(self, user, quiz_id):\n if user.is_anonymous():\n raise PermissionDenied(\"Cannot access a quiz if not logged in\")\n if not Exam.objects.filter(\n active=True,\n assignments__collection_id__in=user.memberships.all().values(\n \"collection_id\"\n ),\n id=quiz_id,\n ).exists():\n raise PermissionDenied(\"User does not have access to this quiz_id\")\n\n def _check_lesson_permissions(self, user, lesson_id):\n if user.is_anonymous():\n raise PermissionDenied(\"Cannot access a lesson if not logged in\")\n if not Lesson.objects.filter(\n lesson_assignments__collection_id__in=user.memberships.all().values(\n \"collection_id\"\n ),\n id=lesson_id,\n ).exists():\n raise ValidationError(\"Invalid lesson_id\")\n\n def _get_context(self, user, validated_data):\n node_id = validated_data.get(\"node_id\")\n quiz_id = validated_data.get(\"quiz_id\")\n lesson_id = validated_data.get(\"lesson_id\")\n\n context = LogContext()\n\n if node_id is not None:\n try:\n node = (\n ContentNode.objects.annotate(\n mastery_model=Subquery(\n AssessmentMetaData.objects.filter(\n contentnode_id=OuterRef(\"id\")\n ).values_list(\"mastery_model\", flat=True)[:1]\n )\n )\n .values(\"content_id\", \"channel_id\", \"kind\", \"mastery_model\")\n .get(id=node_id)\n )\n mastery_model = node[\"mastery_model\"]\n content_id = node[\"content_id\"]\n channel_id = node[\"channel_id\"]\n kind = node[\"kind\"]\n context[\"node_id\"] = node_id\n if lesson_id:\n self._check_lesson_permissions(user, lesson_id)\n context[\"lesson_id\"] = lesson_id\n except ContentNode.DoesNotExist:\n raise ValidationError(\"Invalid node_id\")\n elif quiz_id is not None:\n self._check_quiz_permissions(user, quiz_id)\n mastery_model = {\"type\": \"quiz\", \"coach_assigned\": True}\n content_id = quiz_id\n channel_id = None\n kind = content_kinds.QUIZ\n context[\"quiz_id\"] = quiz_id\n return content_id, channel_id, kind, mastery_model, context\n\n def _get_or_create_summarylog(\n self,\n user,\n content_id,\n channel_id,\n kind,\n mastery_model,\n start_timestamp,\n repeat,\n context,\n ):\n if not user:\n output = {\n \"progress\": 0,\n \"extra_fields\": {},\n \"time_spent\": 
0,\n \"complete\": False,\n }\n if mastery_model:\n output.update(\n {\n \"mastery_criterion\": mastery_model,\n \"pastattempts\": [],\n \"totalattempts\": 0,\n \"complete\": False,\n }\n )\n return output\n\n try:\n summarylog = ContentSummaryLog.objects.get(\n content_id=content_id,\n user=user,\n )\n updated_fields = (\"end_timestamp\", \"channel_id\", \"_morango_dirty_bit\")\n if repeat:\n summarylog.progress = 0\n updated_fields += (\"progress\",)\n summarylog.channel_id = channel_id\n summarylog.end_timestamp = start_timestamp\n summarylog.save(update_fields=updated_fields)\n except ContentSummaryLog.DoesNotExist:\n summarylog = ContentSummaryLog.objects.create(\n content_id=content_id,\n user=user,\n channel_id=channel_id,\n kind=kind,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n )\n self._process_created_notification(summarylog, context)\n\n output = {\n \"progress\": summarylog.progress,\n \"extra_fields\": summarylog.extra_fields,\n \"time_spent\": summarylog.time_spent,\n \"complete\": summarylog.progress >= 1,\n }\n if mastery_model:\n assessment_output, mastery_level = self._start_assessment_session(\n mastery_model,\n summarylog,\n user,\n start_timestamp,\n repeat,\n context,\n )\n output.update(assessment_output)\n context[\"mastery_level\"] = mastery_level\n return output\n\n def create(self, request):\n \"\"\"\n Make a POST request to start a content session.\n\n Requires one of either:\n - node_id: the pk of the resource\n - quiz_id: the pk of the quiz (Exam) object\n\n Optional parameters:\n - repeat: whether to reset previous progress on this content to zero and start fresh\n - lesson_id: if this is being engaged within a lesson\n\n Returns object with properties:\n - session_id: id of the session object that was created by this call\n - context: contains node_id, quiz_id, lesson_id, and mastery_level as appropriate\n - progress: any previous progress on this content resource\n - time_spent: any previous time spent on this content resource\n - extra_fields: any previously recorded additional data stored for this resource\n - complete: whether this resource is completed by this user\n\n If this is an assessment, return object will also include:\n - mastery_criterion: mastery criterion that should be applied to determine completion\n - pastattempts: serialized subset of recent responses, used to determine completion\n - totalattempts: total number of previous responses within this run of the assessment resource\n \"\"\"\n serializer = StartSessionSerializer(\n data=request.data, context={\"request\": request}\n )\n serializer.is_valid(raise_exception=True)\n start_timestamp = local_now()\n repeat = serializer.validated_data[\"repeat\"]\n\n content_id, channel_id, kind, mastery_model, context = self._get_context(\n request.user, serializer.validated_data\n )\n\n with transaction.atomic(), dataset_cache:\n\n user = None if request.user.is_anonymous() else request.user\n\n self._precache_dataset_id(user)\n\n output = self._get_or_create_summarylog(\n user,\n content_id,\n channel_id,\n kind,\n mastery_model,\n start_timestamp,\n repeat,\n context,\n )\n\n # Must ensure there is no user here to maintain user privacy for logging.\n visitor_id = (\n request.COOKIES.get(\"visitor_id\")\n if hasattr(request, \"COOKIES\") and not user\n else None\n )\n sessionlog = ContentSessionLog.objects.create(\n content_id=content_id,\n channel_id=channel_id,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n user=user,\n kind=kind,\n 
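The `visitor_id` handling just above is deliberately one-sided: the cookie is recorded only for anonymous sessions, so signed-in activity is never tied to a browser-level identifier. The rule reduces to the following sketch (helper name invented):

```python
def visitor_id_for(cookies, user):
    # Only anonymous sessions may carry the visitor cookie.
    if user is not None:
        return None
    return cookies.get("visitor_id")

print(visitor_id_for({"visitor_id": "v-1"}, None))       # 'v-1'
print(visitor_id_for({"visitor_id": "v-1"}, "learner"))  # None
```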
visitor_id=visitor_id,\n extra_fields={\"context\": context.to_dict()},\n )\n output.update({\"session_id\": sessionlog.id, \"context\": context.to_dict()})\n return Response(output)\n\n def _process_created_notification(self, summarylog, context):\n # dont create notifications upon creating a summary log for an exercise\n # notifications should only be triggered upon first attempting a question in the exercise\n if \"node_id\" in context and summarylog.kind != content_kinds.EXERCISE:\n # We have sufficient information to only trigger notifications for the specific\n # lesson that this is being engaged with, but until we can work out the exact\n # way that we want to match this with contextual progress tracking, we are\n # not changing this for now.\n wrap_to_save_queue(\n create_summarylog,\n summarylog,\n )\n\n def _process_masterylog_created_notification(self, masterylog, context):\n if \"quiz_id\" in context:\n wrap_to_save_queue(\n quiz_started_notification, masterylog, context[\"quiz_id\"]\n )\n\n def _check_quiz_log_permissions(self, masterylog):\n if (\n masterylog\n and masterylog.complete\n and masterylog.mastery_criterion.get(\"type\") == \"quiz\"\n and masterylog.mastery_criterion.get(\"coach_assigned\")\n ):\n raise PermissionDenied(\"Cannot update a finished coach assigned quiz\")\n\n def _get_or_create_masterylog(\n self,\n user,\n summarylog,\n repeat,\n mastery_model,\n start_timestamp,\n context,\n ):\n masterylog = (\n MasteryLog.objects.filter(\n summarylog=summarylog,\n user=user,\n )\n .order_by(\"-complete\", \"-end_timestamp\")\n .first()\n )\n\n if masterylog is None or (masterylog.complete and repeat):\n # There is no previous masterylog, or the previous masterylog\n # is complete, and the request is requesting a new attempt.\n # Here we generate a mastery_level value - this serves to disambiguate multiple\n # retries at an assessment (either an exercise, practice quiz, or coach assigned quiz).\n # Having the same mastery_level/summarylog (and hence user) pair will result in the same\n # identifier being created. 
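The collision estimate quoted in the comments above can be checked directly with the stated formula, n = sqrt(2 * d * ln(1 / (1 - p))), using d = 10^9 distinct 9-digit values and p = one in a million:

```python
from math import log, sqrt

d = 10 ** 9      # distinct 9-digit mastery_level values
p = 0.000001     # one-in-a-million collision probability
n = sqrt(2 * d * log(1 / (1 - p)))
print(round(n, 1))  # 44.7, i.e. roughly the 45 tries the comment cites
```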
So if the same user engages with the same assessment on different\n # devices, when the data synchronizes, if the mastery_level is the same, this data will be\n # unified under a single try.\n if mastery_model.get(\"coach_assigned\"):\n # To prevent coach assigned quiz mastery logs from propagating to older\n # Kolibri versions, we use negative mastery levels for these.\n # In older versions of Kolibri the mastery_level is validated to be\n # between 1 and 10 - so these values will fail validation and hence will\n # not be deserialized from the morango store.\n # We choose a random integer across the range of acceptable values,\n # in order to prevent collisions across multiple devices when users\n # start different tries of the same coach assigned quiz.\n # With a length of 9 digits for the decimal number, we would need approximately\n # 45 tries to have a 1 in a million chance of a collision.\n # Numbers derived using the formula for the generalized birthday problem:\n # https://en.wikipedia.org/wiki/Birthday_problem#The_generalized_birthday_problem\n # n=sqrt(2*d*ln(1/(1-p))\n # where d is the number of combinations of d digits, p is the probability\n # So for 9 digits, d = 10^9\n # p = 0.000001 for one in a million\n mastery_level = randint(MIN_INTEGER, -1)\n else:\n mastery_level = (\n masterylog.mastery_level + 1 if masterylog is not None else 1\n )\n\n masterylog = MasteryLog.objects.create(\n summarylog=summarylog,\n user=user,\n mastery_criterion=mastery_model,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n mastery_level=mastery_level,\n )\n self._process_masterylog_created_notification(masterylog, context)\n else:\n self._check_quiz_log_permissions(masterylog)\n return masterylog\n\n def _start_assessment_session(\n self, mastery_model, summarylog, user, start_timestamp, repeat, context\n ):\n masterylog = self._get_or_create_masterylog(\n user,\n summarylog,\n repeat,\n mastery_model,\n start_timestamp,\n context,\n )\n\n mastery_criterion = masterylog.mastery_criterion\n exercise_type = mastery_criterion.get(\"type\")\n attemptlogs = masterylog.attemptlogs.values(*attemptlog_fields).order_by(\n \"-start_timestamp\"\n )\n\n # get the first x logs depending on the exercise type\n if exercise_type == exercises.M_OF_N:\n attemptlogs = attemptlogs[: mastery_criterion[\"n\"]]\n elif exercise_type in MAPPING:\n attemptlogs = attemptlogs[: MAPPING[exercise_type]]\n elif exercise_type == \"quiz\":\n attemptlogs = attemptlogs.order_by()\n else:\n attemptlogs = attemptlogs[:10]\n\n return {\n \"mastery_criterion\": mastery_criterion,\n \"pastattempts\": attemptlogs,\n \"totalattempts\": masterylog.attemptlogs.count(),\n \"complete\": masterylog.complete,\n }, masterylog.mastery_level\n\n def _generate_interaction_summary(self, validated_data):\n if validated_data[\"error\"]:\n return {\n \"type\": interaction_types.ERROR,\n }\n elif validated_data[\"hinted\"]:\n return {\n \"type\": interaction_types.HINT,\n \"answer\": validated_data[\"answer\"],\n }\n return {\n \"type\": interaction_types.ANSWER,\n \"answer\": validated_data[\"answer\"],\n \"correct\": validated_data[\"correct\"],\n }\n\n def _process_masterylog_completed_notification(self, masterylog, context):\n if \"quiz_id\" in context:\n wrap_to_save_queue(\n quiz_completed_notification, masterylog, context[\"quiz_id\"]\n )\n\n def _update_and_return_mastery_log_id(\n self, user, complete, summarylog_id, end_timestamp, context\n ):\n if not user.is_anonymous() and context[\"mastery_level\"] is not None:\n 
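`_start_assessment_session` returns only the window of past attempts the mastery criterion actually needs. A simplified sketch of the slicing (the `"m_of_n"` string stands in for `exercises.M_OF_N`, and the per-type `MAPPING` sizes are omitted):

```python
def recent_window(attempts, criterion):
    # `attempts` is ordered newest first, as in the queryset above.
    kind = criterion.get("type")
    if kind == "m_of_n":
        return attempts[: criterion["n"]]
    if kind == "quiz":
        return attempts          # quizzes keep every response
    return attempts[:10]         # default window

logs = list(range(20))
print(recent_window(logs, {"type": "m_of_n", "n": 5}))  # [0, 1, 2, 3, 4]
```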
try:\n masterylog = MasteryLog.objects.get(\n user=user,\n mastery_level=context[\"mastery_level\"],\n summarylog_id=summarylog_id,\n )\n if complete and not masterylog.complete:\n masterylog.complete = True\n masterylog.completion_timestamp = end_timestamp\n masterylog.save(\n update_fields=(\n \"complete\",\n \"completion_timestamp\",\n \"_morango_dirty_bit\",\n )\n )\n self._process_masterylog_completed_notification(masterylog, context)\n else:\n self._check_quiz_log_permissions(masterylog)\n return masterylog.id\n except MasteryLog.DoesNotExist:\n raise ValidationError(\n \"Invalid mastery_level value, this session has not been started.\"\n )\n\n def _update_attempt(self, attemptlog, interaction, update_fields, end_timestamp):\n\n interaction_summary = self._generate_interaction_summary(interaction)\n\n attemptlog.interaction_history += [interaction_summary]\n attemptlog.end_timestamp = end_timestamp\n attemptlog.time_spent = interaction[\"time_spent\"]\n\n if interaction[\"error\"] and not attemptlog.error:\n attemptlog.error = interaction[\"error\"]\n update_fields.add(\"error\")\n\n # Mark hinted only if it is not already correct, and don't undo previously hinted\n if interaction[\"hinted\"] and not attemptlog.hinted and not attemptlog.correct:\n attemptlog.hinted = interaction[\"hinted\"]\n update_fields.add(\"hinted\")\n\n if interaction[\"replace\"]:\n attemptlog.correct = interaction[\"correct\"]\n update_fields.add(\"correct\")\n\n if \"answer\" in interaction:\n attemptlog.answer = interaction[\"answer\"]\n update_fields.add(\"answer\")\n\n if \"simple_answer\" in interaction:\n attemptlog.simple_answer = interaction[\"simple_answer\"]\n update_fields.add(\"simple_answer\")\n\n if interaction[\"complete\"] and not attemptlog.complete:\n attemptlog.complete = interaction[\"complete\"]\n attemptlog.completion_timestamp = end_timestamp\n update_fields.update({\"complete\", \"completion_timestamp\"})\n\n def _create_attempt(\n self, session_id, masterylog_id, user, interaction, end_timestamp\n ):\n start_timestamp = end_timestamp - timedelta(seconds=interaction[\"time_spent\"])\n\n interaction_summary = self._generate_interaction_summary(interaction)\n\n del interaction[\"replace\"]\n\n return AttemptLog(\n sessionlog_id=session_id,\n masterylog_id=masterylog_id,\n interaction_history=[interaction_summary],\n user=user,\n start_timestamp=start_timestamp,\n completion_timestamp=end_timestamp if interaction[\"complete\"] else None,\n end_timestamp=end_timestamp,\n **interaction\n )\n\n def _update_or_create_attempts(\n self, session_id, masterylog_id, user, interactions, end_timestamp, context\n ):\n user = None if user.is_anonymous() else user\n\n output = []\n\n for _, item_interactions in groupby(interactions, lambda x: x[\"item\"]):\n created = False\n update_fields = {\n \"interaction_history\",\n \"end_timestamp\",\n \"time_spent\",\n \"_morango_dirty_bit\",\n }\n item_interactions = list(item_interactions)\n if \"id\" in item_interactions[0]:\n try:\n attemptlog = AttemptLog.objects.get(\n id=item_interactions[0][\"id\"],\n masterylog_id=masterylog_id,\n user=user,\n )\n except AttemptLog.DoesNotExist:\n raise ValidationError(\"Invalid attemptlog id specified\")\n else:\n attemptlog = self._create_attempt(\n session_id,\n masterylog_id,\n user,\n item_interactions[0],\n end_timestamp,\n )\n created = True\n item_interactions = item_interactions[1:]\n updated = bool(item_interactions)\n\n for response in item_interactions:\n self._update_attempt(attemptlog, response, 
update_fields, end_timestamp)\n\n self._process_attempt_notifications(\n attemptlog, context, user, created, updated\n )\n attemptlog.save(\n update_fields=None if created else update_fields, force_insert=created\n )\n attempt = {}\n for field in attemptlog_fields:\n attempt[field] = getattr(attemptlog, field)\n output.append(attempt)\n return {\"attempts\": output}\n\n def _process_attempt_notifications(\n self, attemptlog, context, user, created, updated\n ):\n if user is None:\n return\n if \"lesson_id\" in context:\n wrap_to_save_queue(parse_attemptslog, attemptlog)\n if created and \"quiz_id\" in context:\n wrap_to_save_queue(\n quiz_answered_notification, attemptlog, context[\"quiz_id\"]\n )\n\n def _get_session_log(self, session_id, user):\n try:\n if user.is_anonymous():\n return ContentSessionLog.objects.get(id=session_id, user__isnull=True)\n else:\n return ContentSessionLog.objects.get(id=session_id, user=user)\n except ContentSessionLog.DoesNotExist:\n raise Http404(\n \"ContentSessionLog with id {} does not exist\".format(session_id)\n )\n\n def _normalize_progress(self, progress):\n return max(0, min(1.0, progress))\n\n def _update_content_log(self, log, end_timestamp, validated_data):\n update_fields = (\"end_timestamp\", \"_morango_dirty_bit\")\n\n log.end_timestamp = end_timestamp\n if \"progress_delta\" in validated_data:\n update_fields += (\"progress\",)\n log.progress = self._normalize_progress(\n log.progress + validated_data[\"progress_delta\"]\n )\n elif \"progress\" in validated_data:\n update_fields += (\"progress\",)\n log.progress = self._normalize_progress(validated_data[\"progress\"])\n if \"time_spent_delta\" in validated_data:\n update_fields += (\"time_spent\",)\n log.time_spent += validated_data[\"time_spent_delta\"]\n return update_fields\n\n def _update_summary_log(\n self, user, sessionlog, end_timestamp, validated_data, context\n ):\n if user.is_anonymous():\n return\n summarylog = ContentSummaryLog.objects.get(\n content_id=sessionlog.content_id, user=user\n )\n was_complete = summarylog.progress >= 1\n\n update_fields = self._update_content_log(\n summarylog, end_timestamp, validated_data\n )\n\n if summarylog.progress >= 1 and not was_complete:\n summarylog.completion_timestamp = end_timestamp\n update_fields += (\"completion_timestamp\",)\n self._process_completed_notification(summarylog, context)\n if \"extra_fields\" in validated_data:\n update_fields += (\"extra_fields\",)\n summarylog.extra_fields = validated_data[\"extra_fields\"]\n\n summarylog.save(update_fields=update_fields)\n return summarylog\n\n def _update_session(self, session_id, user, end_timestamp, validated_data):\n sessionlog = self._get_session_log(session_id, user)\n\n context = LogContext(**sessionlog.extra_fields.get(\"context\", {}))\n\n if \"quiz_id\" in context:\n self._check_quiz_permissions(user, context[\"quiz_id\"])\n\n update_fields = self._update_content_log(\n sessionlog, end_timestamp, validated_data\n )\n sessionlog.save(update_fields=update_fields)\n\n summarylog = self._update_summary_log(\n user, sessionlog, end_timestamp, validated_data, context\n )\n\n if summarylog is not None:\n complete = summarylog.progress >= 1\n else:\n complete = sessionlog.progress >= 1\n\n return {\"complete\": complete}, summarylog.id if summarylog else None, context\n\n def _process_completed_notification(self, summarylog, context):\n if \"node_id\" in context:\n wrap_to_save_queue(\n parse_summarylog,\n summarylog,\n )\n\n def update(self, request, pk=None):\n \"\"\"\n Make a PUT 
request to update the current session\n\n Requires one of either:\n - progress_delta: increase the progress by this amount\n - progress: set the progress to this amount\n\n Can also update time spent recorded with a delta:\n - time_spent_delta: number of seconds to increase time_spent by\n\n And update the extra_fields value stored:\n - extra_fields: the complete representation to set extra_fields to\n\n If creating or updating attempts for an assessment must include:\n - interactions: an array of objects, if updating an existing attempt, must include attempt_id\n\n Returns an object with the properties:\n - complete: boolean indicating if the resource is completed\n\n If an attempt at an assessment was included, then this parameter will be included:\n - attempts: serialized form of the attempt, equivalent to that returned in pastattempts from\n session initialization\n \"\"\"\n if pk is None:\n raise Http404\n serializer = UpdateSessionSerializer(\n data=request.data, context={\"request\": request}\n )\n serializer.is_valid(raise_exception=True)\n end_timestamp = local_now()\n validated_data = serializer.validated_data\n\n with transaction.atomic(), dataset_cache:\n self._precache_dataset_id(request.user)\n\n output, summarylog_id, context = self._update_session(\n pk, request.user, end_timestamp, validated_data\n )\n masterylog_id = self._update_and_return_mastery_log_id(\n request.user, output[\"complete\"], summarylog_id, end_timestamp, context\n )\n if \"interactions\" in validated_data:\n attempt_output = self._update_or_create_attempts(\n pk,\n masterylog_id,\n request.user,\n validated_data[\"interactions\"],\n end_timestamp,\n context,\n )\n output.update(attempt_output)\n return Response(output)\n\n\nclass TotalContentProgressViewSet(viewsets.GenericViewSet):\n def retrieve(self, request, pk=None):\n if request.user.is_anonymous() or pk != request.user.id:\n raise PermissionDenied(\"Can only access progress data for self\")\n progress = (\n request.user.contentsummarylog_set.filter(progress=1)\n .aggregate(Sum(\"progress\"))\n .get(\"progress__sum\")\n )\n return Response(\n {\n \"id\": pk,\n \"progress\": progress,\n }\n )\n\n\nclass BaseLogFilter(FilterSet):\n facility = UUIDFilter(method=\"filter_facility\")\n classroom = UUIDFilter(method=\"filter_classroom\")\n learner_group = UUIDFilter(method=\"filter_learner_group\")\n\n # Only a superuser can filter by facilities\n def filter_facility(self, queryset, name, value):\n return queryset.filter(user__facility=value)\n\n def filter_classroom(self, queryset, name, value):\n return queryset.filter(\n Q(user__memberships__collection_id=value)\n | Q(user__memberships__collection__parent_id=value)\n )\n\n def filter_learner_group(self, queryset, name, value):\n return queryset.filter(user__memberships__collection_id=value)\n\n\nclass MasteryFilter(BaseLogFilter):\n content = UUIDFilter(name=\"summarylog__content_id\")\n\n class Meta:\n model = MasteryLog\n fields = [\"content\"]\n\n\nclass MasteryLogViewSet(ReadOnlyValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = MasteryLog.objects.all()\n pagination_class = OptionalPageNumberPagination\n filter_class = MasteryFilter\n values = (\n \"user\",\n \"summarylog\",\n \"mastery_criterion\",\n \"start_timestamp\",\n \"end_timestamp\",\n \"completion_timestamp\",\n \"mastery_level\",\n \"complete\",\n )\n\n\nclass AttemptFilter(BaseLogFilter):\n content = 
CharFilter(method=\"filter_content\")\n\n def filter_content(self, queryset, name, value):\n return queryset.filter(masterylog__summarylog__content_id=value)\n\n class Meta:\n model = AttemptLog\n fields = [\"masterylog\", \"complete\", \"user\", \"content\", \"item\"]\n\n\nclass AttemptLogViewSet(ReadOnlyValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (\n KolibriAuthPermissionsFilter,\n DjangoFilterBackend,\n filters.OrderingFilter,\n )\n queryset = AttemptLog.objects.all()\n pagination_class = OptionalPageNumberPagination\n filter_class = AttemptFilter\n ordering_fields = (\"end_timestamp\",)\n ordering = (\"end_timestamp\",)\n\n values = (\n \"item\",\n \"start_timestamp\",\n \"end_timestamp\",\n \"completion_timestamp\",\n \"time_spent\",\n \"complete\",\n \"correct\",\n \"hinted\",\n \"answer\",\n \"simple_answer\",\n \"interaction_history\",\n \"user\",\n \"error\",\n \"masterylog\",\n \"sessionlog\",\n )\n","path":"kolibri/core/logger/api.py"}]},"after_files":{"kind":"list like","value":[{"content":"import logging\nfrom datetime import timedelta\nfrom itertools import groupby\nfrom random import randint\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.http import Http404\nfrom django_filters.rest_framework import CharFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework import FilterSet\nfrom django_filters.rest_framework import UUIDFilter\nfrom le_utils.constants import content_kinds\nfrom le_utils.constants import exercises\nfrom rest_framework import filters\nfrom rest_framework import serializers\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\n\nfrom .models import AttemptLog\nfrom .models import ContentSessionLog\nfrom .models import ContentSummaryLog\nfrom .models import MasteryLog\nfrom kolibri.core.api import ReadOnlyValuesViewset\nfrom kolibri.core.auth.api import KolibriAuthPermissions\nfrom kolibri.core.auth.api import KolibriAuthPermissionsFilter\nfrom kolibri.core.auth.models import dataset_cache\nfrom kolibri.core.content.api import OptionalPageNumberPagination\nfrom kolibri.core.content.models import AssessmentMetaData\nfrom kolibri.core.content.models import ContentNode\nfrom 
kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.constants import interaction_types\nfrom kolibri.core.logger.constants.exercise_attempts import MAPPING\nfrom kolibri.core.notifications.api import create_summarylog\nfrom kolibri.core.notifications.api import parse_attemptslog\nfrom kolibri.core.notifications.api import parse_summarylog\nfrom kolibri.core.notifications.api import quiz_answered_notification\nfrom kolibri.core.notifications.api import quiz_completed_notification\nfrom kolibri.core.notifications.api import quiz_started_notification\nfrom kolibri.core.notifications.tasks import wrap_to_save_queue\nfrom kolibri.utils.time_utils import local_now\n\nlogger = logging.getLogger(__name__)\n\n\nclass HexStringUUIDField(serializers.UUIDField):\n def __init__(self, **kwargs):\n self.uuid_format = \"hex\"\n super(HexStringUUIDField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n return super(HexStringUUIDField, self).to_internal_value(data).hex\n\n\nclass StartSessionSerializer(serializers.Serializer):\n lesson_id = HexStringUUIDField(required=False)\n node_id = HexStringUUIDField(required=False)\n # Do this as a special way of handling our coach generated quizzes\n quiz_id = HexStringUUIDField(required=False)\n # A flag to indicate whether to start the session over again\n repeat = serializers.BooleanField(required=False, default=False)\n\n def validate(self, data):\n if \"quiz_id\" in data and (\"lesson_id\" in data or \"node_id\" in data):\n raise ValidationError(\"quiz_id must not be mixed with other context\")\n if \"node_id\" not in data and \"quiz_id\" not in data:\n raise ValidationError(\"node_id is required if not a coach assigned quiz\")\n return data\n\n\nclass InteractionSerializer(serializers.Serializer):\n id = HexStringUUIDField(required=False)\n item = serializers.CharField()\n correct = serializers.FloatField(min_value=0, max_value=1)\n complete = serializers.BooleanField(required=False, default=False)\n time_spent = serializers.FloatField(min_value=0)\n\n answer = serializers.DictField(required=False)\n simple_answer = serializers.CharField(required=False, allow_blank=True)\n error = serializers.BooleanField(required=False, default=False)\n hinted = serializers.BooleanField(required=False, default=False)\n # Whether to replace the current answer with the new answer\n # this is a no-op if the attempt is being created.\n replace = serializers.BooleanField(required=False, default=False)\n\n def validate(self, data):\n if not data[\"error\"] and \"answer\" not in data:\n raise ValidationError(\"Must provide an answer if not an error\")\n return data\n\n\nclass UpdateSessionSerializer(serializers.Serializer):\n progress_delta = serializers.FloatField(min_value=0, max_value=1.0, required=False)\n progress = serializers.FloatField(min_value=0, max_value=1.0, required=False)\n time_spent_delta = serializers.FloatField(min_value=0, required=False)\n extra_fields = serializers.DictField(required=False)\n interactions = InteractionSerializer(required=False, many=True)\n\n def validate(self, data):\n if \"progress_delta\" in data and \"progress\" in data:\n raise ValidationError(\n \"must not pass progress_delta and progress in the same request\"\n )\n return data\n\n\n# The lowest integer that can be encoded\n# in a Django IntegerField across all backends\nMIN_INTEGER = -2147483648\n\n\nattemptlog_fields = [\n \"id\",\n \"correct\",\n \"complete\",\n \"hinted\",\n \"error\",\n \"item\",\n 
\"answer\",\n \"time_spent\",\n]\n\n\nclass LogContext(object):\n \"\"\"\n Object used to provide a limited dict like interface for encoding the\n context that can be stored in the sessionlog, and which is then\n returned to the frontend as part of the initialization of a content\n session.\n node_id - represents a specific ContentNode in a topic tree, while the\n content_id for that node is recorded directly on the sessionlog.\n quiz_id - represents the id of the Exam Model object that this session\n is regarding (if any).\n lesson_id - represents the id of the lesson this node_id is being engaged\n with from within (if any).\n mastery_level - represents the current 'try' at an assessment, whether an exercise\n a practice quiz or a coach assigned quiz. Different mastery_level values\n indicate a different try at the assessment.\n\n This is used to encode the values that are sent when initializing a session\n (see its use in the _get_context method below)\n and then also used to hold the values from an existing sessionlog when\n updating a session (see _update_session method).\n \"\"\"\n\n __slots__ = \"node_id\", \"quiz_id\", \"lesson_id\", \"mastery_level\"\n\n def __init__(self, **kwargs):\n for key, value in kwargs.items():\n self[key] = value\n\n def __setitem__(self, key, value):\n if key not in self.__slots__:\n return\n setattr(self, key, value)\n\n def __getitem__(self, key):\n if key not in self.__slots__:\n return\n return getattr(self, key, None)\n\n def __contains__(self, key):\n return key in self.__slots__ and hasattr(self, key)\n\n def to_dict(self):\n \"\"\"\n Provide a dictionary of the keys stored in the context object.\n Used to serialize for inclusion in an API Response.\n \"\"\"\n output = {}\n for slot in self.__slots__:\n if hasattr(self, slot):\n output[slot] = getattr(self, slot)\n return output\n\n\nclass ProgressTrackingViewSet(viewsets.GenericViewSet):\n def _precache_dataset_id(self, user):\n if user is None or user.is_anonymous():\n return\n key = ContentSessionLog.get_related_dataset_cache_key(\n user.id, user._meta.db_table\n )\n dataset_cache.set(key, user.dataset_id)\n\n def _check_quiz_permissions(self, user, quiz_id):\n if user.is_anonymous():\n raise PermissionDenied(\"Cannot access a quiz if not logged in\")\n if not Exam.objects.filter(\n active=True,\n assignments__collection_id__in=user.memberships.all().values(\n \"collection_id\"\n ),\n id=quiz_id,\n ).exists():\n raise PermissionDenied(\"User does not have access to this quiz_id\")\n\n def _check_lesson_permissions(self, user, lesson_id):\n if user.is_anonymous():\n raise PermissionDenied(\"Cannot access a lesson if not logged in\")\n if not Lesson.objects.filter(\n lesson_assignments__collection_id__in=user.memberships.all().values(\n \"collection_id\"\n ),\n id=lesson_id,\n ).exists():\n raise ValidationError(\"Invalid lesson_id\")\n\n def _get_context(self, user, validated_data):\n node_id = validated_data.get(\"node_id\")\n quiz_id = validated_data.get(\"quiz_id\")\n lesson_id = validated_data.get(\"lesson_id\")\n\n context = LogContext()\n\n if node_id is not None:\n try:\n node = (\n ContentNode.objects.annotate(\n mastery_model=Subquery(\n AssessmentMetaData.objects.filter(\n contentnode_id=OuterRef(\"id\")\n ).values_list(\"mastery_model\", flat=True)[:1]\n )\n )\n .values(\"content_id\", \"channel_id\", \"kind\", \"mastery_model\")\n .get(id=node_id)\n )\n mastery_model = node[\"mastery_model\"]\n content_id = node[\"content_id\"]\n channel_id = node[\"channel_id\"]\n kind = 
node[\"kind\"]\n context[\"node_id\"] = node_id\n if lesson_id:\n self._check_lesson_permissions(user, lesson_id)\n context[\"lesson_id\"] = lesson_id\n except ContentNode.DoesNotExist:\n raise ValidationError(\"Invalid node_id\")\n elif quiz_id is not None:\n self._check_quiz_permissions(user, quiz_id)\n mastery_model = {\"type\": \"quiz\", \"coach_assigned\": True}\n content_id = quiz_id\n channel_id = None\n kind = content_kinds.QUIZ\n context[\"quiz_id\"] = quiz_id\n return content_id, channel_id, kind, mastery_model, context\n\n def _get_or_create_summarylog(\n self,\n user,\n content_id,\n channel_id,\n kind,\n mastery_model,\n start_timestamp,\n repeat,\n context,\n ):\n if not user:\n output = {\n \"progress\": 0,\n \"extra_fields\": {},\n \"time_spent\": 0,\n \"complete\": False,\n }\n if mastery_model:\n output.update(\n {\n \"mastery_criterion\": mastery_model,\n \"pastattempts\": [],\n \"totalattempts\": 0,\n \"complete\": False,\n }\n )\n return output\n\n try:\n summarylog = ContentSummaryLog.objects.get(\n content_id=content_id,\n user=user,\n )\n updated_fields = (\"end_timestamp\", \"channel_id\", \"_morango_dirty_bit\")\n if repeat:\n summarylog.progress = 0\n updated_fields += (\"progress\",)\n summarylog.channel_id = channel_id\n summarylog.end_timestamp = start_timestamp\n summarylog.save(update_fields=updated_fields)\n except ContentSummaryLog.DoesNotExist:\n summarylog = ContentSummaryLog.objects.create(\n content_id=content_id,\n user=user,\n channel_id=channel_id,\n kind=kind,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n )\n self._process_created_notification(summarylog, context)\n\n output = {\n \"progress\": summarylog.progress,\n \"extra_fields\": summarylog.extra_fields,\n \"time_spent\": summarylog.time_spent,\n \"complete\": summarylog.progress >= 1,\n }\n if mastery_model:\n assessment_output, mastery_level = self._start_assessment_session(\n mastery_model,\n summarylog,\n user,\n start_timestamp,\n repeat,\n context,\n )\n output.update(assessment_output)\n context[\"mastery_level\"] = mastery_level\n return output\n\n def create(self, request):\n \"\"\"\n Make a POST request to start a content session.\n\n Requires one of either:\n - node_id: the pk of the resource\n - quiz_id: the pk of the quiz (Exam) object\n\n Optional parameters:\n - repeat: whether to reset previous progress on this content to zero and start fresh\n - lesson_id: if this is being engaged within a lesson\n\n Returns object with properties:\n - session_id: id of the session object that was created by this call\n - context: contains node_id, quiz_id, lesson_id, and mastery_level as appropriate\n - progress: any previous progress on this content resource\n - time_spent: any previous time spent on this content resource\n - extra_fields: any previously recorded additional data stored for this resource\n - complete: whether this resource is completed by this user\n\n If this is an assessment, return object will also include:\n - mastery_criterion: mastery criterion that should be applied to determine completion\n - pastattempts: serialized subset of recent responses, used to determine completion\n - totalattempts: total number of previous responses within this run of the assessment resource\n \"\"\"\n serializer = StartSessionSerializer(\n data=request.data, context={\"request\": request}\n )\n serializer.is_valid(raise_exception=True)\n start_timestamp = local_now()\n repeat = serializer.validated_data[\"repeat\"]\n\n content_id, channel_id, kind, mastery_model, 
context = self._get_context(\n request.user, serializer.validated_data\n )\n\n with transaction.atomic(), dataset_cache:\n\n user = None if request.user.is_anonymous() else request.user\n\n self._precache_dataset_id(user)\n\n output = self._get_or_create_summarylog(\n user,\n content_id,\n channel_id,\n kind,\n mastery_model,\n start_timestamp,\n repeat,\n context,\n )\n\n # Must ensure there is no user here to maintain user privacy for logging.\n visitor_id = (\n request.COOKIES.get(\"visitor_id\")\n if hasattr(request, \"COOKIES\") and not user\n else None\n )\n sessionlog = ContentSessionLog.objects.create(\n content_id=content_id,\n channel_id=channel_id,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n user=user,\n kind=kind,\n visitor_id=visitor_id,\n extra_fields={\"context\": context.to_dict()},\n )\n output.update({\"session_id\": sessionlog.id, \"context\": context.to_dict()})\n return Response(output)\n\n def _process_created_notification(self, summarylog, context):\n # dont create notifications upon creating a summary log for an exercise\n # notifications should only be triggered upon first attempting a question in the exercise\n if \"node_id\" in context and summarylog.kind != content_kinds.EXERCISE:\n # We have sufficient information to only trigger notifications for the specific\n # lesson that this is being engaged with, but until we can work out the exact\n # way that we want to match this with contextual progress tracking, we are\n # not changing this for now.\n wrap_to_save_queue(\n create_summarylog,\n summarylog,\n )\n\n def _process_masterylog_created_notification(self, masterylog, context):\n if \"quiz_id\" in context:\n wrap_to_save_queue(\n quiz_started_notification, masterylog, context[\"quiz_id\"]\n )\n\n def _check_quiz_log_permissions(self, masterylog):\n if (\n masterylog\n and masterylog.complete\n and masterylog.mastery_criterion.get(\"type\") == \"quiz\"\n and masterylog.mastery_criterion.get(\"coach_assigned\")\n ):\n raise PermissionDenied(\"Cannot update a finished coach assigned quiz\")\n\n def _get_or_create_masterylog(\n self,\n user,\n summarylog,\n repeat,\n mastery_model,\n start_timestamp,\n context,\n ):\n masterylog = (\n MasteryLog.objects.filter(\n summarylog=summarylog,\n user=user,\n )\n .order_by(\"-complete\", \"-end_timestamp\")\n .first()\n )\n\n if masterylog is None or (masterylog.complete and repeat):\n # There is no previous masterylog, or the previous masterylog\n # is complete, and the request is requesting a new attempt.\n # Here we generate a mastery_level value - this serves to disambiguate multiple\n # retries at an assessment (either an exercise, practice quiz, or coach assigned quiz).\n # Having the same mastery_level/summarylog (and hence user) pair will result in the same\n # identifier being created. 
            else:
                mastery_level = (
                    masterylog.mastery_level + 1 if masterylog is not None else 1
                )

            masterylog = MasteryLog.objects.create(
                summarylog=summarylog,
                user=user,
                mastery_criterion=mastery_model,
                start_timestamp=start_timestamp,
                end_timestamp=start_timestamp,
                mastery_level=mastery_level,
            )
            self._process_masterylog_created_notification(masterylog, context)
        else:
            self._check_quiz_log_permissions(masterylog)
        return masterylog

    def _start_assessment_session(
        self, mastery_model, summarylog, user, start_timestamp, repeat, context
    ):
        masterylog = self._get_or_create_masterylog(
            user,
            summarylog,
            repeat,
            mastery_model,
            start_timestamp,
            context,
        )

        mastery_criterion = masterylog.mastery_criterion
        exercise_type = mastery_criterion.get("type")
        attemptlogs = masterylog.attemptlogs.values(*attemptlog_fields).order_by(
            "-start_timestamp"
        )

        # get the first x logs depending on the exercise type
        if exercise_type == exercises.M_OF_N:
            attemptlogs = attemptlogs[: mastery_criterion["n"]]
        elif exercise_type in MAPPING:
            attemptlogs = attemptlogs[: MAPPING[exercise_type]]
        elif exercise_type == "quiz":
            attemptlogs = attemptlogs.order_by()
        else:
            attemptlogs = attemptlogs[:10]

        return {
            "mastery_criterion": mastery_criterion,
            "pastattempts": attemptlogs,
            "totalattempts": masterylog.attemptlogs.count(),
            "complete": masterylog.complete,
        }, masterylog.mastery_level

    def _generate_interaction_summary(self, validated_data):
        if validated_data["error"]:
            return {
                "type": interaction_types.ERROR,
            }
        elif validated_data["hinted"]:
            return {
                "type": interaction_types.HINT,
                "answer": validated_data["answer"],
            }
        return {
            "type": interaction_types.ANSWER,
            "answer": validated_data["answer"],
            "correct": validated_data["correct"],
        }

    def _process_masterylog_completed_notification(self, masterylog, context):
        if "quiz_id" in context:
            wrap_to_save_queue(
                quiz_completed_notification, masterylog, context["quiz_id"]
            )

    def _update_and_return_mastery_log_id(
        self, user, complete, summarylog_id, end_timestamp, context
    ):
        if not user.is_anonymous() and context["mastery_level"] is not None:
            try:
                masterylog = MasteryLog.objects.get(
                    user=user,
                    mastery_level=context["mastery_level"],
                    summarylog_id=summarylog_id,
                )
                if complete and not masterylog.complete:
                    masterylog.complete = True
                    masterylog.completion_timestamp = end_timestamp
                    masterylog.save(
                        update_fields=(
                            "complete",
                            "completion_timestamp",
                            "_morango_dirty_bit",
                        )
                    )
                    self._process_masterylog_completed_notification(masterylog, context)
                else:
                    self._check_quiz_log_permissions(masterylog)
                return masterylog.id
            except MasteryLog.DoesNotExist:
                raise ValidationError(
                    "Invalid mastery_level value, this session has not been started."
                )

    def _update_attempt(self, attemptlog, interaction, update_fields, end_timestamp):

        interaction_summary = self._generate_interaction_summary(interaction)

        attemptlog.interaction_history += [interaction_summary]
        attemptlog.end_timestamp = end_timestamp
        attemptlog.time_spent = interaction["time_spent"]

        if interaction["error"] and not attemptlog.error:
            attemptlog.error = interaction["error"]
            update_fields.add("error")

        # Mark hinted only if it is not already correct, and don't undo previously hinted
        if interaction["hinted"] and not attemptlog.hinted and not attemptlog.correct:
            attemptlog.hinted = interaction["hinted"]
            update_fields.add("hinted")

        if interaction["replace"]:
            attemptlog.correct = interaction["correct"]
            update_fields.add("correct")

        if "answer" in interaction:
            attemptlog.answer = interaction["answer"]
            update_fields.add("answer")

        if "simple_answer" in interaction:
            attemptlog.simple_answer = interaction["simple_answer"]
            update_fields.add("simple_answer")

        if interaction["complete"] and not attemptlog.complete:
            attemptlog.complete = interaction["complete"]
            attemptlog.completion_timestamp = end_timestamp
            update_fields.update({"complete", "completion_timestamp"})

    def _create_attempt(
        self, session_id, masterylog_id, user, interaction, end_timestamp
    ):
        start_timestamp = end_timestamp - timedelta(seconds=interaction["time_spent"])

        interaction_summary = self._generate_interaction_summary(interaction)

        del interaction["replace"]

        return AttemptLog(
            sessionlog_id=session_id,
            masterylog_id=masterylog_id,
            interaction_history=[interaction_summary],
            user=user,
            start_timestamp=start_timestamp,
            completion_timestamp=end_timestamp if interaction["complete"] else None,
            end_timestamp=end_timestamp,
            **interaction
        )
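
    # Editor's note (not part of the original file): itertools.groupby only
    # groups *consecutive* elements sharing a key, so the method below
    # implicitly assumes the incoming interactions list is already ordered
    # by "item". For example:
    #
    #     >>> from itertools import groupby
    #     >>> data = [{"item": "a"}, {"item": "b"}, {"item": "a"}]
    #     >>> [k for k, _ in groupby(data, lambda x: x["item"])]
    #     ['a', 'b', 'a']
    #
    # i.e. non-adjacent interactions on the same item yield separate groups.
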
    def _update_or_create_attempts(
        self, session_id, masterylog_id, user, interactions, end_timestamp, context
    ):
        user = None if user.is_anonymous() else user

        output = []

        for _, item_interactions in groupby(interactions, lambda x: x["item"]):
            created = False
            update_fields = {
                "interaction_history",
                "end_timestamp",
                "time_spent",
                "_morango_dirty_bit",
            }
            item_interactions = list(item_interactions)
            if "id" in item_interactions[0]:
                try:
                    attemptlog = AttemptLog.objects.get(
                        id=item_interactions[0]["id"],
                        masterylog_id=masterylog_id,
                        user=user,
                    )
                except AttemptLog.DoesNotExist:
                    raise ValidationError("Invalid attemptlog id specified")
            else:
                attemptlog = self._create_attempt(
                    session_id,
                    masterylog_id,
                    user,
                    item_interactions[0],
                    end_timestamp,
                )
                created = True
                item_interactions = item_interactions[1:]
            updated = bool(item_interactions)

            for response in item_interactions:
                self._update_attempt(attemptlog, response, update_fields, end_timestamp)

            self._process_attempt_notifications(
                attemptlog, context, user, created, updated
            )
            attemptlog.save(
                update_fields=None if created else update_fields, force_insert=created
            )
            attempt = {}
            for field in attemptlog_fields:
                attempt[field] = getattr(attemptlog, field)
            output.append(attempt)
        return {"attempts": output}

    def _process_attempt_notifications(
        self, attemptlog, context, user, created, updated
    ):
        if user is None:
            return
        if "lesson_id" in context:
            wrap_to_save_queue(parse_attemptslog, attemptlog)
        if created and "quiz_id" in context:
            wrap_to_save_queue(
                quiz_answered_notification, attemptlog, context["quiz_id"]
            )

    def _get_session_log(self, session_id, user):
        try:
            if user.is_anonymous():
                return ContentSessionLog.objects.get(id=session_id, user__isnull=True)
            else:
                return ContentSessionLog.objects.get(id=session_id, user=user)
        except ContentSessionLog.DoesNotExist:
            raise Http404(
                "ContentSessionLog with id {} does not exist".format(session_id)
            )

    def _normalize_progress(self, progress):
        return max(0, min(1.0, progress))

    def _update_content_log(self, log, end_timestamp, validated_data):
        update_fields = ("end_timestamp", "_morango_dirty_bit")

        log.end_timestamp = end_timestamp
        if "progress_delta" in validated_data:
            update_fields += ("progress",)
            log.progress = self._normalize_progress(
                log.progress + validated_data["progress_delta"]
            )
        elif "progress" in validated_data:
            update_fields += ("progress",)
            log.progress = self._normalize_progress(validated_data["progress"])
        if "time_spent_delta" in validated_data:
            update_fields += ("time_spent",)
            log.time_spent += validated_data["time_spent_delta"]
        return update_fields
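
    # Editor's note (not part of the original file): because all progress
    # writes pass through _normalize_progress above, deltas cannot push
    # progress outside [0, 1]. A quick sketch of the clamping behaviour:
    #
    #     >>> max(0, min(1.0, 0.9 + 0.5))   # progress_delta overshooting
    #     1.0
    #     >>> max(0, min(1.0, -0.2))        # negative values are floored
    #     0
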
    def _update_summary_log(
        self, user, sessionlog, end_timestamp, validated_data, context
    ):
        if user.is_anonymous():
            return
        summarylog = ContentSummaryLog.objects.get(
            content_id=sessionlog.content_id, user=user
        )
        was_complete = summarylog.progress >= 1

        update_fields = self._update_content_log(
            summarylog, end_timestamp, validated_data
        )

        if summarylog.progress >= 1 and not was_complete:
            summarylog.completion_timestamp = end_timestamp
            update_fields += ("completion_timestamp",)
            self._process_completed_notification(summarylog, context)
        if "extra_fields" in validated_data:
            update_fields += ("extra_fields",)
            summarylog.extra_fields = validated_data["extra_fields"]

        summarylog.save(update_fields=update_fields)
        return summarylog

    def _update_session(self, session_id, user, end_timestamp, validated_data):
        sessionlog = self._get_session_log(session_id, user)

        context = LogContext(**sessionlog.extra_fields.get("context", {}))

        if "quiz_id" in context:
            self._check_quiz_permissions(user, context["quiz_id"])

        update_fields = self._update_content_log(
            sessionlog, end_timestamp, validated_data
        )
        sessionlog.save(update_fields=update_fields)

        summarylog = self._update_summary_log(
            user, sessionlog, end_timestamp, validated_data, context
        )

        if summarylog is not None:
            complete = summarylog.progress >= 1
        else:
            complete = sessionlog.progress >= 1

        return {"complete": complete}, summarylog.id if summarylog else None, context

    def _process_completed_notification(self, summarylog, context):
        if "node_id" in context:
            wrap_to_save_queue(
                parse_summarylog,
                summarylog,
            )

    def update(self, request, pk=None):
        """
        Make a PUT request to update the current session.

        Requires one of either:
        - progress_delta: increase the progress by this amount
        - progress: set the progress to this amount

        Can also update time spent recorded with a delta:
        - time_spent_delta: number of seconds to increase time_spent by

        And update the extra_fields value stored:
        - extra_fields: the complete representation to set extra_fields to

        If creating or updating attempts for an assessment, the request must include:
        - interactions: an array of objects; if updating an existing attempt, must include attempt_id

        Returns an object with the properties:
        - complete: boolean indicating if the resource is completed

        If an attempt at an assessment was included, then this parameter will be included:
        - attempts: serialized form of the attempt, equivalent to that returned in pastattempts from
          session initialization
        """
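        # Editor's note (not part of the original file): a minimal illustration
        # of the payload shape described above. The URL is hypothetical and
        # depends on how this viewset is routed.
        #
        #     client.put(
        #         "/api/logger/trackprogress/{}/".format(session_id),
        #         data={"progress_delta": 0.25, "time_spent_delta": 30},
        #         format="json",
        #     )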
        if pk is None:
            raise Http404
        serializer = UpdateSessionSerializer(
            data=request.data, context={"request": request}
        )
        serializer.is_valid(raise_exception=True)
        end_timestamp = local_now()
        validated_data = serializer.validated_data

        with transaction.atomic(), dataset_cache:
            self._precache_dataset_id(request.user)

            output, summarylog_id, context = self._update_session(
                pk, request.user, end_timestamp, validated_data
            )
            masterylog_id = self._update_and_return_mastery_log_id(
                request.user, output["complete"], summarylog_id, end_timestamp, context
            )
            if "interactions" in validated_data:
                attempt_output = self._update_or_create_attempts(
                    pk,
                    masterylog_id,
                    request.user,
                    validated_data["interactions"],
                    end_timestamp,
                    context,
                )
                output.update(attempt_output)
            return Response(output)


class TotalContentProgressViewSet(viewsets.GenericViewSet):
    def retrieve(self, request, pk=None):
        if request.user.is_anonymous() or pk != request.user.id:
            raise PermissionDenied("Can only access progress data for self")
        progress = (
            request.user.contentsummarylog_set.filter(progress=1)
            .aggregate(Sum("progress"))
            .get("progress__sum")
        )
        return Response(
            {
                "id": pk,
                "progress": progress,
            }
        )


class BaseLogFilter(FilterSet):
    facility = UUIDFilter(method="filter_facility")
    classroom = UUIDFilter(method="filter_classroom")
    learner_group = UUIDFilter(method="filter_learner_group")

    # Only a superuser can filter by facilities
    def filter_facility(self, queryset, name, value):
        return queryset.filter(user__facility=value)

    def filter_classroom(self, queryset, name, value):
        return queryset.filter(
            Q(user__memberships__collection_id=value)
            | Q(user__memberships__collection__parent_id=value)
        )

    def filter_learner_group(self, queryset, name, value):
        return queryset.filter(user__memberships__collection_id=value)


class MasteryFilter(BaseLogFilter):
    content = UUIDFilter(name="summarylog__content_id")

    class Meta:
        model = MasteryLog
        fields = ["content"]


class MasteryLogViewSet(ReadOnlyValuesViewset):
    permission_classes = (KolibriAuthPermissions,)
    filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
    queryset = MasteryLog.objects.all()
    pagination_class = OptionalPageNumberPagination
    filter_class = MasteryFilter
    values = (
        "user",
        "summarylog",
        "mastery_criterion",
        "start_timestamp",
        "end_timestamp",
        "completion_timestamp",
        "mastery_level",
        "complete",
    )
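
# Editor's note (not part of the original file): a sketch of how MasteryFilter
# above is typically exercised over HTTP, assuming a hypothetical route for the
# viewset (the URL conf is not part of this file):
#
#     GET /api/logger/masterylog/?content=<content_id>
#
# KolibriAuthPermissionsFilter still restricts results to rows the requesting
# user is permitted to read.
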
class AttemptFilter(BaseLogFilter):
    content = CharFilter(method="filter_content")

    def filter_content(self, queryset, name, value):
        return queryset.filter(masterylog__summarylog__content_id=value)

    class Meta:
        model = AttemptLog
        fields = ["masterylog", "complete", "user", "content", "item"]


class AttemptLogViewSet(ReadOnlyValuesViewset):
    permission_classes = (KolibriAuthPermissions,)
    filter_backends = (
        KolibriAuthPermissionsFilter,
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    queryset = AttemptLog.objects.all()
    pagination_class = OptionalPageNumberPagination
    filter_class = AttemptFilter
    ordering_fields = ("end_timestamp",)
    ordering = ("end_timestamp",)

    values = (
        "id",
        "item",
        "start_timestamp",
        "end_timestamp",
        "completion_timestamp",
        "time_spent",
        "complete",
        "correct",
        "hinted",
        "answer",
        "simple_answer",
        "interaction_history",
        "user",
        "error",
        "masterylog",
        "sessionlog",
    )

path: kolibri/core/logger/api.py
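
Editor's note: the listing above ends the file. As a rough, hedged sketch of the
full session flow this API implements (start a session, post an interaction,
mark complete), assuming DRF's test client and a hypothetical URL that the file
itself does not define; `learner` and `node_id` are assumed fixtures:

    from rest_framework.test import APIClient

    client = APIClient()
    client.force_authenticate(user=learner)

    # 1. Start a session against a content node.
    start = client.post(
        "/api/logger/trackprogress/",  # hypothetical route
        {"node_id": node_id, "repeat": False},
        format="json",
    ).json()
    session_id = start["session_id"]

    # 2. Report an answer attempt and some progress.
    client.put(
        "/api/logger/trackprogress/{}/".format(session_id),
        {
            "progress_delta": 0.5,
            "time_spent_delta": 12,
            "interactions": [
                {
                    "item": "item_1",
                    "answer": {"response": "A"},
                    "correct": 1.0,
                    "time_spent": 12,
                    "complete": True,
                }
            ],
        },
        format="json",
    )

    # 3. Mark the resource complete.
    client.put(
        "/api/logger/trackprogress/{}/".format(session_id),
        {"progress": 1.0},
        format="json",
    )
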
pr_diff:

diff --git a/kolibri/core/logger/api.py b/kolibri/core/logger/api.py
index e5b55a5c8fa..a2f040fac14 100644
--- a/kolibri/core/logger/api.py
+++ b/kolibri/core/logger/api.py
@@ -918,6 +918,7 @@ class AttemptLogViewSet(ReadOnlyValuesViewset):
     ordering = ("end_timestamp",)
 
     values = (
+        "id",
         "item",
         "start_timestamp",
         "end_timestamp",
diff --git a/kolibri/plugins/coach/assets/src/views/reports/ReportsLearnerReportLessonPage.vue b/kolibri/plugins/coach/assets/src/views/reports/ReportsLearnerReportLessonPage.vue
index e62a7b30123..8b9a0c33c1f 100644
--- a/kolibri/plugins/coach/assets/src/views/reports/ReportsLearnerReportLessonPage.vue
+++ b/kolibri/plugins/coach/assets/src/views/reports/ReportsLearnerReportLessonPage.vue
@@ -68,10 +68,7 @@
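
Editor's note on the diff above: the first hunk adds the "id" field to
AttemptLogViewSet.values, so serialized attempt logs expose their primary key;
this matches the file content shown earlier, which already includes "id". The
second hunk modifies ReportsLearnerReportLessonPage.vue, but its body is
truncated in the source, so the Vue-side change is not reproduced here.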