')\n print('')\n\nif __name__==\"__main__\":\n if len(sys.argv) > 1: main(sys.argv[1:])\n else: print(__doc__)\n"},"repo_name":{"kind":"string","value":"tangentstorm/scenetool"},"path":{"kind":"string","value":"spike/vue2svg.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":5403,"string":"5,403"}}},{"rowIdx":44294,"cells":{"code":{"kind":"string","value":"from staffjoy.resource import Resource\nfrom staffjoy.resources.location import Location\nfrom staffjoy.resources.admin import Admin\nfrom staffjoy.resources.organization_worker import OrganizationWorker\n\n\nclass Organization(Resource):\n PATH = \"organizations/{organization_id}\"\n ID_NAME = \"organization_id\"\n\n def get_locations(self, **kwargs):\n return Location.get_all(parent=self, **kwargs)\n\n def get_location(self, id):\n return Location.get(parent=self, id=id)\n\n def create_location(self, **kwargs):\n return Location.create(parent=self, **kwargs)\n\n def get_admins(self):\n return Admin.get_all(parent=self)\n\n def get_admin(self, id):\n return Admin.get(parent=self, id=id)\n\n def create_admin(self, **kwargs):\n \"\"\"Typically just pass email\"\"\"\n return Admin.create(parent=self, **kwargs)\n\n def get_workers(self, **kwargs):\n return OrganizationWorker.get_all(parent=self, **kwargs)\n"},"repo_name":{"kind":"string","value":"Staffjoy/client_python"},"path":{"kind":"string","value":"staffjoy/resources/organization.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":959,"string":"959"}}},{"rowIdx":44295,"cells":{"code":{"kind":"string","value":"# coding=utf-8\n\nimport logging\nimport time\n\nfrom adapter import Adapter\n\nDROIDBOT_APP_PACKAGE = \"io.github.ylimit.droidbotapp\"\nIME_SERVICE = DROIDBOT_APP_PACKAGE + \"/.DroidBotIME\"\n\n\nclass DroidBotImeException(Exception):\n \"\"\"\n Exception in telnet connection\n \"\"\"\n pass\n\n\nclass DroidBotIme(Adapter):\n \"\"\"\n a connection with droidbot ime app.\n \"\"\"\n def __init__(self, device=None):\n \"\"\"\n initiate a emulator console via telnet\n :param device: instance of Device\n :return:\n \"\"\"\n self.logger = logging.getLogger(self.__class__.__name__)\n if device is None:\n from droidbot.device import Device\n device = Device()\n self.device = device\n self.connected = False\n\n def set_up(self):\n device = self.device\n if DROIDBOT_APP_PACKAGE in device.adb.get_installed_apps():\n self.logger.debug(\"DroidBot app was already installed.\")\n else:\n # install droidbot app\n try:\n import pkg_resources\n droidbot_app_path = pkg_resources.resource_filename(\"droidbot\", \"resources/droidbotApp.apk\")\n install_cmd = \"install %s\" % droidbot_app_path\n self.device.adb.run_cmd(install_cmd)\n self.logger.debug(\"DroidBot app installed.\")\n except Exception as e:\n self.logger.warning(e.message)\n self.logger.warning(\"Failed to install DroidBotApp.\")\n\n def 
tear_down(self):\n self.device.uninstall_app(DROIDBOT_APP_PACKAGE)\n\n def connect(self):\n r_enable = self.device.adb.shell(\"ime enable %s\" % IME_SERVICE)\n if r_enable.endswith(\"now enabled\"):\n r_set = self.device.adb.shell(\"ime set %s\" % IME_SERVICE)\n if r_set.endswith(\"selected\"):\n self.connected = True\n return\n self.logger.warning(\"Failed to connect DroidBotIME!\")\n\n def check_connectivity(self):\n \"\"\"\n check if droidbot app is connected\n :return: True for connected\n \"\"\"\n return self.connected\n\n def disconnect(self):\n \"\"\"\n disconnect telnet\n \"\"\"\n self.connected = False\n r_disable = self.device.adb.shell(\"ime disable %s\" % IME_SERVICE)\n if r_disable.endswith(\"now disabled\"):\n self.connected = False\n print \"[CONNECTION] %s is disconnected\" % self.__class__.__name__\n return\n self.logger.warning(\"Failed to disconnect DroidBotIME!\")\n\n def input_text(self, text, mode=0):\n \"\"\"\n Input text to target device\n :param text: text to input, can be unicode format\n :param mode: 0 - set text; 1 - append text.\n \"\"\"\n input_cmd = \"am broadcast -a DROIDBOT_INPUT_TEXT --es text \\\"%s\\\" --ei mode %d\" % (text, mode)\n self.device.adb.shell(input_cmd)\n\n\nif __name__ == \"__main__\":\n droidbot_ime_conn = DroidBotIme()\n droidbot_ime_conn.set_up()\n droidbot_ime_conn.connect()\n droidbot_ime_conn.input_text(\"hello world!\", 0)\n droidbot_ime_conn.input_text(u\"世界你好!\", 1)\n time.sleep(2)\n droidbot_ime_conn.input_text(u\"再见。Bye bye.\", 0)\n droidbot_ime_conn.disconnect()\n droidbot_ime_conn.tear_down()\n"},"repo_name":{"kind":"string","value":"nastya/droidbot"},"path":{"kind":"string","value":"droidbot/adapter/droidbot_ime.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":3282,"string":"3,282"}}},{"rowIdx":44296,"cells":{"code":{"kind":"string","value":"from __future__ import annotations\n\nimport operator\nfrom collections import defaultdict\nfrom contextlib import ExitStack\nfrom datetime import datetime, timedelta, timezone\nfrom logging import Logger, getLogger\nfrom typing import Any, Callable, ClassVar, Iterable\nfrom uuid import UUID\n\nimport attrs\nimport pymongo\nimport tenacity\nfrom attrs.validators import instance_of\nfrom bson import CodecOptions, UuidRepresentation\nfrom bson.codec_options import TypeEncoder, TypeRegistry\nfrom pymongo import ASCENDING, DeleteOne, MongoClient, UpdateOne\nfrom pymongo.collection import Collection\nfrom pymongo.errors import ConnectionFailure, DuplicateKeyError\nfrom tenacity import Retrying\n\nfrom ..abc import DataStore, EventBroker, EventSource, Job, Schedule, Serializer\nfrom ..enums import CoalescePolicy, ConflictPolicy, JobOutcome\nfrom ..eventbrokers.local import LocalEventBroker\nfrom ..events import (\n DataStoreEvent, JobAcquired, JobAdded, JobReleased, ScheduleAdded, ScheduleRemoved,\n ScheduleUpdated, TaskAdded, TaskRemoved, TaskUpdated)\nfrom ..exceptions import (\n ConflictingIdError, DeserializationError, SerializationError, TaskLookupError)\nfrom ..serializers.pickle import PickleSerializer\nfrom ..structures import JobResult, RetrySettings, Task\nfrom ..util import reentrant\n\n\nclass CustomEncoder(TypeEncoder):\n def __init__(self, python_type: type, encoder: Callable):\n self._python_type = python_type\n self._encoder = encoder\n\n @property\n def python_type(self) -> type:\n return self._python_type\n\n def transform_python(self, value: Any) -> Any:\n return self._encoder(value)\n\n\ndef 
ensure_uuid_presentation(client: MongoClient) -> None:\n # if client.\n pass\n\n\n@reentrant\n@attrs.define(eq=False)\nclass MongoDBDataStore(DataStore):\n client: MongoClient = attrs.field(validator=instance_of(MongoClient))\n serializer: Serializer = attrs.field(factory=PickleSerializer, kw_only=True)\n database: str = attrs.field(default='apscheduler', kw_only=True)\n lock_expiration_delay: float = attrs.field(default=30, kw_only=True)\n retry_settings: RetrySettings = attrs.field(default=RetrySettings())\n start_from_scratch: bool = attrs.field(default=False, kw_only=True)\n\n _task_attrs: ClassVar[list[str]] = [field.name for field in attrs.fields(Task)]\n _schedule_attrs: ClassVar[list[str]] = [field.name for field in attrs.fields(Schedule)]\n _job_attrs: ClassVar[list[str]] = [field.name for field in attrs.fields(Job)]\n\n _logger: Logger = attrs.field(init=False, factory=lambda: getLogger(__name__))\n _retrying: Retrying = attrs.field(init=False)\n _exit_stack: ExitStack = attrs.field(init=False, factory=ExitStack)\n _events: EventBroker = attrs.field(init=False, factory=LocalEventBroker)\n _local_tasks: dict[str, Task] = attrs.field(init=False, factory=dict)\n\n def __attrs_post_init__(self) -> None:\n # Construct the Tenacity retry controller\n self._retrying = Retrying(stop=self.retry_settings.stop, wait=self.retry_settings.wait,\n retry=tenacity.retry_if_exception_type(ConnectionFailure),\n after=self._after_attempt, reraise=True)\n\n type_registry = TypeRegistry([\n CustomEncoder(timedelta, timedelta.total_seconds),\n CustomEncoder(ConflictPolicy, operator.attrgetter('name')),\n CustomEncoder(CoalescePolicy, operator.attrgetter('name')),\n CustomEncoder(JobOutcome, operator.attrgetter('name'))\n ])\n codec_options = CodecOptions(tz_aware=True, type_registry=type_registry,\n uuid_representation=UuidRepresentation.STANDARD)\n database = self.client.get_database(self.database, codec_options=codec_options)\n self._tasks: Collection = database['tasks']\n self._schedules: Collection = database['schedules']\n self._jobs: Collection = database['jobs']\n self._jobs_results: Collection = database['job_results']\n\n @classmethod\n def from_url(cls, uri: str, **options) -> MongoDBDataStore:\n client = MongoClient(uri)\n return cls(client, **options)\n\n @property\n def events(self) -> EventSource:\n return self._events\n\n def _after_attempt(self, retry_state: tenacity.RetryCallState) -> None:\n self._logger.warning('Temporary data store error (attempt %d): %s',\n retry_state.attempt_number, retry_state.outcome.exception())\n\n def __enter__(self):\n server_info = self.client.server_info()\n if server_info['versionArray'] < [4, 0]:\n raise RuntimeError(f\"MongoDB server must be at least v4.0; current version = \"\n f\"{server_info['version']}\")\n\n self._exit_stack.__enter__()\n self._exit_stack.enter_context(self._events)\n\n for attempt in self._retrying:\n with attempt, self.client.start_session() as session:\n if self.start_from_scratch:\n self._tasks.delete_many({}, session=session)\n self._schedules.delete_many({}, session=session)\n self._jobs.delete_many({}, session=session)\n self._jobs_results.delete_many({}, session=session)\n\n self._schedules.create_index('next_fire_time', session=session)\n self._jobs.create_index('task_id', session=session)\n self._jobs.create_index('created_at', session=session)\n self._jobs.create_index('tags', session=session)\n self._jobs_results.create_index('finished_at', session=session)\n\n return self\n\n def __exit__(self, exc_type, exc_val, 
exc_tb):\n self._exit_stack.__exit__(exc_type, exc_val, exc_tb)\n\n def add_task(self, task: Task) -> None:\n for attempt in self._retrying:\n with attempt:\n previous = self._tasks.find_one_and_update(\n {'_id': task.id},\n {'$set': task.marshal(self.serializer),\n '$setOnInsert': {'running_jobs': 0}},\n upsert=True\n )\n\n self._local_tasks[task.id] = task\n if previous:\n self._events.publish(TaskUpdated(task_id=task.id))\n else:\n self._events.publish(TaskAdded(task_id=task.id))\n\n def remove_task(self, task_id: str) -> None:\n for attempt in self._retrying:\n with attempt:\n if not self._tasks.find_one_and_delete({'_id': task_id}):\n raise TaskLookupError(task_id)\n\n del self._local_tasks[task_id]\n self._events.publish(TaskRemoved(task_id=task_id))\n\n def get_task(self, task_id: str) -> Task:\n try:\n return self._local_tasks[task_id]\n except KeyError:\n for attempt in self._retrying:\n with attempt:\n document = self._tasks.find_one({'_id': task_id}, projection=self._task_attrs)\n\n if not document:\n raise TaskLookupError(task_id)\n\n document['id'] = document.pop('id')\n task = self._local_tasks[task_id] = Task.unmarshal(self.serializer, document)\n return task\n\n def get_tasks(self) -> list[Task]:\n for attempt in self._retrying:\n with attempt:\n tasks: list[Task] = []\n for document in self._tasks.find(projection=self._task_attrs,\n sort=[('_id', pymongo.ASCENDING)]):\n document['id'] = document.pop('_id')\n tasks.append(Task.unmarshal(self.serializer, document))\n\n return tasks\n\n def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:\n filters = {'_id': {'$in': list(ids)}} if ids is not None else {}\n for attempt in self._retrying:\n with attempt:\n schedules: list[Schedule] = []\n cursor = self._schedules.find(filters).sort('_id')\n for document in cursor:\n document['id'] = document.pop('_id')\n try:\n schedule = Schedule.unmarshal(self.serializer, document)\n except DeserializationError:\n self._logger.warning('Failed to deserialize schedule %r', document['_id'])\n continue\n\n schedules.append(schedule)\n\n return schedules\n\n def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None:\n event: DataStoreEvent\n document = schedule.marshal(self.serializer)\n document['_id'] = document.pop('id')\n try:\n for attempt in self._retrying:\n with attempt:\n self._schedules.insert_one(document)\n except DuplicateKeyError:\n if conflict_policy is ConflictPolicy.exception:\n raise ConflictingIdError(schedule.id) from None\n elif conflict_policy is ConflictPolicy.replace:\n for attempt in self._retrying:\n with attempt:\n self._schedules.replace_one({'_id': schedule.id}, document, True)\n\n event = ScheduleUpdated(\n schedule_id=schedule.id,\n next_fire_time=schedule.next_fire_time)\n self._events.publish(event)\n else:\n event = ScheduleAdded(schedule_id=schedule.id,\n next_fire_time=schedule.next_fire_time)\n self._events.publish(event)\n\n def remove_schedules(self, ids: Iterable[str]) -> None:\n filters = {'_id': {'$in': list(ids)}} if ids is not None else {}\n for attempt in self._retrying:\n with attempt, self.client.start_session() as session:\n cursor = self._schedules.find(filters, projection=['_id'], session=session)\n ids = [doc['_id'] for doc in cursor]\n if ids:\n self._schedules.delete_many(filters, session=session)\n\n for schedule_id in ids:\n self._events.publish(ScheduleRemoved(schedule_id=schedule_id))\n\n def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:\n for attempt in 
self._retrying:\n with attempt, self.client.start_session() as session:\n schedules: list[Schedule] = []\n cursor = self._schedules.find(\n {'next_fire_time': {'$ne': None},\n '$or': [{'acquired_until': {'$exists': False}},\n {'acquired_until': {'$lt': datetime.now(timezone.utc)}}]\n },\n session=session\n ).sort('next_fire_time').limit(limit)\n for document in cursor:\n document['id'] = document.pop('_id')\n schedule = Schedule.unmarshal(self.serializer, document)\n schedules.append(schedule)\n\n if schedules:\n now = datetime.now(timezone.utc)\n acquired_until = datetime.fromtimestamp(\n now.timestamp() + self.lock_expiration_delay, now.tzinfo)\n filters = {'_id': {'$in': [schedule.id for schedule in schedules]}}\n update = {'$set': {'acquired_by': scheduler_id,\n 'acquired_until': acquired_until}}\n self._schedules.update_many(filters, update, session=session)\n\n return schedules\n\n def release_schedules(self, scheduler_id: str, schedules: list[Schedule]) -> None:\n updated_schedules: list[tuple[str, datetime]] = []\n finished_schedule_ids: list[str] = []\n\n # Update schedules that have a next fire time\n requests = []\n for schedule in schedules:\n filters = {'_id': schedule.id, 'acquired_by': scheduler_id}\n if schedule.next_fire_time is not None:\n try:\n serialized_trigger = self.serializer.serialize(schedule.trigger)\n except SerializationError:\n self._logger.exception('Error serializing schedule %r – '\n 'removing from data store', schedule.id)\n requests.append(DeleteOne(filters))\n finished_schedule_ids.append(schedule.id)\n continue\n\n update = {\n '$unset': {\n 'acquired_by': True,\n 'acquired_until': True,\n },\n '$set': {\n 'trigger': serialized_trigger,\n 'next_fire_time': schedule.next_fire_time\n }\n }\n requests.append(UpdateOne(filters, update))\n updated_schedules.append((schedule.id, schedule.next_fire_time))\n else:\n requests.append(DeleteOne(filters))\n finished_schedule_ids.append(schedule.id)\n\n if requests:\n for attempt in self._retrying:\n with attempt, self.client.start_session() as session:\n self._schedules.bulk_write(requests, ordered=False, session=session)\n\n for schedule_id, next_fire_time in updated_schedules:\n event = ScheduleUpdated(schedule_id=schedule_id,\n next_fire_time=next_fire_time)\n self._events.publish(event)\n\n for schedule_id in finished_schedule_ids:\n self._events.publish(ScheduleRemoved(schedule_id=schedule_id))\n\n def get_next_schedule_run_time(self) -> datetime | None:\n for attempt in self._retrying:\n with attempt:\n document = self._schedules.find_one({'next_run_time': {'$ne': None}},\n projection=['next_run_time'],\n sort=[('next_run_time', ASCENDING)])\n\n if document:\n return document['next_run_time']\n else:\n return None\n\n def add_job(self, job: Job) -> None:\n document = job.marshal(self.serializer)\n document['_id'] = document.pop('id')\n for attempt in self._retrying:\n with attempt:\n self._jobs.insert_one(document)\n\n event = JobAdded(job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id,\n tags=job.tags)\n self._events.publish(event)\n\n def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:\n filters = {'_id': {'$in': list(ids)}} if ids is not None else {}\n for attempt in self._retrying:\n with attempt:\n jobs: list[Job] = []\n cursor = self._jobs.find(filters).sort('_id')\n for document in cursor:\n document['id'] = document.pop('_id')\n try:\n job = Job.unmarshal(self.serializer, document)\n except DeserializationError:\n self._logger.warning('Failed to deserialize job %r', 
document['id'])\n continue\n\n jobs.append(job)\n\n return jobs\n\n def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:\n for attempt in self._retrying:\n with attempt, self.client.start_session() as session:\n cursor = self._jobs.find(\n {'$or': [{'acquired_until': {'$exists': False}},\n {'acquired_until': {'$lt': datetime.now(timezone.utc)}}]\n },\n sort=[('created_at', ASCENDING)],\n limit=limit,\n session=session\n )\n documents = list(cursor)\n\n # Retrieve the limits\n task_ids: set[str] = {document['task_id'] for document in documents}\n task_limits = self._tasks.find(\n {'_id': {'$in': list(task_ids)}, 'max_running_jobs': {'$ne': None}},\n projection=['max_running_jobs', 'running_jobs'],\n session=session\n )\n job_slots_left = {doc['_id']: doc['max_running_jobs'] - doc['running_jobs']\n for doc in task_limits}\n\n # Filter out jobs that don't have free slots\n acquired_jobs: list[Job] = []\n increments: dict[str, int] = defaultdict(lambda: 0)\n for document in documents:\n document['id'] = document.pop('_id')\n job = Job.unmarshal(self.serializer, document)\n\n # Don't acquire the job if there are no free slots left\n slots_left = job_slots_left.get(job.task_id)\n if slots_left == 0:\n continue\n elif slots_left is not None:\n job_slots_left[job.task_id] -= 1\n\n acquired_jobs.append(job)\n increments[job.task_id] += 1\n\n if acquired_jobs:\n now = datetime.now(timezone.utc)\n acquired_until = datetime.fromtimestamp(\n now.timestamp() + self.lock_expiration_delay, timezone.utc)\n filters = {'_id': {'$in': [job.id for job in acquired_jobs]}}\n update = {'$set': {'acquired_by': worker_id,\n 'acquired_until': acquired_until}}\n self._jobs.update_many(filters, update, session=session)\n\n # Increment the running job counters on each task\n for task_id, increment in increments.items():\n self._tasks.find_one_and_update(\n {'_id': task_id},\n {'$inc': {'running_jobs': increment}},\n session=session\n )\n\n # Publish the appropriate events\n for job in acquired_jobs:\n self._events.publish(JobAcquired(job_id=job.id, worker_id=worker_id))\n\n return acquired_jobs\n\n def release_job(self, worker_id: str, task_id: str, result: JobResult) -> None:\n for attempt in self._retrying:\n with attempt, self.client.start_session() as session:\n # Insert the job result\n document = result.marshal(self.serializer)\n document['_id'] = document.pop('job_id')\n self._jobs_results.insert_one(document, session=session)\n\n # Decrement the running jobs counter\n self._tasks.find_one_and_update(\n {'_id': task_id},\n {'$inc': {'running_jobs': -1}},\n session=session\n )\n\n # Delete the job\n self._jobs.delete_one({'_id': result.job_id}, session=session)\n\n # Publish the event\n self._events.publish(\n JobReleased(job_id=result.job_id, worker_id=worker_id, outcome=result.outcome)\n )\n\n def get_job_result(self, job_id: UUID) -> JobResult | None:\n for attempt in self._retrying:\n with attempt:\n document = self._jobs_results.find_one_and_delete({'_id': job_id})\n\n if document:\n document['job_id'] = document.pop('_id')\n return JobResult.unmarshal(self.serializer, document)\n else:\n return None\n"},"repo_name":{"kind":"string","value":"agronholm/apscheduler"},"path":{"kind":"string","value":"src/apscheduler/datastores/mongodb.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":19667,"string":"19,667"}}},{"rowIdx":44297,"cells":{"code":{"kind":"string","value":"# coding=utf-8\n# 
------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\n\nimport pytest\nimport functools\nfrom devtools_testutils.aio import recorded_by_proxy_async\nfrom azure.ai.formrecognizer._generated.models import AnalyzeResultOperation\nfrom azure.ai.formrecognizer.aio import DocumentAnalysisClient\nfrom azure.ai.formrecognizer import AnalyzeResult\nfrom preparers import FormRecognizerPreparer\nfrom asynctestcase import AsyncFormRecognizerTest\nfrom preparers import GlobalClientPreparer as _GlobalClientPreparer\n\n\nDocumentAnalysisClientPreparer = functools.partial(_GlobalClientPreparer, DocumentAnalysisClient)\n\n\nclass TestDACAnalyzeDocumentAsync(AsyncFormRecognizerTest):\n\n def teardown(self):\n self.sleep(4)\n\n @FormRecognizerPreparer()\n @DocumentAnalysisClientPreparer()\n @recorded_by_proxy_async\n async def test_document_stream_transform_pdf(self, client):\n with open(self.invoice_pdf, \"rb\") as fd:\n document = fd.read()\n\n responses = []\n\n def callback(raw_response, _, headers):\n analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)\n extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result)\n responses.append(analyze_result)\n responses.append(extracted_document)\n\n async with client:\n poller = await client.begin_analyze_document(\"prebuilt-document\", document, cls=callback)\n result = await poller.result()\n raw_analyze_result = responses[0].analyze_result\n returned_model = responses[1]\n\n # Check AnalyzeResult\n assert returned_model.model_id == raw_analyze_result.model_id\n assert returned_model.api_version == raw_analyze_result.api_version\n assert returned_model.content == raw_analyze_result.content\n \n self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)\n self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)\n self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)\n self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)\n self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)\n self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)\n\n # check page range\n assert len(raw_analyze_result.pages) == len(returned_model.pages)\n\n @FormRecognizerPreparer()\n @DocumentAnalysisClientPreparer()\n @recorded_by_proxy_async\n async def test_document_stream_transform_jpg(self, client):\n with open(self.form_jpg, \"rb\") as fd:\n document = fd.read()\n\n responses = []\n\n def callback(raw_response, _, headers):\n analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)\n extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result)\n responses.append(analyze_result)\n responses.append(extracted_document)\n\n async with client:\n poller = await client.begin_analyze_document(\"prebuilt-document\", document, cls=callback)\n result = await poller.result()\n raw_analyze_result = responses[0].analyze_result\n returned_model = responses[1]\n\n # Check AnalyzeResult\n assert returned_model.model_id == raw_analyze_result.model_id\n assert returned_model.api_version == raw_analyze_result.api_version\n assert returned_model.content == raw_analyze_result.content\n \n self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)\n 
self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)\n self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)\n self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)\n self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)\n self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)\n\n # check page range\n assert len(raw_analyze_result.pages) == len(returned_model.pages)\n\n @FormRecognizerPreparer()\n @DocumentAnalysisClientPreparer()\n @recorded_by_proxy_async\n async def test_document_multipage_transform(self, client):\n with open(self.multipage_invoice_pdf, \"rb\") as fd:\n document = fd.read()\n\n responses = []\n\n def callback(raw_response, _, headers):\n analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)\n extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result)\n responses.append(analyze_result)\n responses.append(extracted_document)\n\n async with client:\n poller = await client.begin_analyze_document(\"prebuilt-document\", document, cls=callback)\n result = await poller.result()\n raw_analyze_result = responses[0].analyze_result\n returned_model = responses[1]\n\n # Check AnalyzeResult\n assert returned_model.model_id == raw_analyze_result.model_id\n assert returned_model.api_version == raw_analyze_result.api_version\n assert returned_model.content == raw_analyze_result.content\n \n self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)\n self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)\n self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)\n self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)\n self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)\n self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)\n\n # check page range\n assert len(raw_analyze_result.pages) == len(returned_model.pages)\n\n @pytest.mark.live_test_only\n @FormRecognizerPreparer()\n @DocumentAnalysisClientPreparer()\n @recorded_by_proxy_async\n async def test_document_multipage_table_span_pdf(self, client, **kwargs):\n\n with open(self.multipage_table_pdf, \"rb\") as fd:\n my_file = fd.read()\n async with client:\n poller = await client.begin_analyze_document(\"prebuilt-document\", my_file)\n document = await poller.result()\n assert len(document.tables) == 3\n assert document.tables[0].row_count == 30\n assert document.tables[0].column_count == 5\n assert document.tables[1].row_count == 6\n assert document.tables[1].column_count == 5\n assert document.tables[2].row_count == 23\n assert document.tables[2].column_count == 5\n\n @FormRecognizerPreparer()\n @DocumentAnalysisClientPreparer()\n @recorded_by_proxy_async\n async def test_document_specify_pages(self, client):\n with open(self.multipage_invoice_pdf, \"rb\") as fd:\n document = fd.read()\n\n async with client:\n poller = await client.begin_analyze_document(\"prebuilt-document\", document, pages=\"1\")\n result = await poller.result()\n assert len(result.pages) == 1\n\n poller = await client.begin_analyze_document(\"prebuilt-document\", document, pages=\"1, 3\")\n result = await poller.result()\n assert len(result.pages) == 2\n\n 
poller = await client.begin_analyze_document(\"prebuilt-document\", document, pages=\"1-2\")\n result = await poller.result()\n assert len(result.pages) == 2\n\n poller = await client.begin_analyze_document(\"prebuilt-document\", document, pages=\"1-2, 3\")\n result = await poller.result()\n assert len(result.pages) == 3\n"},"repo_name":{"kind":"string","value":"Azure/azure-sdk-for-python"},"path":{"kind":"string","value":"sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_general_document_async.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":8184,"string":"8,184"}}},{"rowIdx":44298,"cells":{"code":{"kind":"string","value":"import os\nimport unittest\nimport xcube.core.store as xcube_store\n\nfrom cate.core.ds import DATA_STORE_POOL\n\n\ndef _create_test_data_store_config(name: str):\n local_test_store_path = \\\n os.path.join(os.path.dirname(__file__), 'ds', 'resources', 'datasources', name)\n local_test_store_dict = {\n \"store_id\": \"file\",\n \"store_params\": {\n \"root\": local_test_store_path\n },\n \"title\": f\"Local Test Store '{name}'\"\n }\n local_test_store = xcube_store.DataStoreConfig.from_dict(local_test_store_dict)\n return local_test_store\n\n\nclass StoreTest(unittest.TestCase):\n _orig_store_configs = None\n\n @classmethod\n def setUpClass(cls):\n cls._orig_store_configs = {instance_id: DATA_STORE_POOL.get_store_config(instance_id)\n for instance_id in DATA_STORE_POOL.store_instance_ids}\n for instance_id in DATA_STORE_POOL.store_instance_ids:\n DATA_STORE_POOL.remove_store_config(instance_id)\n DATA_STORE_POOL.add_store_config('local_test_store_1',\n _create_test_data_store_config('local'))\n DATA_STORE_POOL.add_store_config('local_test_store_2',\n _create_test_data_store_config('local2'))\n\n @classmethod\n def tearDownClass(cls):\n for instance_id in DATA_STORE_POOL.store_instance_ids:\n DATA_STORE_POOL.remove_store_config(instance_id)\n for instance_id, config in cls._orig_store_configs.items():\n DATA_STORE_POOL.add_store_config(instance_id, config)\n"},"repo_name":{"kind":"string","value":"CCI-Tools/cate-core"},"path":{"kind":"string","value":"tests/storetest.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":1590,"string":"1,590"}}},{"rowIdx":44299,"cells":{"code":{"kind":"string","value":"from gusto import *\nfrom gusto import thermodynamics\nfrom firedrake import (PeriodicIntervalMesh, ExtrudedMesh,\n SpatialCoordinate, conditional, cos, pi, sqrt, NonlinearVariationalProblem,\n NonlinearVariationalSolver, TestFunction, dx, TrialFunction, Constant, Function,\n LinearVariationalProblem, LinearVariationalSolver, DirichletBC,\n FunctionSpace, BrokenElement, VectorFunctionSpace)\nimport sys\n\nif '--recovered' in sys.argv:\n recovered = True\nelse:\n recovered = False\nif '--limit' in sys.argv:\n limit = True\nelse:\n limit = False\nif '--diffusion' in sys.argv:\n diffusion = True\nelse:\n diffusion = False\n\ndt = 1.0\nif '--running-tests' in sys.argv:\n tmax = 10.\n deltax = 1000.\nelse:\n deltax = 100. 
if recovered else 200\n tmax = 1000.\n\n\nL = 10000.\nH = 10000.\nnlayers = int(H/deltax)\nncolumns = int(L/deltax)\n\nm = PeriodicIntervalMesh(ncolumns, L)\nmesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers)\ndegree = 0 if recovered else 1\n\nfieldlist = ['u', 'rho', 'theta']\ntimestepping = TimesteppingParameters(dt=dt, maxk=4, maxi=1)\n\ndirname = 'moist_bf'\n\nif recovered:\n dirname += '_recovered'\nif limit:\n dirname += '_limit'\nif diffusion:\n dirname += '_diffusion'\n\noutput = OutputParameters(dirname=dirname,\n dumpfreq=20,\n dumplist=['u'],\n perturbation_fields=[],\n log_level='INFO')\n\nparams = CompressibleParameters()\ndiagnostics = Diagnostics(*fieldlist)\ndiagnostic_fields = [Theta_e(), InternalEnergy(), Perturbation('InternalEnergy'), PotentialEnergy()]\n\nstate = State(mesh, vertical_degree=degree, horizontal_degree=degree,\n family=\"CG\",\n timestepping=timestepping,\n output=output,\n parameters=params,\n diagnostics=diagnostics,\n fieldlist=fieldlist,\n diagnostic_fields=diagnostic_fields)\n\n# Initial conditions\nu0 = state.fields(\"u\")\nrho0 = state.fields(\"rho\")\ntheta0 = state.fields(\"theta\")\nwater_v0 = state.fields(\"water_v\", theta0.function_space())\nwater_c0 = state.fields(\"water_c\", theta0.function_space())\nmoisture = [\"water_v\", \"water_c\"]\n\n# spaces\nVu = state.spaces(\"HDiv\")\nVt = state.spaces(\"HDiv_v\")\nVr = state.spaces(\"DG\")\nx, z = SpatialCoordinate(mesh)\nquadrature_degree = (4, 4)\ndxp = dx(degree=(quadrature_degree))\n\n# Define constant theta_e and water_t\nTsurf = 320.0\ntotal_water = 0.02\ntheta_e = Function(Vt).assign(Tsurf)\nwater_t = Function(Vt).assign(total_water)\n\n# Calculate hydrostatic fields\nsaturated_hydrostatic_balance(state, theta_e, water_t)\n\n# make mean fields\ntheta_b = Function(Vt).assign(theta0)\nrho_b = Function(Vr).assign(rho0)\nwater_vb = Function(Vt).assign(water_v0)\nwater_cb = Function(Vt).assign(water_t - water_vb)\npibar = thermodynamics.pi(state.parameters, rho_b, theta_b)\nTb = thermodynamics.T(state.parameters, theta_b, pibar, r_v=water_vb)\nIbar = state.fields(\"InternalEnergybar\", Vt, dump=False)\nIbar.interpolate(thermodynamics.internal_energy(state.parameters, rho_b, Tb, r_v=water_vb, r_l=water_cb))\n\n# define perturbation\nxc = L / 2\nzc = 2000.\nrc = 2000.\nTdash = 2.0\nr = sqrt((x - xc) ** 2 + (z - zc) ** 2)\ntheta_pert = Function(Vt).interpolate(conditional(r > rc,\n 0.0,\n Tdash * (cos(pi * r / (2.0 * rc))) ** 2))\n\n# define initial theta\ntheta0.assign(theta_b * (theta_pert / 300.0 + 1.0))\n\n# find perturbed rho\ngamma = TestFunction(Vr)\nrho_trial = TrialFunction(Vr)\na = gamma * rho_trial * dxp\nL = gamma * (rho_b * theta_b / theta0) * dxp\nrho_problem = LinearVariationalProblem(a, L, rho0)\nrho_solver = LinearVariationalSolver(rho_problem)\nrho_solver.solve()\n\nphysics_boundary_method = Boundary_Method.physics if recovered else None\n\n# find perturbed water_v\nw_v = Function(Vt)\nphi = TestFunction(Vt)\nrho_averaged = Function(Vt)\nrho_recoverer = Recoverer(rho0, rho_averaged,\n VDG=FunctionSpace(mesh, BrokenElement(Vt.ufl_element())),\n boundary_method=physics_boundary_method)\nrho_recoverer.project()\n\npi = thermodynamics.pi(state.parameters, rho_averaged, theta0)\np = thermodynamics.p(state.parameters, pi)\nT = thermodynamics.T(state.parameters, theta0, pi, r_v=w_v)\nw_sat = thermodynamics.r_sat(state.parameters, T, p)\n\nw_functional = (phi * w_v * dxp - phi * w_sat * dxp)\nw_problem = NonlinearVariationalProblem(w_functional, w_v)\nw_solver = 
NonlinearVariationalSolver(w_problem)\nw_solver.solve()\n\nwater_v0.assign(w_v)\nwater_c0.assign(water_t - water_v0)\n\n# initialise fields\nstate.initialise([('u', u0),\n ('rho', rho0),\n ('theta', theta0),\n ('water_v', water_v0),\n ('water_c', water_c0)])\nstate.set_reference_profiles([('rho', rho_b),\n ('theta', theta_b),\n ('water_v', water_vb)])\n\n\n# set up limiter\nif limit:\n if recovered:\n limiter = VertexBasedLimiter(VDG1)\n else:\n limiter = ThetaLimiter(Vt)\nelse:\n limiter = None\n\n\n# Set up advection schemes\nif recovered:\n VDG1 = state.spaces(\"DG1\")\n VCG1 = FunctionSpace(mesh, \"CG\", 1)\n Vt_brok = FunctionSpace(mesh, BrokenElement(Vt.ufl_element()))\n Vu_DG1 = VectorFunctionSpace(mesh, VDG1.ufl_element())\n Vu_CG1 = VectorFunctionSpace(mesh, \"CG\", 1)\n\n u_opts = RecoveredOptions(embedding_space=Vu_DG1,\n recovered_space=Vu_CG1,\n broken_space=Vu,\n boundary_method=Boundary_Method.dynamics)\n rho_opts = RecoveredOptions(embedding_space=VDG1,\n recovered_space=VCG1,\n broken_space=Vr,\n boundary_method=Boundary_Method.dynamics)\n theta_opts = RecoveredOptions(embedding_space=VDG1,\n recovered_space=VCG1,\n broken_space=Vt_brok)\n\n ueqn = EmbeddedDGAdvection(state, Vu, equation_form=\"advective\", options=u_opts)\n rhoeqn = EmbeddedDGAdvection(state, Vr, equation_form=\"continuity\", options=rho_opts)\n thetaeqn = EmbeddedDGAdvection(state, Vt, equation_form=\"advective\", options=theta_opts)\nelse:\n ueqn = VectorInvariant(state, Vu)\n rhoeqn = AdvectionEquation(state, Vr, equation_form=\"continuity\")\n thetaeqn = EmbeddedDGAdvection(state, Vt, equation_form=\"advective\", options=EmbeddedDGOptions())\n\nu_advection = ('u', SSPRK3(state, u0, ueqn)) if recovered else ('u', ThetaMethod(state, u0, ueqn))\n\nadvected_fields = [u_advection,\n ('rho', SSPRK3(state, rho0, rhoeqn)),\n ('theta', SSPRK3(state, theta0, thetaeqn, limiter=limiter)),\n ('water_v', SSPRK3(state, water_v0, thetaeqn, limiter=limiter)),\n ('water_c', SSPRK3(state, water_c0, thetaeqn, limiter=limiter))]\n\n# Set up linear solver\nlinear_solver = CompressibleSolver(state, moisture=moisture)\n\n# Set up forcing\ncompressible_forcing = CompressibleForcing(state, moisture=moisture)\n\n# diffusion\nbcs = [DirichletBC(Vu, 0.0, \"bottom\"),\n DirichletBC(Vu, 0.0, \"top\")]\n\ndiffused_fields = []\n\nif diffusion:\n diffused_fields.append(('u', InteriorPenalty(state, Vu, kappa=Constant(60.),\n mu=Constant(10./deltax), bcs=bcs)))\n\n# define condensation\nphysics_list = [Condensation(state)]\n\n# build time stepper\nstepper = CrankNicolson(state, advected_fields, linear_solver,\n compressible_forcing, physics_list=physics_list,\n diffused_fields=diffused_fields)\n\nstepper.run(t=0, 
tmax=tmax)\n"},"repo_name":{"kind":"string","value":"firedrakeproject/gusto"},"path":{"kind":"string","value":"examples/moist_bf_bubble.py"},"language":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"size":{"kind":"number","value":7797,"string":"7,797"}}}]
Dataset splits: train 45,001 rows (127,971,653 bytes parquet), validation 5,000 rows (13,585,843 bytes parquet); this page shows rows starting at offset 44,200.
Dataset columns:
  code        string  (length 6 to 947k)
  repo_name   string  (length 5 to 100)
  path        string  (length 4 to 226)
  language    string  (1 distinct value)
  license     string  (15 distinct values)
  size        int64   (6 to 947k)
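For orientation, a minimal sketch of reading rows with this schema via the datasets library; the dataset id below is a placeholder, since the page does not spell it out in readable form, and the split size comes from the summary noted above.

from datasets import load_dataset

# Placeholder id -- substitute the dataset repository this page belongs to.
DATASET_ID = "<namespace>/<python-code-dataset>"

# Stream so the full train split (~128 MB of parquet) is not downloaded up front.
rows = load_dataset(DATASET_ID, split="train", streaming=True)

for row in rows:
    # Each row carries the columns listed above.
    print(row["repo_name"], row["path"], row["license"], row["size"])
    print(row["code"][:200])  # first 200 characters of the file
    break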
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class ApplicationGatewaySku(Model):
    """SKU of an application gateway.

    :param name: Name of an application gateway SKU. Possible values include:
     'Standard_Small', 'Standard_Medium', 'Standard_Large', 'WAF_Medium', 'WAF_Large'
    :type name: str or ~azure.mgmt.network.v2017_11_01.models.ApplicationGatewaySkuName
    :param tier: Tier of an application gateway. Possible values include: 'Standard', 'WAF'
    :type tier: str or ~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayTier
    :param capacity: Capacity (instance count) of an application gateway.
    :type capacity: int
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewaySku, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.tier = kwargs.get('tier', None)
        self.capacity = kwargs.get('capacity', None)
repo_name: lmazuel/azure-sdk-for-python | path: azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/application_gateway_sku.py | language: Python | license: mit | size: 1,567
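A small instantiation sketch for the model above, using values listed in its own docstring; the import path is assumed from the row metadata rather than verified here.

from azure.mgmt.network.v2017_11_01.models import ApplicationGatewaySku

sku = ApplicationGatewaySku(name='Standard_Small', tier='Standard', capacity=2)
print(sku.name, sku.tier, sku.capacity)  # Standard_Small Standard 2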
import socket,select,sys,time
from errors import *
from communicate import SendData, ReceiveData, ReceiveDataUDP

class TCPServer():
    def __init__(self):
        self.sending_socket = None

    def input_func(self,sock,host,port,address):pass
    def output_func(self,sock,host,port,address):pass
    def connect_func(self,sock,host,port):pass
    def client_connect_func(self,sock,host,port,address):pass
    def client_disconnect_func(self,sock,host,port,address):pass
    def quit_func(self,host,port):pass

    def connect(self,host,port):
        self.host = host
        self.port = port
        try:
            self.unconnected_socket = socket.socket()
            self.unconnected_socket.bind((self.host,self.port))
            self.unconnected_socket.listen(5)
        except:
            self.unconnected_socket.close()
            raise ServerError("Only one instance of the server on port "+str(self.port)+" may run at one time!")
        self.connect_func(self.unconnected_socket,self.host,self.port)
        self.connected_sockets = []
        self.socketaddresses = {}

    def remove_socket(self,sock):
        address = self.socketaddresses[sock]
        self.client_disconnect_func(sock,self.host,self.port,address)
        self.connected_sockets.remove(sock)

    def serve_forever(self):
        self.looping = True
        while self.looping:
            input_ready,output_ready,except_ready = select.select([self.unconnected_socket]+self.connected_sockets,[],[])
            for sock in input_ready:
                if sock == self.unconnected_socket: #init socket
                    connected_socket, address = sock.accept()
                    self.connected_sockets.append(connected_socket)
                    self.socketaddresses[connected_socket] = address
                    self.client_connect_func(connected_socket,self.host,self.port,address)
                else:
                    try:
                        data = ReceiveData(sock)
                        address = self.socketaddresses[sock]
                        self.input_func(sock,self.host,self.port,address)
                    except:
                        data = "client quit"
                    if data != None:
                        if data == "client quit":
                            self.remove_socket(sock)
                            continue
                        self.sending_socket = sock
                        self.handle_data(data)

    def handle_data(self,data):
        pass

    def send_data(self,data,compress=False):
        try:
            SendData(self.sending_socket,data,compress,includelength=True)
            address = self.socketaddresses[self.sending_socket]
            self.output_func(self.sending_socket,self.host,self.port,address)
        except:
            self.remove_socket(self.sending_socket)

    def quit(self):
        for s in self.connected_sockets:
            s.close()
        self.quit_func(self.host,self.port)


class UDPServer():
    def __init__(self):
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    def input_func(self,sock,host,port,address):pass
    def output_func(self,sock,host,port,address):pass
    def connect_func(self,sock,host,port):pass
    def quit_func(self,host,port):pass

    def connect(self,host,port):
        self.host = host
        self.port = port
        try:
            self.socket.bind((host, port))
        except:
            self.socket.close()
            raise ServerError("Only one instance of the server on port "+str(self.port)+" may run at one time!")
        self.connect_func(self.socket,self.host,self.port)

    def serve_forever(self):
        self.looping = True
        while self.looping:
            data,self.lastaddress = ReceiveDataUDP(self.socket)
            self.input_func(self.socket,self.host,self.port,self.lastaddress)
            self.handle_data(data)

    def handle_data(self,data):
        pass

    def send_data(self,data,compress=False):
        try:
            SendData(self.socket,data,compress,address=self.lastaddress)
            self.output_func(self.socket,self.host,self.port,self.lastaddress)
        except:
            pass #client disconnected

    def quit(self):
        self.socket.close()
        self.quit_func(self.host,self.port)


class TCPClient:
    def __init__(self):
        pass

    def connect(self,host,port):
        self.host = host
        self.port = port
        try:
            self.socket = socket.socket()
            self.socket.connect((self.host,self.port))
        except:
            self.socket.close()
            raise SocketError("The connection could not be opened. It must be created first with a server object.")

    def send_data(self,data,compress=False):
        SendData(self.socket,data,compress,includelength=True)

    def wait_for_data(self):
        input_ready,output_ready,except_ready = select.select([self.socket],[],[])
        return ReceiveData(self.socket)

    def check_for_data(self):
        input_ready,output_ready,except_ready = select.select([self.socket],[],[],0.001)
        if len(input_ready) > 0:
            return ReceiveData(self.socket)

    def quit(self):
        self.socket.close()


class UDPClient:
    def __init__(self):
        pass

    def connect(self,host,port):
        self.host = host
        self.port = port
        self.socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        self.socket.connect((self.host,self.port))

    def send_data(self,data,compress=False):
        SendData(self.socket,data,compress)

    def wait_for_data(self):
        input_ready,output_ready,except_ready = select.select([self.socket],[],[])
        return ReceiveDataUDP(self.socket)[0]

    def check_for_data(self):
        input_ready,output_ready,except_ready = select.select([self.socket],[],[],0.001)
        if len(input_ready) > 0:
            return ReceiveDataUDP(self.socket)[0]

    def quit(self):
        self.socket.close()
repo_name: remremrem/EV-Tribute | path: world/Net/netbase.py | language: Python | license: mit | size: 6,335
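A hedged usage sketch for the module above: one way a TCPServer subclass and a TCPClient could plausibly be wired together. The echo behaviour, import path, and port number are illustrative assumptions, not taken from the original repo.

# Illustrative only: assumes netbase.py and its errors/communicate helpers are on the import path.
from netbase import TCPServer, TCPClient

class EchoServer(TCPServer):
    def handle_data(self, data):
        # handle_data() is the hook left as a no-op in the base class;
        # send_data() answers on whichever socket the request came from.
        self.send_data(data)

# Server process (blocking):
#     server = EchoServer()
#     server.connect('localhost', 9000)
#     server.serve_forever()
# Client process:
#     client = TCPClient()
#     client.connect('localhost', 9000)
#     client.send_data('ping')
#     print(client.wait_for_data())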
"""navitron_system_kills.py: cronjob for snapshotting /universe/system_kills/""" from os import path from datetime import datetime import warnings import pandas as pd import retry import navitron_crons.exceptions as exceptions import navitron_crons.connections as connections import navitron_crons._version as _version import navitron_crons.cli_core as cli_core HERE = path.abspath(path.dirname(__file__)) __app_version__ = _version.__version__ __app_name__ = 'navitron_system_stats' def get_system_jumps( config, logger=cli_core.DEFAULT_LOGGER ): """fetches system jump information from ESI Args: config (:obj:`ProsperConfig`): config with [ENDPOINTS] logger (:obj:`logging.logger`, optional): logging handle Returns: :obj:`pandas.DataFrame`: parsed data """ logger.info('--fetching data from ESI') raw_data = connections.get_esi( config.get('ENDPOINTS', 'source'), config.get('ENDPOINTS', 'system_jumps'), logger=logger ) logger.info('--parsing data into pandas') system_jumps_df = pd.DataFrame(raw_data) logger.debug(system_jumps_df.head(5)) return system_jumps_df def get_system_kills( config, logger=cli_core.DEFAULT_LOGGER ): """fetches system kills information from ESI Args: config (:obj:`ProsperConfig`): config with [ENDPOINTS] logger (:obj:`logging.logger`, optional): logging handle Returns: :obj:`pandas.DataFrame`: parsed data """ logger.info('--fetching data from ESI') raw_data = connections.get_esi( config.get('ENDPOINTS', 'source'), config.get('ENDPOINTS', 'system_kills'), logger=logger ) logger.info('--parsing data into pandas') system_kills_df = pd.DataFrame(raw_data) logger.debug(system_kills_df.head(5)) return system_kills_df class NavitronSystemStats(cli_core.NavitronApplication): """fetch and store /universe/system_kills/ & /universe/system_jumps Feel free to add script-specific args/vars """ PROGNAME = __app_name__ VERSION = __app_version__ def main(self): """application runtime""" self.load_logger(self.PROGNAME) self.conn = connections.MongoConnection( self.config, logger=self.logger # note: order specific, logger may not be loaded yet ) self.logger.info('HELLO WORLD') self.logger.info('Fetching system info: Jumps') try: # only retry first call. 
Fail otherwise system_jumps_df = retry.api.retry_call( get_system_jumps, fkwargs={ 'config': self.config, 'logger':self.logger }, tries=3, delay=300 ) except Exception: # pragma: no cover self.logger.error( '%s: Unable to fetch system_jumps', self.PROGNAME, exc_info=True ) raise self.logger.info('Fetching system info: Kills') try: system_kills_df = get_system_kills( config=self.config, logger=self.logger ) except Exception: # pramga: no cover self.logger.error( '%s: Unable to fetch system_kills', self.PROGNAME, exc_info=True ) raise self.logger.info('Merging system info') system_info_df = system_jumps_df.merge( system_kills_df, on='system_id' ) self.logger.info('Appending Metadata') metadata_obj = cli_core.generate_metadata( self.PROGNAME, self.VERSION ) system_info_df['cron_datetime'] = metadata_obj['cron_datetime'] system_info_df['write_recipt'] = metadata_obj['write_recipt'] self.logger.debug(system_info_df.head(5)) self.logger.info('Pushing data to database') try: connections.dump_to_db( system_info_df, self.PROGNAME, self.conn, debug=self.debug, logger=self.logger ) connections.write_provenance( metadata_obj, self.conn, debug=self.debug, logger=self.logger ) except Exception: self.logger.error( '%s: Unable to write data to database -- Attempting to write to disk', self.PROGNAME, exc_info=True ) try: file_name = connections.dump_to_db( system_info_df, self.PROGNAME, self.conn, debug=True, logger=self.logger ) except Exception: # pramga: no cover self.logger.critical( '%s: UNABLE TO SAVE DATA', self.PROGNAME, exc_info=True ) raise self.logger.error( '%s: saved data safely to disk: %s', self.PROGNAME, file_name ) self.logger.info('%s: Complete -- Have a nice day', self.PROGNAME) def run_main(): """hook for running entry_points""" NavitronSystemStats.run() if __name__ == '__main__': run_main()
repo_name: j9ac9k/NavitronEve | path: crons/navitron_crons/navitron_system_stats.py | language: Python | license: mit | size: 5,545
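The script above wraps only its first ESI request in retries; a minimal sketch of that retry_call pattern from the retry package follows. The flaky_fetch function and its return value are made-up stand-ins, and the delay is shortened here (the sample uses tries=3, delay=300).

from retry.api import retry_call

attempts = {'n': 0}

def flaky_fetch(endpoint):
    # stand-in for connections.get_esi(): fail twice, then succeed
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise ConnectionError('transient failure')
    return [{'system_id': 30000142, 'ship_jumps': 42}]

# tries/delay mirror the sample's retry.api.retry_call arguments, with a short delay for the sketch
data = retry_call(flaky_fetch, fargs=['system_jumps'], tries=3, delay=1)
print(data)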
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for exec.

This converts usages of the exec statement into calls to a built-in
exec() function.

exec code in ns1, ns2 -> exec(code, ns1, ns2)
"""

# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Comma, Name, Call


class FixExec(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
    |
    exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
    """

    def transform(self, node, results):
        assert results
        syms = self.syms
        a = results["a"]
        b = results.get("b")
        c = results.get("c")
        args = [a.clone()]
        args[0].prefix = ""
        if b is not None:
            args.extend([Comma(), b.clone()])
        if c is not None:
            args.extend([Comma(), c.clone()])
        return Call(Name("exec"), args, prefix=node.prefix)
ArcherSys/ArcherSys
Lib/lib2to3/fixes/fix_exec.py
Python
mit
3,143
import mallet.hmm as h_mm import mallet.state as state # emissions def emissions(): return [ {'A': 0.25, 'B': 0.25, 'C': 0.5}, {'A': 0.55, 'B': 0.15, 'C': 0.3}, {'A': 0.675, 'B': 0.20, 'C': 0.125}, {'B': 0.5, 'C': 0.5}, {'A': 0.0, 'B': 0.5, 'C': 0.5} ] def invalid_emissions(): return [ {'A': 0.5, 'B': 0.25, 'C': 0.10} ] # states def state_params(): emissions_list = emissions() return [ (1, 'Begin', 'BEGIN', {}), (2, 'State1', 'S', emissions_list[0]), (3, 'State2', 'T', emissions_list[1]), (4, 'State3', 'U', emissions_list[2]), (5, 'End', 'END', {}), ] def states(): state_param_list = state_params() return dict((params[0], state.State(*params)) for params in state_param_list) # transitions def transitions(state_list = None): if state_list is None: state_list = states() return { 1: { state_list[2]: 1.0 }, 2: { state_list[2]: 0.5, state_list[3]: 0.5 }, 3: { state_list[3]: 0.75, state_list[4]: 0.25 }, 4: { state_list[4]: 0.15, state_list[5]: 0.85 }, 5: {} } def fake_transitions(state_list = None): if state_list is None: state_list = states() return { 1: { state_list[2]: 1.0, state_list[3]: 0.0 } } def states_with_transitions(): states_with_transitions = states() transition_list = transitions(states_with_transitions) for name, state in states_with_transitions.iteritems(): state.transitions = transition_list[state.id_num] return states_with_transitions def hmm(): return h_mm.HMM(states_with_transitions())
undeadpixel/mallet
test/fixtures/hmm_fixtures.py
Python
mit
1,846
import urllib.request


def get_challenge(s):
    return urllib.request.urlopen('http://www.pythonchallenge.com/pc/' + s).read()


# Python Challenge "equality": look for lowercase letters guarded by
# exactly three uppercase letters on each side.
src = get_challenge('def/equality.html').decode('utf-8')
count = 0
lcount = 0
ucount = 0
jew = ''
print(src)
for i in src:
    if i.isupper():
        count += 1
    if count == 3 and i.islower() and ucount == 0:
        lcount += 1
        jew = i
    if count == 3 and i.isupper() and lcount == 1:
        ucount += 1
    if count > 3 or lcount > 1 or ucount > 3:
        count = 0
        lcount = 0
        ucount = 0
        print(jew)
lockout87/Projects
Python/bodyguards.py
Python
mit
510
template = '%.2f'%(fh_21)
shlopack/cursovaya
template/f_h_21.py
Python
mit
25
# -*- coding:utf-8 -*-
import math

# At first I didn't think about DP at all; I couldn't work it out on my own,
# so I looked at the online discussions.
# Assume the staircase always has 100 steps.
# Split into 51 cases: taking two steps at once 0 times, 1 time, ... up to 50 times.
# Not implemented -- too cumbersome.
# def count_stairs(n):
#     if n % 2 == 0:
#         count_2 = n / 2 + 1
#         start = 3
#     else:
#         count_2 = (n + 1) / 2 + 1
#         start = 2
#
#     counts = 1
#     for i in xrange(count_2):
#         for j in xrange(start, i):
#             counts *= c(n - 2 * j, 1)
#     counts + count_stairs(n - 1)
#     counts += 2 if start == 3 else 1
#     return counts
#
#
# def c(m, n):
#     return math.factorial(m) / math.factorial(n) ** 2


def count_stairs_dp(n):
    f1 = 1
    f2 = 2
    fn = 0
    if n == 1:
        return f1
    if n == 2:
        return f2
    for i in xrange(n - 2):
        fn = f1 + f2
        f1 = f2
        f2 = fn
    return fn


print(count_stairs_dp(10))
xudongyangwork/algo
day8/xudy.py
Python
mit
986
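The DP above is the Fibonacci-style recurrence f(n) = f(n-1) + f(n-2) with f(1) = 1 and f(2) = 2, because the final move onto step n is either a single step or a double step. A small sketch, separate from the repo file, that cross-checks the iterative form against the naive recursion:

def climb_dp(n):
    # last move is a 1-step or a 2-step, so f(n) = f(n - 1) + f(n - 2)
    a, b = 1, 2
    if n == 1:
        return a
    for _ in range(n - 2):
        a, b = b, a + b
    return b


def climb_naive(n):
    # direct recursion; only practical for small n
    if n <= 2:
        return n
    return climb_naive(n - 1) + climb_naive(n - 2)


assert all(climb_dp(n) == climb_naive(n) for n in range(1, 20))
print(climb_dp(10))  # 89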
from random import choice


class RandomWalk():
    """class to generate random walks."""

    def __init__(self, num_points=5000):
        """Initialize attributes of a walk"""
        self.num_points = num_points

        # All walks start from (0, 0)
        self.x_values = [0]
        self.y_values = [0]

    def fill_walk(self):
        """Calculate all the points in the walk"""
        # keep taking steps till desired length has been reached
        while len(self.x_values) < self.num_points:
            # decide which direction to take and how far to go
            x_direction = choice([1, -1])
            x_distance = choice([0, 1, 2, 3, 4])
            x_step = x_direction * x_distance

            y_direction = choice([1, -1])
            y_distance = choice([0, 1, 2, 3, 4])
            y_step = y_direction * y_distance

            # reject moves that go nowhere
            if x_step == 0 and y_step == 0:
                continue

            # calculate the next X and Y values
            next_x = self.x_values[-1] + x_step
            next_y = self.y_values[-1] + y_step
            self.x_values.append(next_x)
            self.y_values.append(next_y)
4bic-attic/data_viz
random_walk.py
Python
mit
1,179
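A short usage sketch for the RandomWalk class above; it assumes matplotlib is installed and that the class is importable as random_walk.RandomWalk, matching the file path in the listing:

import matplotlib.pyplot as plt

from random_walk import RandomWalk  # module name taken from the path above

rw = RandomWalk(num_points=5000)
rw.fill_walk()

# color the points by step order so the direction of the walk is visible
plt.scatter(rw.x_values, rw.y_values, c=range(rw.num_points),
            cmap=plt.cm.Blues, edgecolors='none', s=2)
plt.show()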
import json import random from uuid import uuid1 from collections import defaultdict import requests schema_url = "http://localhost:5000/api/schema/" experiment_url = "http://localhost:5000/api/sample/" headers = {'Content-type': 'application/json', 'Accept': 'text/plain'} binary_schema = { "default": 0, "distribution": "binary", "params": {} } def uuid(): return str(uuid1()) def gen_schema(keys): return {key: binary_schema for key in keys} def objective_function(a, b, **kwargs): return (0.5 + random.uniform(0, 1) + 3 * a + 2 * b - 5 * (a * b)) keys = ['a', 'b'] experiment = "binary" # uuid() experiment_url += experiment schema_url += experiment schema = gen_schema(keys) requests.post(schema_url, data=json.dumps(schema), headers=headers) distribution = defaultdict(lambda: 0) while True: r = requests.get(experiment_url) in_data = r.json() distribution[(in_data["a"], in_data["b"])] += 1 print in_data print distribution out_data = dict(_id = in_data["_id"], _obj=objective_function(**in_data)) print out_data r = requests.post(experiment_url, data=json.dumps(out_data), headers=headers)
diogo149/simbo
webapp/demo/binary.py
Python
mit
1,214
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-03-27 19:23 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('posts', '0007_auto_20170327_1919'), ] operations = [ migrations.AlterField( model_name='post', name='image', field=models.ImageField(blank=True, null=True, upload_to=''), ), ]
jokuf/hack-blog
posts/migrations/0008_auto_20170327_1923.py
Python
mit
469
def makex(): threesixty(.5, -.5, 0.835) wait(.2) forward(1, 0.806) wait(.1) backward(1, 0.403) wait(.1) threesixty(1, -1, 0.835) wait(.1) forward(1, 0.403) wait(.1) backward(1, 0.806) wait(.2) threesixty(.5, -.5, 0.835) wait(.2) forward(1, .57) wait(.2) threesixty(1, -1, 1.6478) stop() ######################################################### # need to modify depending on where we want to draw x # and where we want the bot to finish # add a ccw or cw in function?
pbardea/scribbler
finalPrj/notUsed/makex.py
Python
mit
606
#!/usr/bin/env python # by TR from obspy.core import UTCDateTime as UTC from sito.data import IPOC from sito.noisexcorr import (prepare, get_correlations, plotXcorrs, noisexcorrf, stack) from sito import util import matplotlib.pyplot as plt from sito.stream import read from multiprocessing import Pool import time from sito import seismometer def main(): stations = 'PB01 PB02 PB03 PB04 PB05 PB06 PB07 PB08 HMBCX MNMCX PATCX PSGCX LVC' #stations = 'PB09 PB10 PB11 PB12 PB13 PB14 PB15 PB16' stations2 = None components = 'Z' # TOcopilla earthquake: 2007-11-14 15:14 t1 = UTC('2006-02-01') t2 = UTC('2012-10-01') shift = 100 correlations = get_correlations(stations, components, stations2, only_auto=True) method = 'FINAL_filter1-3_1bit_auto' data = IPOC(xcorr_append='/' + method, use_local_LVC=False) data.setXLogger('_' + method) # pool = Pool() # prepare(data, stations.split(), t1, t2, component=components, # filter=(1, 3, 2, True), downsample=20, # eventremoval='waterlevel_env2', param_removal=(10, 0), # whitening=False, # normalize='1bit', param_norm=None, # pool=pool) # noisexcorrf(data, correlations, t1, t2, shift, pool=pool) # pool.close() # pool.join() # plotXcorrs(data, correlations, t1, t2, start=None, end=None, plot_overview=True, plot_years=False, use_dlognorm=False, # plot_stack=True, plot_psd=False, add_to_title='', downsample=None) plt.rc('font', size=16) plotXcorrs(data, correlations, t1, t2, start=0, end=20, plot_overview=True, plot_years=False, use_dlognorm=False, plot_stack=True, plot_psd=False, downsample=None, ext='_hg_dis.pdf', vmax=0.1, ylabel=None, add_to_title='1-3Hz') # stack(data, correlations, dt= -1) # stack(data, correlations, dt=10 * 24 * 3600, shift=2 * 24 * 3600) # plotXcorrs(data, correlations, t1=None, t2=None, start=None, end=None, plot_overview=True, plot_years=False, use_dlognorm=False, # plot_stack=True, plot_psd=False, add_to_title='', downsample=None, # stack=('10days', '2days')) # plotXcorrs(data, correlations, t1=None, t2=None, start=0, end=20, plot_overview=True, plot_years=False, use_dlognorm=False, # plot_stack=True, plot_psd=False, add_to_title='', downsample=None, # stack=('10days', '2days'), ext='_hg.png', vmax=0.1) # util.checkDir(data.getPlotX(('', ''), t1)) #for correlation in correlations: # stations = correlation[0][:-1], correlation[1][:-1] # dist = data.stations.dist(*stations) ## if dist >= 120: ## t = (dist // 100) * 50 + 50 ## else: ## t = 70 # t = 200 # stream = data.readDayXcorr(correlation, t1, t2) # if len(stream) > 0: # stream.plotXcorr(-t, t, imshow=True, vmax=0.01, vmin_rel='vmax', # fig=plt.figure(figsize=(8.267, 11.693)), # figtitle='station ' + method + ' around Tocopilla event', # dateformatter='%y-%m-%d', show=False, # save=data.getPlotX(correlation, 'Tocopilla_0.01.png'), # stack_lim=None) # # method = 'rm5_filter0.1-1' # data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True) # data.setXLogger('_' + method) # prepare(data, stations.split(' '), t1, t2, filter=(0.1, 1.), downsample=10, # component=components, normalize='runningmean', norm_param=5 * 10 + 1, # use_floating_stream=True) # xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True) # plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method) # # # method = 'rm50_filter0.01' # data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True) # data.setXLogger('_' + method) # prepare(data, stations.split(' '), t1, t2, filter=(0.01, None), downsample=None, # component=components, 
normalize='runningmean', norm_param=50 * 100 + 1, # use_floating_stream=True) # xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True) # plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method) # # # method = 'rm0.25_filter2' # data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True) # data.setXLogger('_' + method) # prepare(data, stations.split(' '), t1, t2, filter=(2, None), downsample=None, # component=components, normalize='runningmean', norm_param=100 // 4 + 1, # use_floating_stream=True) # xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True) # plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method) if __name__ == '__main__': main()
trichter/sito
bin/noise/noise_s_final_autocorr2.py
Python
mit
5,012
# -*- coding: utf-8 -*- """ pid - example of PID control of a simple process with a time constant Copyright (c) 2016 - RocketRedNeck.com RocketRedNeck.net RocketRedNeck and MIT Licenses RocketRedNeck hereby grants license for others to copy and modify this source code for whatever purpose other's deem worthy as long as RocketRedNeck is given credit where where credit is due and you leave RocketRedNeck out of it for all other nefarious purposes. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import matplotlib.pyplot as plot import numpy as np import math tmax = 3.0 dt = 0.01 ts = np.arange(0.0, tmax, dt) pvs = np.zeros(len(ts)) sps = np.zeros(len(ts)) mvs = np.zeros(len(ts)) mps = np.zeros(len(ts)) kf = 0.0 kp = 20.0 #10.0 ki = 0.0 kd = 2.0 #1.0 dt = ts[1] - ts[0] Gp = 1.0 delay = 1 * dt tau = 1000 * dt sp_period = 1.0 err = 0.0 intErr = 0.0 lastErr = 0.0 lastT = ts[0] lastG = 0.0 i = 0 d = 0 exp = -np.exp(-1/tau) mp = 0 for t in ts: if (t > 0): sps[i] = math.sin(sp_period*t) sps[i] = sps[i] / abs(sps[i]) # Square wave else: sps[i] = 0 derr = err - lastErr intErr = intErr + err mv = kf*sps[i] + (kp * err) + (ki * intErr) + (kd * (derr/dt)) mvs[i] = mv mp = mp + (mv * dt) mps[i] = mp G = 0.0 if (t >= delay): G = mp * Gp * (1.0 + exp) - (lastG * exp) else: d += 1 pvs[i] = G lastG = G i += 1 lastErr = err err = 0.0 if (t >= delay): err = sps[i-d] - pvs[i-d] # err += np.random.randn(1)*0.09 plot.figure(1) plot.cla() plot.grid() plot.plot(ts,sps,ts,pvs)
RocketRedNeck/PythonPlayground
pid_dot.py
Python
mit
2,665
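The script above mixes the controller and the simulated plant in one loop; the same control law pulled out into a reusable class might look like the sketch below. The class name and structure are mine, not the repo's, and unlike the script the integral term here is scaled by dt:

class PID:
    """Discrete PID controller: u = kf*sp + kp*e + ki*integral(e) + kd*de/dt."""

    def __init__(self, kp, ki, kd, kf=0.0, dt=0.01):
        self.kp, self.ki, self.kd, self.kf, self.dt = kp, ki, kd, kf, dt
        self.integral = 0.0
        self.last_error = 0.0

    def update(self, setpoint, measurement):
        error = setpoint - measurement
        self.integral += error * self.dt
        derivative = (error - self.last_error) / self.dt
        self.last_error = error
        return (self.kf * setpoint + self.kp * error
                + self.ki * self.integral + self.kd * derivative)


controller = PID(kp=20.0, ki=0.0, kd=2.0, dt=0.01)  # gains mirror the script above
print(controller.update(setpoint=1.0, measurement=0.0))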
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-10-26 14:20 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Occupancy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('room_name', models.CharField(max_length=255)), ('occupancy', models.IntegerField()), ('timestamp', models.DateField()), ], ), ]
christianknu/eitu
eitu/migrations/0001_initial.py
Python
mit
662
import yaml

from core.AbstractConfig import AbstractConfig


class YamlConfig(AbstractConfig):
    def __init__(self, config_path):
        self.path = config_path
        with open(config_path) as fr:
            self.conf = yaml.load(fr)

    def get_dict(self):
        return self.conf
john123951/gold.icbc.watcher
src/util/YamlConfig.py
Python
mit
304
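yaml.load without an explicit Loader argument has been deprecated since PyYAML 5.1 and is unsafe on untrusted input. A hedged variant of the same class using yaml.safe_load, as a sketch rather than the repo's code:

import yaml


class SafeYamlConfig:
    """Same idea as YamlConfig above, but parses with yaml.safe_load."""

    def __init__(self, config_path):
        self.path = config_path
        with open(config_path) as fr:
            self.conf = yaml.safe_load(fr)

    def get_dict(self):
        return self.conf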
#!/usr/bin/python2 # Copyright (C) 2011 by Ondrej Martinak <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import sys from src.mainWindow import * if __name__=="__main__": app = QtGui.QApplication(sys.argv) wnd = MainWindow() wnd.show() sys.exit(app.exec_())
omartinak/cfree
cfree.py
Python
mit
1,303
""" Basic IRT support for assessment items """ from dlkit.json_.osid.metadata import Metadata from dlkit.abstract_osid.assessment import record_templates as abc_assessment_records from dlkit.abstract_osid.osid.errors import IllegalState, InvalidArgument from ...osid.base_records import DecimalValuesRecord,\ DecimalValuesFormRecord,\ QueryInitRecord class ItemDecimalValuesRecord(abc_assessment_records.ItemRecord, DecimalValuesRecord): """actual assessment item record""" _implemented_record_type_identifiers = [ 'item-decimal-values' ] class ItemDecimalValuesFormRecord(DecimalValuesFormRecord, abc_assessment_records.ItemFormRecord): """an assessment item with decimal values attached""" _implemented_record_type_identifiers = [ 'item-decimal-values' ] def __init__(self, osid_object_form=None): if osid_object_form is not None: self.my_osid_object_form = osid_object_form self._init_metadata() if not self.my_osid_object_form.is_for_update(): self._init_map() super(ItemDecimalValuesFormRecord, self).__init__(osid_object_form=osid_object_form) class IRTItemRecord(DecimalValuesRecord): """include 3 basic IRT values""" _implemented_record_type_identifiers = [ 'irt-item', 'item-decimal-values' ] def has_difficulty_value(self): """stub""" return 'difficulty' in self.my_osid_object._my_map['decimalValues'] def get_difficulty_value(self): """stub""" if self.has_difficulty_value(): return self.my_osid_object._my_map['decimalValues']['difficulty'] raise IllegalState() def has_discrimination_value(self): """stub""" return 'discrimination' in self.my_osid_object._my_map['decimalValues'] def get_discrimination_value(self): """stub""" if self.has_discrimination_value(): return self.my_osid_object._my_map['decimalValues']['discrimination'] raise IllegalState() def has_pseudo_guessing_value(self): """stub""" return 'pseudoGuessing' in self.my_osid_object._my_map['decimalValues'] def get_pseudo_guessing_value(self): """stub""" if self.has_pseudo_guessing_value(): return self.my_osid_object._my_map['decimalValues']['pseudoGuessing'] raise IllegalState() difficulty = property(fget=get_difficulty_value) discrimination = property(fget=get_discrimination_value) guessing = property(fget=get_pseudo_guessing_value) class IRTItemFormRecord(ItemDecimalValuesFormRecord): """form to create / update the 3 IRT values we support""" _implemented_record_type_identifiers = [ 'irt-item', 'item-decimal-values' ] def __init__(self, osid_object_form=None): if osid_object_form is not None: self.my_osid_object_form = osid_object_form self._init_metadata() if not self.my_osid_object_form.is_for_update(): self._init_map() super(IRTItemFormRecord, self).__init__(osid_object_form=osid_object_form) def _init_map(self): """stub""" super(IRTItemFormRecord, self)._init_map() self.my_osid_object_form._my_map['decimalValues']['difficulty'] = \ self._decimal_value_metadata['default_decimal_values'][1] self.my_osid_object_form._my_map['decimalValues']['discrimination'] = \ self._decimal_value_metadata['default_decimal_values'][1] self.my_osid_object_form._my_map['decimalValues']['pseudoGuessing'] = \ self._decimal_value_metadata['default_decimal_values'][1] def get_difficulty_value_metadata(self): """stub""" return Metadata(**self._decimal_value_metadata) def get_discrimination_value_metadata(self): """stub""" return Metadata(**self._decimal_value_metadata) def get_pseudo_guessing_value_metadata(self): """stub""" return Metadata(**self._decimal_value_metadata) def set_difficulty_value(self, difficulty): """stub""" if not isinstance(difficulty, 
float): raise InvalidArgument('difficulty value must be a decimal') self.add_decimal_value(difficulty, 'difficulty') def set_discrimination_value(self, discrimination): """stub""" if not isinstance(discrimination, float): raise InvalidArgument('discrimination value must be a decimal') self.add_decimal_value(discrimination, 'discrimination') def set_pseudo_guessing_value(self, pseudo_guessing): """stub""" if not isinstance(pseudo_guessing, float): raise InvalidArgument('pseudo-guessing value must be a decimal') self.add_decimal_value(pseudo_guessing, 'pseudoGuessing') class IRTItemQueryRecord(QueryInitRecord): """query items by IRT attributes""" def match_minimum_difficulty(self, value, match): """stub""" self._my_osid_query._match_minimum_decimal('decimalValues.difficulty', value, match) def clear_minimum_difficulty_terms(self): """stub""" self._my_osid_query._clear_minimum_terms('decimalValues.difficulty') def match_maximum_difficulty(self, value, match): """stub""" self._my_osid_query._match_maximum_decimal('decimalValues.difficulty', value, match) def clear_maximum_difficulty_terms(self): """stub""" self._my_osid_query._clear_maximum_terms('decimalValues.difficulty') def match_minimum_discrimination(self, value, match): """stub""" self._my_osid_query._match_minimum_decimal('decimalValues.discrimination', value, match) def clear_miniumum_discrimination_terms(self): """stub""" self._my_osid_query._clear_minimum_terms('decimalValues.discrimination') def match_maximum_discrimination(self, value, match): """stub""" self._my_osid_query._match_maximum_decimal('decimalValues.discrimination', value, match) def clear_maximum_discrimination_terms(self): """stub""" self._my_osid_query._clear_maximum_terms('decimalValues.discrimination') def match_minimum_pseudo_guessing(self, value, match): """stub""" self._my_osid_query._match_minimum_decimal('decimalValues.pseudo_guessing', value, match) def clear_miniumum_pseudo_guessing_terms(self): """stub""" self._my_osid_query._clear_minimum_terms('decimalValues.pseudo_guessing') def match_maximum_pseudo_guessing(self, value, match): """stub""" self._my_osid_query._match_maximum_decimal('decimalValues.pseudo_guessing', value, match) def clear_maximum_pseudo_guessing_terms(self): """stub""" self._my_osid_query._clear_maximum_terms('decimalValues.pseudo_guessing')
mitsei/dlkit
dlkit/records/assessment/analytic/irt.py
Python
mit
7,495
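The difficulty, discrimination, and pseudo-guessing values these records carry are the parameters of the standard three-parameter logistic (3PL) IRT model, P(theta) = c + (1 - c) / (1 + exp(-a * (theta - b))). A minimal sketch, independent of dlkit (some formulations also include a scaling constant of 1.7 in the exponent):

import math


def three_pl_probability(theta, difficulty, discrimination, pseudo_guessing):
    """3PL item response function: P = c + (1 - c) * logistic(a * (theta - b))."""
    logistic = 1.0 / (1.0 + math.exp(-discrimination * (theta - difficulty)))
    return pseudo_guessing + (1.0 - pseudo_guessing) * logistic


# an item of average difficulty, unit discrimination, and a 20% guessing floor
print(three_pl_probability(theta=0.0, difficulty=0.0,
                           discrimination=1.0, pseudo_guessing=0.2))  # 0.6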
# -*- coding: utf-8 -*- """'Books' part of product categories dictionary. Must hold subcategories of 'Books' category in the form of python dictionary data type. """ books = {('books', 'книги'): { ('audiobooks', 'аудиокниги'): { ('biography', 'биографии'): {}, ('business, economics', 'бизнес, экономика'): {}, ('kids', 'детское'): {}, ('fiction, literature', 'художественная литература'): {}, ('health, fitness', 'здоровье, фитнес'): {}, ('history', 'история'): {}, ('humor', 'юмор'): {}, ('parenting, family', 'семья, воспитание'): {}, ('philosophy', 'философия'): {}, ('poetry', 'поэзия'): {}, ('psychology', 'психология'): {}, ('religion, spirituality', 'религия, духовность'): {}, ('science, medicine', 'наука, медицина'): {}, ('self-help', 'самопомощь'): {}, ('travel', 'путешествия'): {}, ('other', 'другое'): {}, }, ('kids', 'детское'): { ('ABCs, numbers', 'алфавит, числа'): {}, ('activity, coloring books', 'раскраски, игры'): {}, ('bedtime', 'перед сном'): {}, ('classics', 'классика'): {}, ('fairy tales, myths', 'сказки, мифы'): {}, ('fiction', 'художественная литература'): {}, ('geography', 'география'): {}, ('history', 'история'): {}, ('humor', 'юмор'): {}, ('learning to read', 'учимся читать'): {}, ('nursery rhymes', 'стишки'): {}, ('picture books', 'иллюстрированные книги'): {}, ('poetry', 'поэзия'): {}, ('pop-up, movable', '3D, подвижные'): {}, ('religion', 'религия'): {}, ('science, nature', 'наука, природа'): {}, ('sports', 'спорт'): {}, ('foreign language', 'иностранный язык'): {}, ('other', 'другое'): {}, }, ('poetry', 'поэзия'): {}, ('cookbooks', 'кулинарные книги'): {}, ('comic books', 'комиксы'): {}, ('fiction', 'художественная литература'): { ('action, adventure', 'приключения'): {}, ('classics', 'классика'): {}, ('drama', 'драма'): {}, ('fantasy', 'фэнтези'): {}, ('folklore, mythology', 'фольклор, мифология'): {}, ('historical', 'историческое'): {}, ('horror', 'ужасы'): {}, ('humor', 'юмор'): {}, ('military', 'военное'): {}, ('mystery, thriller', 'мистерия, триллер'): {}, ('religious', 'религиозное'): {}, ('romance', 'романтичное'): {}, ('science fiction', 'научная фантастика'): {}, ('foreign language', 'иностранный язык'): {}, ('other', 'другое'): {}, }, ('non-fiction', 'научно-популярные'): { ('antiques, collectibles', 'антиквариат, коллекционное'): {}, ('architecture', 'архитектура'): {}, ('art', 'живопись, искусство'): {}, ('photography', 'фотография'): {}, ('biography', 'биографии'): {}, ('business, economics', 'бизнес, экономика'): {}, ('craft', 'рукоделие, ремесло'): {}, ('food, cooking', 'продукты, поваренные'): {}, ('design', 'дизайн'): {}, ('flora, fauna', 'флора, фауна'): {}, ('computers, internet', 'компьютер, интернет'): {}, ('family, relationships', 'семья, отношения'): {}, ('games, puzzles', 'игры, пазлы'): {}, ('health, fitness', 'здоровье, фитнес'): {}, ('history', 'история'): {}, ('hobbies', 'хобби'): {}, ('home, garden', 'дом, сад'): {}, ('humor', 'юмор'): {}, ('law', 'право'): {}, ('medical', 'медицина'): {}, ('military, war', 'военное'): {}, ('movies, tv', 'фильмы, тв'): {}, ('music', 'музыка'): {}, ('outdoor, nature', 'на природе'): {}, ('performing arts', 'исполнительное искусство'): {}, ('pets', 'домашние животные'): {}, ('philosophy', 'философия'): {}, ('politics', 'политика'): {}, ('psychology', 'психология'): {}, ('reference', 'справочники'): {}, ('religion, spirituality', 'религия, духовность'): {}, ('science, tech', 'наука, технология'): {}, ('self-help', 'самопомощь'): {}, ('social sciences', 'общественные науки'): {}, ('sports, recreation', 'спорт, 
активный отдых'): {}, ('teaching, education', 'обучение, образование'): {}, ('transport', 'транспорт'): {}, ('travel', 'путешествия'): {}, ('other', 'другое'): {}, }, ('school textbooks', 'школьные учебники'): { ('biology', 'биология'): {}, ('business, economics', 'бизнес, экономика'): {}, ('computers, internet', 'компьютер, интернет'): {}, ('chemistry', 'химия'): {}, ('english, grammar', 'английский'): {}, ('foreign language', 'иностранный язык'): {}, ('geography', 'география'): {}, ('history', 'история'): {}, ('humanities', 'гуманитарные науки'): {}, ('math', 'математика'): {}, ('music', 'музыка'): {}, ('performing arts', 'исполнительное искусство'): {}, ('philosophy', 'философия'): {}, ('physics', 'физика'): {}, ('psychology', 'психология'): {}, ('reference', 'справочники'): {}, ('religion', 'религия'): {}, ('science, tech', 'наука, технология'): {}, ('social sciences', 'общественные науки'): {}, ('sports', 'спорт'): {}, ('other', 'другое'): {}, }, ('textbooks', 'учебники'): { ('accounting', 'бухгалтерия'): {}, ('architecture, design', 'архитектура, дизайн'): {}, ('art, photography', 'искусство, фотография'): {}, ('biology', 'биология'): {}, ('business, economics', 'бизнес, экономика'): {}, ('computers, internet', 'компьютер, интернет'): {}, ('chemistry', 'химия'): {}, ('engineering', 'инжиниринг, технологии'): {}, ('english, grammar', 'английский'): {}, ('foreign language', 'иностранный язык'): {}, ('gardening, landscaping', 'ландшафт, садоводство'): {}, ('genetics', 'генетика'): {}, ('geography', 'география'): {}, ('history', 'история'): {}, ('humanities', 'гуманитарные науки'): {}, ('linguistics', 'лингвистика'): {}, ('law', 'право'): {}, ('management', 'менеджмент, управление'): {}, ('math', 'математика'): {}, ('medicine', 'медицина'): {}, ('marketing', 'маркетинг'): {}, ('music', 'музыка'): {}, ('performing arts', 'исполнительное искусство'): {}, ('philosophy', 'философия'): {}, ('physics', 'физика'): {}, ('political science', 'политические науки'): {}, ('psychology', 'психология'): {}, ('reference', 'справочники'): {}, ('religion', 'религия'): {}, ('science, tech', 'наука, технология'): {}, ('social sciences', 'общественные науки'): {}, ('sports', 'спорт'): {}, ('teaching, education', 'обучение, образование'): {}, ('other', 'другое'): {}, }, ('other', 'другое'): {}, } }
redmoo-info/proddict
ru/books.py
Python
mit
8,821
from rest_framework import serializers from bulbs.utils.fields import RichTextField from bulbs.utils.data_serializers import CopySerializer, EntrySerializer, BaseEntrySerializer from .fields import ColorField class XYEntrySerializer(BaseEntrySerializer): title = RichTextField(required=False, field_size="short") copy_x = RichTextField(field_size="long") copy_y = RichTextField(field_size="long") class ComparisonKeySerializer(serializers.Serializer): title = RichTextField(required=False, field_size="short") color = ColorField(required=False) initial = serializers.CharField() class ComparisonSerializer(serializers.Serializer): key_x = ComparisonKeySerializer(required=False) key_y = ComparisonKeySerializer(required=False) entries = XYEntrySerializer(required=False, many=True, child_label="entry") class ListInfographicDataSerializer(serializers.Serializer): is_numbered = serializers.BooleanField(default=False) entries = EntrySerializer(many=True, required=False, child_label="entry") class ProConSerializer(serializers.Serializer): body = RichTextField(required=False, field_size="long") pro = CopySerializer(required=False, many=True) con = CopySerializer(required=False, many=True) class StrongSideWeakSideSerializer(serializers.Serializer): body = RichTextField(required=False, field_size="long") strong = CopySerializer(required=False, many=True) weak = CopySerializer(required=False, many=True) class TimelineSerializer(serializers.Serializer): entries = EntrySerializer(many=True, required=False, child_label="entry")
theonion/django-bulbs
bulbs/infographics/data_serializers.py
Python
mit
1,619
from utilities.compression import SoSCompression


class SparqlDecompress:
    """Performs SPARQL sos-decompression"""

    name = "SparqlCompress"
    description = "Performs SPARQL sos-decompression"

    @staticmethod
    def execute(token):
        message = token.message
        body = message.get_body()

        decompressed_body_parts = SoSCompression.decompress_sparql(body)
        decompressed_body = (b' '.join(decompressed_body_parts)).decode('utf-8')

        # only if no errors occurred
        if not decompressed_body.startswith('org.apache.jena'):
            token.message.body = ' '.join(decompressed_body.split())
onnovalkering/sparql-over-sms
sos-service/src/processing/filters/sparqldecompress.py
Python
mit
635
from os import walk

from definitions import Config


class Walker(object):
    def __init__(self, directory):
        super(Walker, self).__init__()
        cfg = Config()
        for (_path, _file, _archives) in walk(cfg.globals['root'] + directory):
            pass
        self.archives = _archives
innusource/siteg.py
_app/walker.py
Python
mit
303
# -*- coding: utf-8 -*- # Generated by Django 1.11.23 on 2020-09-10 14:23 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ("chroma_core", "0024_mountsnapshotjob_unmountsnapshotjob"), ] operations = [ migrations.CreateModel( name="CreateSnapshotJob", fields=[ ( "job_ptr", models.OneToOneField( auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to="chroma_core.Job", ), ), ("fqdn", models.CharField(help_text=b"MGS host to create the snapshot on", max_length=256)), ("fsname", models.CharField(help_text=b"Lustre filesystem name", max_length=8)), ("name", models.CharField(help_text=b"Snapshot to create", max_length=64)), ( "comment", models.CharField(help_text=b"Optional comment for the snapshot", max_length=1024, null=True), ), ( "use_barrier", models.BooleanField( default=False, help_text=b"Set write barrier before creating snapshot. The default value is False", ), ), ], options={ "ordering": ["id"], }, bases=("chroma_core.job",), ), migrations.CreateModel( name="DestroySnapshotJob", fields=[ ( "job_ptr", models.OneToOneField( auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to="chroma_core.Job", ), ), ("fqdn", models.CharField(help_text=b"MGS host to destroy the snapshot on", max_length=256)), ("fsname", models.CharField(help_text=b"Lustre filesystem name", max_length=8)), ("name", models.CharField(help_text=b"Snapshot to destroy", max_length=64)), ("force", models.BooleanField(default=False, help_text=b"Destroy the snapshot with force")), ], options={ "ordering": ["id"], }, bases=("chroma_core.job",), ), ]
intel-hpdd/intel-manager-for-lustre
chroma_core/migrations/0025_createsnapshotjob_destroysnapshotjob.py
Python
mit
2,819
""" ================ :mod:`variators` ================ .. Copyright 2012 Aaron Garrett .. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: .. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. .. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. .. module:: variators .. moduleauthor:: Aaron Garrett <[email protected]> """ def default_variation(random, candidates, args): """Return the set of candidates without variation. .. Arguments: random -- the random number generator object candidates -- the candidate solutions args -- a dictionary of keyword arguments """ return candidates
aarongarrett/inspyred
inspyred/ec/variators/variators.py
Python
mit
1,660
# Created By: Virgil Dupras # Created On: 2004/12/07 # Copyright 2010 Hardcoded Software (http://www.hardcoded.net) # This software is licensed under the "BSD" License as described in the "LICENSE" file, # which should be included with this package. The terms are also available at # http://www.hardcoded.net/licenses/bsd_license import struct from .util import FileOrPath from .genres import genre_by_index TAG_VERSION_1_0 = 1 TAG_VERSION_1_1 = 2 #id3v1 specs #0-2:"TAG" #3-32:Title #33-62:Artist #63-92:Album #93-96:Year #97-126:Comment #127:Genre def _arrange_id3_field(raw_field): """Format the read field properly This function takes only the part of the string before the first \0 char. After this, it checks if the string has to be converted to unicode and convert it if it indeed does. """ decoded = str(raw_field, 'iso8859-1') result = decoded.split('\0') if len(result) > 0: result = result[0].rstrip().replace('\n', ' ').replace('\r', ' ') else: result = '' return result class Id3v1(object): def __init__(self, infile): self.version = 0 self.size = 0 self.title = '' self.artist = '' self.album = '' self.year = '' self.genre = '' self.comment = '' self.track = 0 with FileOrPath(infile) as fp: self._read_file(fp) def _read_file(self, fp): fp.seek(0, 2) position = fp.tell() if position and position >= 128: fp.seek(-128, 2) self._read_tag(fp.read(128)) def _read_tag(self, data): if data[0:3] != b'TAG': return #check if the comment field contains track info if ((data[125] == 0) and (data[126] != 0)) or ((data[125] == 0x20) and (data[126] != 0x20)): #We have a v1.1 self.version = TAG_VERSION_1_1 self.track = min(data[126], 99) self.comment = _arrange_id3_field(data[97:125]) else: self.version = TAG_VERSION_1_0 self.track = 0 self.comment = _arrange_id3_field(data[97:127]) self.title = _arrange_id3_field(data[3:33]) self.artist = _arrange_id3_field(data[33:63]) self.album = _arrange_id3_field(data[63:93]) self.year = _arrange_id3_field(data[93:97]) genre = data[127] self.genre = genre_by_index(genre) self.size = 128 @property def exists(self): return self.size > 0
jmtchllrx/pyMuse
src/hsaudiotag/id3v1.py
Python
mit
2,528
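A small usage sketch for the Id3v1 class above; it assumes the module is importable as hsaudiotag.id3v1 (per the file path in the listing) and uses a purely illustrative file name:

from hsaudiotag.id3v1 import Id3v1, TAG_VERSION_1_1  # import path per the listing above

tag = Id3v1('example.mp3')  # file name is illustrative
if tag.exists:
    print(tag.artist, '-', tag.title, '({})'.format(tag.year))
    if tag.version == TAG_VERSION_1_1:
        # only v1.1 tags reuse the comment field to carry a track number
        print('track', tag.track)
else:
    print('no ID3v1 tag found')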
from rest_framework.views import APIView from rest_framework import viewsets from game.serializers import * from rest_framework.response import Response from game.models import * from rest_framework.permissions import IsAuthenticated from django.shortcuts import get_object_or_404 from django.http import Http404 class UserViewSet(viewsets.ModelViewSet): """ API endpoint that allows users to be viewed or edited. """ queryset = User.objects.all().order_by('-date_joined') serializer_class = UserSerializer class PlayerGameViewSet(viewsets.ViewSet): """ API endpoint for player games """ def list(self, request): queryset = Game.get_games_for_player(self.request.user) serializer = GameSerializer( queryset, many=True, context={'request': request}) return Response(serializer.data) class AvailableGameViewSet(viewsets.ViewSet): """ API endpoint for available/open games """ def list(self, request): queryset = Game.get_available_games() serializer = GameSerializer(queryset, many=True) return Response(serializer.data) class CurrentUserView(APIView): def get(self, request): serializer = UserSerializer(request.user) return Response(serializer.data) class SingleGameViewSet(APIView): """ Get all data for a game: Game Details, Squares, & Log """ def get(self, request, **kwargs): game = Game.get_by_id(kwargs['game_id']) log = game.get_game_log() squares = game.get_all_game_squares() game_serializer = GameSerializer(game) log_serializer = GameLogSerializer(log, many=True) square_serializer = GameSquareSerializer(squares, many=True) return_data = {'game': game_serializer.data, 'log': log_serializer.data, 'squares': square_serializer.data} return Response(return_data) class GameSquaresViewSet(viewsets.ViewSet): def retrieve(self, request, pk=None): game = get_object_or_404(Game, pk=pk) squares = game.get_all_game_squares() serializer = GameSquareSerializer(squares, many=True) return Response(serializer.data)
codyparker/channels-obstruction
game/views/api_views.py
Python
mit
2,229
# Decorators
from django.http import HttpResponseRedirect

from gameheart.entities.models import UserProfile


def check_terms(function):
    def wrap(request, *args, **kwargs):
        profile = UserProfile.objects.get(user=request.user)
        if profile.acceptedterms == False:
            action = request.path_info
            red = ''.join(['/terms/?next=', action])
            return HttpResponseRedirect(red)
        else:
            return function(request, *args, **kwargs)
    wrap.__doc__ = function.__doc__
    wrap.__name__ = function.__name__
    return wrap
undergroundtheater/gameheart
gameheart/entities/decorators.py
Python
mit
570
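A usage sketch for the check_terms decorator above; the view name and template are hypothetical, and login_required is assumed because the decorator reads request.user:

from django.contrib.auth.decorators import login_required
from django.shortcuts import render

from gameheart.entities.decorators import check_terms  # path per the listing above


@login_required
@check_terms
def dashboard(request):
    # only reached once the profile has acceptedterms == True;
    # otherwise check_terms redirects to /terms/?next=<current path>
    return render(request, 'dashboard.html')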
from __future__ import ( unicode_literals, absolute_import, print_function, division, ) from aaf2.auid import AUID from uuid import UUID import uuid import unittest class MobIDTests(unittest.TestCase): def test_basic(self): s = "0d010101-0101-2100-060e-2b3402060101" v = AUID(s) u = UUID(s) assert str(v) == s assert str(v.uuid) == s assert v.uuid == u def test_be(self): s = "0d010101-0101-2100-060e-2b3402060101" v = AUID(s) u = UUID(s) assert v.uuid.bytes == v.bytes_be def test_int(self): s = "0d010101-0101-2100-060e-2b3402060101" v = AUID(s) u = UUID(s) assert v.int == u.int v = AUID(int=100) u = UUID(int=100) assert v.int == u.int for i in range(10): u = uuid.uuid4() a = AUID(int= u.int) assert u.int == a.int def test_noargs(self): # expected behavour matches uuid.py with self.assertRaises(TypeError): AUID() # print(v.int) if __name__ == "__main__": import logging # logging.basicConfig(level=logging.DEBUG) unittest.main()
markreidvfx/pyaaf2
tests/test_auid.py
Python
mit
1,210
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django import forms


class UserRegistrationForm(UserCreationForm):
    class Meta:
        model = User
        fields = ('username', 'email',)
cscanlin/munger-builder
munger_builder/forms.py
Python
mit
269
# coding: utf-8 from google.appengine.ext import ndb from flask.ext import restful import flask from api import helpers import auth import model import util from main import api_v1 ############################################################################### # Admin ############################################################################### @api_v1.resource('/admin/song/', endpoint='api.admin.song.list') class AdminSongListAPI(restful.Resource): @auth.admin_required def get(self): song_keys = util.param('song_keys', list) if song_keys: song_db_keys = [ndb.Key(urlsafe=k) for k in song_keys] song_dbs = ndb.get_multi(song_db_keys) return helpers.make_response(song_dbs, model.song.FIELDS) song_dbs, song_cursor = model.Song.get_dbs() return helpers.make_response(song_dbs, model.Song.FIELDS, song_cursor) @api_v1.resource('/admin/song/<string:song_key>/', endpoint='api.admin.song') class AdminSongAPI(restful.Resource): @auth.admin_required def get(self, song_key): song_db = ndb.Key(urlsafe=song_key).get() if not song_db: helpers.make_not_found_exception('song %s not found' % song_key) return helpers.make_response(song_db, model.Song.FIELDS)
lipis/the-smallest-creature
main/api/v1/song.py
Python
mit
1,226
# Create your views here. from django.shortcuts import render_to_response, get_object_or_404, get_list_or_404 from django.http import Http404 from echo.models import Comment, Reply, CategoryMeta from echo.forms import RegistrationForm, ConfirmRegistration from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect, HttpResponseNotModified from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.contrib.auth import logout from django.contrib.auth.forms import UserCreationForm from django.template import RequestContext from haystack.query import SearchQuerySet from django import forms from voting.models import Vote def index_view(request): faq_list = Comment.objects.filter(category='QUESTIONS')[:6] issue_list = Comment.objects.filter(category='ISSUES')[:6] idea_list = Comment.objects.filter(category='IDEAS')[:6] praise_list = Comment.objects.filter(category='PRAISE')[:6] return render_to_response('echo/index.html', {'faq_list':faq_list, 'issue_list':issue_list, 'idea_list':idea_list, 'praise_list':praise_list }, context_instance=RequestContext(request)) #user authentication and management... def logout_view(request): logout(request) return HttpResponseRedirect('/echo') def registration(request): if request.method == 'POST': f = RegistrationForm(request.POST) if f.is_valid(): f.saveUnvalidatedUser() return HttpResponseRedirect('/echo/') else: f = RegistrationForm() return render_to_response("registration/register.html", { 'form' : f }, context_instance=RequestContext(request)) def confirmRegistration(request, user=None, activation_key=None): if request.method == 'POST': print request.POST f = ConfirmRegistration(request.POST) if f.is_valid(): try: f.validateUserRegistration() message = "Your user is now activated!" except DoesNotExist: message = "User doesn't exist?" return render_to_respose("registration/complete_registration.html", { 'message' : message }, context_instance=RequestContext(request)) else: #initial GET... 
u = get_object_or_404(User,username=user) if u.check_password(activation_key): f = ConfirmRegistration() f.user_name = user f.activation_key = activation_key else: raise Http404 return render_to_response("registration/confirm_registration.html", { 'form' : f }, context_instance=RequestContext(request)) @login_required def newComment(request, category=None, comment=None): return render_to_response('echo/new_comment.html', {'category' : category, 'comment' : comment}, context_instance=RequestContext(request)) @login_required def postComment(request): c = Comment() c.body = request.POST['body'] c.category = request.POST['category'] u = request.user c.commenter = u c.save() return render_to_response('echo/edit_comment.html', {'comment' : c}, context_instance=RequestContext(request)) def commentDetails(request, comment_id): c = Comment.objects.get(pk=comment_id) r = c.reply_set.all() return render_to_response('echo/edit_comment.html', {'comment' : c, 'replies' : r}, context_instance=RequestContext(request)) @login_required def reply(request, comment_id): c = Comment.objects.get(pk=comment_id) r = Reply(comment=c, commenter=request.user, body=request.POST['body']) r.save() return render_to_response('echo/edit_comment.html', {'comment' : c, 'replies' : c.reply_set.all()}, context_instance=RequestContext(request)) def category(request, category): comment_list = Comment.objects.filter(category=category) return render_to_response('echo/category_list.html', { 'comments' : comment_list, 'category' : category}, context_instance=RequestContext(request)) def search(request): searchText = request.POST['search'] return render_to_response('search/search.html', { 'results' : SearchQuerySet().auto_query(searchText), 'query' : searchText }, context_instance=RequestContext(request)) def ajax_search(request): search_text = request.POST['search_text'] category = request.POST['category'] print "search_text = [" + search_text + "]\n" print "category = [" + category + "]\n" return render_to_response('search/ajax_search.html', { 'results' : SearchQuerySet().auto_query(search_text), 'query' : search_text, 'category' : category, } , context_instance=RequestContext(request)) def commentVote(request, comment_id): c = Comment.objects.get(pk=comment_id) Vote.objects.record_vote(c,request.user,+1) return render_to_response('echo/record_vote.html', {}, context_instance=RequestContext(request))
robneville73/OpenEcho
echo/views.py
Python
mit
6,048
import MuseScoreHelper as MuseScore import SerialHelper while True: # Print a menu of available operations print ("AVAILABLE OPERATIONS:") print (" 1. Connect Device\n") print (" 2. Load Music File\n") print (" 3. Exit Program\n") # Loop until valid input is provided while True: try: operation_code = int(input("SELECT OPERATION: ")) if operation_code >= 1 and operation_code <= 3: break except: pass # Print spacer print("\n...\n") # CONNECT DEVICE if operation_code == 1: # Allow the user to select from a list of devices, # and then form a connection with the given device device = SerialHelper.select_device() if type(device) != None: Arduino = SerialHelper.device_begin(device) else: print("No device found") print("\n...\n") # LOAD MUSIC FILE elif operation_code == 2: # Asks the user for a file to load music from, and # loads it into a list of tuples (pitch, duration) Music_Raw = MuseScore.read_file() # Loop through the music tracks that were returned # (one track will be played on one buzzer) for i in range(0, len(Music_Raw)): print("BUZZER " + str(i) + ":") for i in Music_Raw[i]: print (" " + str(i[0]) + " " + str(i[1])) # Print a divider print("\n...\n") # Loop through the musical tuples and build a byte # array that can be sent to the Arduino output_b1 = bytes(Music_Raw[0][1]) for i in Music_Raw[0][2:]: output_b1 += bytes(i) output_b2 = bytes(Music_Raw[1][1]) for i in Music_Raw[1][2:]: output_b2 += bytes(i) print(output_b1) print(output_b2) # Write 255 to the Arduino to begin a transmission Arduino.write(bytes([255])) # The Arduino will send back the maximum notes it # can handle (this is per-buzzer). From this we # decide how many we are actually going to send Max_notes = int(SerialHelper.device_readline(Arduino)) Num_buzzers = int(SerialHelper.device_readline(Arduino)) Num_notes = Max_notes if int(len(output_b1)/2) > Max_notes else int(len(output_b1)/2) # Send a message dictating how many notes will be sent # to the Arduino Arduino.write(SerialHelper.pack_short(Num_notes)) # Next we send the note output for Buzzer one Arduino.write(output_b1[0:Num_notes*2]) # Send a message dictating how many notes will be sent # to the Arduino Arduino.write(SerialHelper.pack_short(Num_notes)) # Next we send the note output for Buzzer one Arduino.write(output_b2[0:Num_notes*2]) # Print a nice overview at the end print("Music Transfer Complete:") print(" Arduino with " + str(Num_buzzers) + " buzzers") print(" " + str(Num_notes) + " notes transferred") elif operation_code == 3: print("Exiting Program") break
justephens/ArduMusic
ArduMusic.py
Python
mit
3,240
#!/home/dark/Exile/Git/AutoLavadox/maq_autolavadox/bin/python """PILdriver, an image-processing calculator using PIL. An instance of class PILDriver is essentially a software stack machine (Polish-notation interpreter) for sequencing PIL image transformations. The state of the instance is the interpreter stack. The only method one will normally invoke after initialization is the `execute' method. This takes an argument list of tokens, pushes them onto the instance's stack, and then tries to clear the stack by successive evaluation of PILdriver operators. Any part of the stack not cleaned off persists and is part of the evaluation context for the next call of the execute method. PILDriver doesn't catch any exceptions, on the theory that these are actually diagnostic information that should be interpreted by the calling code. When called as a script, the command-line arguments are passed to a PILDriver instance. If there are no command-line arguments, the module runs an interactive interpreter, each line of which is split into space-separated tokens and passed to the execute method. In the method descriptions below, a first line beginning with the string `usage:' means this method can be invoked with the token that follows it. Following <>-enclosed arguments describe how the method interprets the entries on the stack. Each argument specification begins with a type specification: either `int', `float', `string', or `image'. All operations consume their arguments off the stack (use `dup' to keep copies around). Use `verbose 1' to see the stack state displayed before each operation. Usage examples: `show crop 0 0 200 300 open test.png' loads test.png, crops out a portion of its upper-left-hand corner and displays the cropped portion. `save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it 30 degrees, and saves the result as rotated.png (in PNG format). """ # by Eric S. Raymond <[email protected]> # $Id$ # TO DO: # 1. Add PILFont capabilities, once that's documented. # 2. Add PILDraw operations. # 3. Add support for composing and decomposing multiple-image files. # from __future__ import print_function from PIL import Image class PILDriver(object): verbose = 0 def do_verbose(self): """usage: verbose <int:num> Set verbosity flag from top of stack. """ self.verbose = int(self.do_pop()) # The evaluation stack (internal only) stack = [] # Stack of pending operations def push(self, item): "Push an argument onto the evaluation stack." self.stack.insert(0, item) def top(self): "Return the top-of-stack element." return self.stack[0] # Stack manipulation (callable) def do_clear(self): """usage: clear Clear the stack. """ self.stack = [] def do_pop(self): """usage: pop Discard the top element on the stack. """ return self.stack.pop(0) def do_dup(self): """usage: dup Duplicate the top-of-stack item. """ if hasattr(self, 'format'): # If it's an image, do a real copy dup = self.stack[0].copy() else: dup = self.stack[0] self.push(dup) def do_swap(self): """usage: swap Swap the top-of-stack item with the next one down. """ self.stack = [self.stack[1], self.stack[0]] + self.stack[2:] # Image module functions (callable) def do_new(self): """usage: new <int:xsize> <int:ysize> <int:color>: Create and push a greyscale image of given size and color. """ xsize = int(self.do_pop()) ysize = int(self.do_pop()) color = int(self.do_pop()) self.push(Image.new("L", (xsize, ysize), color)) def do_open(self): """usage: open <string:filename> Open the indicated image, read it, push the image on the stack. 
""" self.push(Image.open(self.do_pop())) def do_blend(self): """usage: blend <image:pic1> <image:pic2> <float:alpha> Replace two images and an alpha with the blended image. """ image1 = self.do_pop() image2 = self.do_pop() alpha = float(self.do_pop()) self.push(Image.blend(image1, image2, alpha)) def do_composite(self): """usage: composite <image:pic1> <image:pic2> <image:mask> Replace two images and a mask with their composite. """ image1 = self.do_pop() image2 = self.do_pop() mask = self.do_pop() self.push(Image.composite(image1, image2, mask)) def do_merge(self): """usage: merge <string:mode> <image:pic1> [<image:pic2> [<image:pic3> [<image:pic4>]]] Merge top-of stack images in a way described by the mode. """ mode = self.do_pop() bandlist = [] for band in mode: bandlist.append(self.do_pop()) self.push(Image.merge(mode, bandlist)) # Image class methods def do_convert(self): """usage: convert <string:mode> <image:pic1> Convert the top image to the given mode. """ mode = self.do_pop() image = self.do_pop() self.push(image.convert(mode)) def do_copy(self): """usage: copy <image:pic1> Make and push a true copy of the top image. """ self.dup() def do_crop(self): """usage: crop <int:left> <int:upper> <int:right> <int:lower> <image:pic1> Crop and push a rectangular region from the current image. """ left = int(self.do_pop()) upper = int(self.do_pop()) right = int(self.do_pop()) lower = int(self.do_pop()) image = self.do_pop() self.push(image.crop((left, upper, right, lower))) def do_draft(self): """usage: draft <string:mode> <int:xsize> <int:ysize> Configure the loader for a given mode and size. """ mode = self.do_pop() xsize = int(self.do_pop()) ysize = int(self.do_pop()) self.push(self.draft(mode, (xsize, ysize))) def do_filter(self): """usage: filter <string:filtername> <image:pic1> Process the top image with the given filter. """ from PIL import ImageFilter imageFilter = getattr(ImageFilter, self.do_pop().upper()) image = self.do_pop() self.push(image.filter(imageFilter)) def do_getbbox(self): """usage: getbbox Push left, upper, right, and lower pixel coordinates of the top image. """ bounding_box = self.do_pop().getbbox() self.push(bounding_box[3]) self.push(bounding_box[2]) self.push(bounding_box[1]) self.push(bounding_box[0]) def do_getextrema(self): """usage: extrema Push minimum and maximum pixel values of the top image. """ extrema = self.do_pop().extrema() self.push(extrema[1]) self.push(extrema[0]) def do_offset(self): """usage: offset <int:xoffset> <int:yoffset> <image:pic1> Offset the pixels in the top image. """ xoff = int(self.do_pop()) yoff = int(self.do_pop()) image = self.do_pop() self.push(image.offset(xoff, yoff)) def do_paste(self): """usage: paste <image:figure> <int:xoffset> <int:yoffset> <image:ground> Paste figure image into ground with upper left at given offsets. """ figure = self.do_pop() xoff = int(self.do_pop()) yoff = int(self.do_pop()) ground = self.do_pop() if figure.mode == "RGBA": ground.paste(figure, (xoff, yoff), figure) else: ground.paste(figure, (xoff, yoff)) self.push(ground) def do_resize(self): """usage: resize <int:xsize> <int:ysize> <image:pic1> Resize the top image. 
""" ysize = int(self.do_pop()) xsize = int(self.do_pop()) image = self.do_pop() self.push(image.resize((xsize, ysize))) def do_rotate(self): """usage: rotate <int:angle> <image:pic1> Rotate image through a given angle """ angle = int(self.do_pop()) image = self.do_pop() self.push(image.rotate(angle)) def do_save(self): """usage: save <string:filename> <image:pic1> Save image with default options. """ filename = self.do_pop() image = self.do_pop() image.save(filename) def do_save2(self): """usage: save2 <string:filename> <string:options> <image:pic1> Save image with specified options. """ filename = self.do_pop() options = self.do_pop() image = self.do_pop() image.save(filename, None, options) def do_show(self): """usage: show <image:pic1> Display and pop the top image. """ self.do_pop().show() def do_thumbnail(self): """usage: thumbnail <int:xsize> <int:ysize> <image:pic1> Modify the top image in the stack to contain a thumbnail of itself. """ ysize = int(self.do_pop()) xsize = int(self.do_pop()) self.top().thumbnail((xsize, ysize)) def do_transpose(self): """usage: transpose <string:operator> <image:pic1> Transpose the top image. """ transpose = self.do_pop().upper() image = self.do_pop() self.push(image.transpose(transpose)) # Image attributes def do_format(self): """usage: format <image:pic1> Push the format of the top image onto the stack. """ self.push(self.do_pop().format) def do_mode(self): """usage: mode <image:pic1> Push the mode of the top image onto the stack. """ self.push(self.do_pop().mode) def do_size(self): """usage: size <image:pic1> Push the image size on the stack as (y, x). """ size = self.do_pop().size self.push(size[0]) self.push(size[1]) # ImageChops operations def do_invert(self): """usage: invert <image:pic1> Invert the top image. """ from PIL import ImageChops self.push(ImageChops.invert(self.do_pop())) def do_lighter(self): """usage: lighter <image:pic1> <image:pic2> Pop the two top images, push an image of the lighter pixels of both. """ from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() self.push(ImageChops.lighter(image1, image2)) def do_darker(self): """usage: darker <image:pic1> <image:pic2> Pop the two top images, push an image of the darker pixels of both. """ from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() self.push(ImageChops.darker(image1, image2)) def do_difference(self): """usage: difference <image:pic1> <image:pic2> Pop the two top images, push the difference image """ from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() self.push(ImageChops.difference(image1, image2)) def do_multiply(self): """usage: multiply <image:pic1> <image:pic2> Pop the two top images, push the multiplication image. """ from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() self.push(ImageChops.multiply(image1, image2)) def do_screen(self): """usage: screen <image:pic1> <image:pic2> Pop the two top images, superimpose their inverted versions. """ from PIL import ImageChops image2 = self.do_pop() image1 = self.do_pop() self.push(ImageChops.screen(image1, image2)) def do_add(self): """usage: add <image:pic1> <image:pic2> <int:offset> <float:scale> Pop the two top images, produce the scaled sum with offset. 
""" from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() scale = float(self.do_pop()) offset = int(self.do_pop()) self.push(ImageChops.add(image1, image2, scale, offset)) def do_subtract(self): """usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale> Pop the two top images, produce the scaled difference with offset. """ from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() scale = float(self.do_pop()) offset = int(self.do_pop()) self.push(ImageChops.subtract(image1, image2, scale, offset)) # ImageEnhance classes def do_color(self): """usage: color <image:pic1> Enhance color in the top image. """ from PIL import ImageEnhance factor = float(self.do_pop()) image = self.do_pop() enhancer = ImageEnhance.Color(image) self.push(enhancer.enhance(factor)) def do_contrast(self): """usage: contrast <image:pic1> Enhance contrast in the top image. """ from PIL import ImageEnhance factor = float(self.do_pop()) image = self.do_pop() enhancer = ImageEnhance.Contrast(image) self.push(enhancer.enhance(factor)) def do_brightness(self): """usage: brightness <image:pic1> Enhance brightness in the top image. """ from PIL import ImageEnhance factor = float(self.do_pop()) image = self.do_pop() enhancer = ImageEnhance.Brightness(image) self.push(enhancer.enhance(factor)) def do_sharpness(self): """usage: sharpness <image:pic1> Enhance sharpness in the top image. """ from PIL import ImageEnhance factor = float(self.do_pop()) image = self.do_pop() enhancer = ImageEnhance.Sharpness(image) self.push(enhancer.enhance(factor)) # The interpreter loop def execute(self, list): "Interpret a list of PILDriver commands." list.reverse() while len(list) > 0: self.push(list[0]) list = list[1:] if self.verbose: print("Stack: " + repr(self.stack)) top = self.top() if not isinstance(top, str): continue funcname = "do_" + top if not hasattr(self, funcname): continue else: self.do_pop() func = getattr(self, funcname) func() if __name__ == '__main__': import sys # If we see command-line arguments, interpret them as a stack state # and execute. Otherwise go interactive. driver = PILDriver() if len(sys.argv[1:]) > 0: driver.execute(sys.argv[1:]) else: print("PILDriver says hello.") while True: try: if sys.version_info[0] >= 3: line = input('pildriver> ') else: line = raw_input('pildriver> ') except EOFError: print("\nPILDriver says goodbye.") break driver.execute(line.split()) print(driver.stack) # The following sets edit modes for GNU EMACS # Local Variables: # mode:python # End:
exildev/AutoLavadox
maq_autolavadox/bin/pildriver.py
Python
mit
15,553
import pytest sa = pytest.importorskip("sqlalchemy") import os import responses import flask from flask_sqlalchemy import SQLAlchemy from sqlalchemy import event from flask_caching import Cache from flask_login import LoginManager, UserMixin, current_user, login_user, logout_user from flask_dance.consumer import OAuth2ConsumerBlueprint, oauth_authorized, oauth_error from flask_dance.consumer.storage.sqla import OAuthConsumerMixin, SQLAlchemyStorage try: import blinker except ImportError: blinker = None requires_blinker = pytest.mark.skipif(not blinker, reason="requires blinker") pytestmark = [pytest.mark.usefixtures("responses")] @pytest.fixture def blueprint(): "Make a OAuth2 blueprint for a fictional OAuth provider" bp = OAuth2ConsumerBlueprint( "test-service", __name__, client_id="client_id", client_secret="client_secret", state="random-string", base_url="https://example.com", authorization_url="https://example.com/oauth/authorize", token_url="https://example.com/oauth/access_token", redirect_url="/oauth_done", ) responses.add( responses.POST, "https://example.com/oauth/access_token", body='{"access_token":"foobar","token_type":"bearer","scope":""}', ) return bp @pytest.fixture def db(): "Make a Flask-SQLAlchemy instance" return SQLAlchemy() @pytest.fixture def app(blueprint, db, request): "Make a Flask app, attach Flask-SQLAlchemy, and establish an app context" app = flask.Flask(__name__) app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get("DATABASE_URI", "sqlite://") app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False app.config["CACHE_TYPE"] = "simple" app.secret_key = "secret" app.register_blueprint(blueprint, url_prefix="/login") db.init_app(app) # establish app context ctx = app.app_context() ctx.push() request.addfinalizer(ctx.pop) return app class record_queries: """ A context manager for recording the SQLAlchemy queries that were executed in a given context block. 
""" def __init__(self, target, identifier="before_cursor_execute"): self.target = target self.identifier = identifier def record_query(self, conn, cursor, statement, parameters, context, executemany): self.queries.append(statement) def __enter__(self): self.queries = [] event.listen(self.target, self.identifier, self.record_query) return self.queries def __exit__(self, exc_type, exc_value, traceback): event.remove(self.target, self.identifier, self.record_query) def test_sqla_storage_without_user(app, db, blueprint, request): class OAuth(OAuthConsumerMixin, db.Model): pass blueprint.storage = SQLAlchemyStorage(OAuth, db.session) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) with record_queries(db.engine) as queries: with app.test_client() as client: # reset the session before the request with client.session_transaction() as sess: sess["test-service_oauth_state"] = "random-string" # make the request resp = client.get( "/login/test-service/authorized?code=secret-code&state=random-string", base_url="https://a.b.c", ) # check that we redirected the client assert resp.status_code == 302 assert resp.headers["Location"] == "https://a.b.c/oauth_done" assert len(queries) == 2 # check the database authorizations = OAuth.query.all() assert len(authorizations) == 1 oauth = authorizations[0] assert oauth.provider == "test-service" assert isinstance(oauth.token, dict) assert oauth.token == { "access_token": "foobar", "token_type": "bearer", "scope": [""], } def test_sqla_model_repr(app, db, request): class MyAwesomeOAuth(OAuthConsumerMixin, db.Model): pass db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) o = MyAwesomeOAuth() assert "MyAwesomeOAuth" in repr(o) o.provider = "supercool" assert 'provider="supercool"' in repr(o) o.token = {"access_token": "secret"} assert "secret" not in repr(o) db.session.add(o) db.session.commit() assert "id=" in repr(o) assert "secret" not in repr(o) def test_sqla_storage(app, db, blueprint, request): class User(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(80)) class OAuth(OAuthConsumerMixin, db.Model): user_id = db.Column(db.Integer, db.ForeignKey(User.id)) user = db.relationship(User) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) # for now, we'll assume that Alice is the only user alice = User(name="Alice") db.session.add(alice) db.session.commit() # load alice's ID -- this issues a database query alice.id blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=alice) with record_queries(db.engine) as queries: with app.test_client() as client: # reset the session before the request with client.session_transaction() as sess: sess["test-service_oauth_state"] = "random-string" # make the request resp = client.get( "/login/test-service/authorized?code=secret-code&state=random-string", base_url="https://a.b.c", ) # check that we redirected the client assert resp.status_code == 302 assert resp.headers["Location"] == "https://a.b.c/oauth_done" assert len(queries) == 3 # check the database alice = User.query.first() authorizations = OAuth.query.all() assert len(authorizations) == 1 oauth = authorizations[0] assert oauth.user_id == alice.id assert oauth.provider == "test-service" assert isinstance(oauth.token, dict) assert oauth.token == { "access_token": "foobar", "token_type": "bearer", "scope": [""], } def test_sqla_load_token_for_user(app, db, blueprint, request): class User(db.Model): id = 
db.Column(db.Integer, primary_key=True) name = db.Column(db.String(80)) class OAuth(OAuthConsumerMixin, db.Model): user_id = db.Column(db.Integer, db.ForeignKey(User.id)) user = db.relationship(User) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) # set token storage blueprint.storage = SQLAlchemyStorage(OAuth, db.session) # make users and OAuth tokens for several people alice = User(name="Alice") alice_token = {"access_token": "alice123", "token_type": "bearer"} alice_oauth = OAuth(user=alice, token=alice_token, provider="test-service") bob = User(name="Bob") bob_token = {"access_token": "bob456", "token_type": "bearer"} bob_oauth = OAuth(user=bob, token=bob_token, provider="test-service") sue = User(name="Sue") sue_token = {"access_token": "sue789", "token_type": "bearer"} sue_oauth = OAuth(user=sue, token=sue_token, provider="test-service") db.session.add_all([alice, bob, sue, alice_oauth, bob_oauth, sue_oauth]) db.session.commit() # by default, we should not have a token for anyone sess = blueprint.session assert not sess.token assert not blueprint.token # load token for various users blueprint.config["user"] = alice assert sess.token == alice_token assert blueprint.token == alice_token blueprint.config["user"] = bob assert sess.token == bob_token assert blueprint.token == bob_token blueprint.config["user"] = alice assert sess.token == alice_token assert blueprint.token == alice_token blueprint.config["user"] = sue assert sess.token == sue_token assert blueprint.token == sue_token # load for user ID as well del blueprint.config["user"] blueprint.config["user_id"] = bob.id assert sess.token == bob_token assert blueprint.token == bob_token # try deleting user tokens del blueprint.token assert sess.token == None assert blueprint.token == None # shouldn't affect alice's token blueprint.config["user_id"] = alice.id assert sess.token == alice_token assert blueprint.token == alice_token def test_sqla_flask_login(app, db, blueprint, request): login_manager = LoginManager(app) class User(db.Model, UserMixin): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(80)) class OAuth(OAuthConsumerMixin, db.Model): user_id = db.Column(db.Integer, db.ForeignKey(User.id)) user = db.relationship(User) blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) # create some users u1 = User(name="Alice") u2 = User(name="Bob") u3 = User(name="Chuck") db.session.add_all([u1, u2, u3]) db.session.commit() # configure login manager @login_manager.user_loader def load_user(userid): return User.query.get(userid) with record_queries(db.engine) as queries: with app.test_client() as client: # reset the session before the request with client.session_transaction() as sess: sess["test-service_oauth_state"] = "random-string" # set alice as the logged in user sess["_user_id"] = u1.id # make the request resp = client.get( "/login/test-service/authorized?code=secret-code&state=random-string", base_url="https://a.b.c", ) # check that we redirected the client assert resp.status_code == 302 assert resp.headers["Location"] == "https://a.b.c/oauth_done" assert len(queries) == 4 # lets do it again, with Bob as the logged in user -- he gets a different token responses.reset() responses.add( responses.POST, "https://example.com/oauth/access_token", body='{"access_token":"abcdef","token_type":"bearer","scope":"bob"}', ) with record_queries(db.engine) as queries: with 
app.test_client() as client: # reset the session before the request with client.session_transaction() as sess: sess["test-service_oauth_state"] = "random-string" # set bob as the logged in user sess["_user_id"] = u2.id # make the request resp = client.get( "/login/test-service/authorized?code=secret-code&state=random-string", base_url="https://a.b.c", ) # check that we redirected the client assert resp.status_code == 302 assert resp.headers["Location"] == "https://a.b.c/oauth_done" assert len(queries) == 4 # check the database authorizations = OAuth.query.all() assert len(authorizations) == 2 u1_oauth = OAuth.query.filter_by(user=u1).one() assert u1_oauth.provider == "test-service" assert u1_oauth.token == { "access_token": "foobar", "token_type": "bearer", "scope": [""], } u2_oauth = OAuth.query.filter_by(user=u2).one() assert u2_oauth.provider == "test-service" assert u2_oauth.token == { "access_token": "abcdef", "token_type": "bearer", "scope": ["bob"], } u3_oauth = OAuth.query.filter_by(user=u3).all() assert len(u3_oauth) == 0 @requires_blinker def test_sqla_flask_login_misconfigured(app, db, blueprint, request): login_manager = LoginManager(app) class User(db.Model, UserMixin): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(80)) class OAuth(OAuthConsumerMixin, db.Model): user_id = db.Column(db.Integer, db.ForeignKey(User.id)) user = db.relationship(User) blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) # configure login manager @login_manager.user_loader def load_user(userid): return User.query.get(userid) calls = [] def callback(*args, **kwargs): calls.append((args, kwargs)) oauth_error.connect(callback) request.addfinalizer(lambda: oauth_error.disconnect(callback)) with app.test_client() as client: # reset the session before the request with client.session_transaction() as sess: sess["test-service_oauth_state"] = "random-string" # make the request resp = client.get( "/login/test-service/authorized?code=secret-code&state=random-string", base_url="https://a.b.c", ) # check that we redirected the client assert resp.status_code == 302 assert resp.headers["Location"] == "https://a.b.c/oauth_done" assert len(calls) == 1 assert calls[0][0] == (blueprint,) error = calls[0][1]["error"] assert isinstance(error, ValueError) assert str(error) == "Cannot set OAuth token without an associated user" @requires_blinker def test_sqla_flask_login_anon_to_authed(app, db, blueprint, request): login_manager = LoginManager(app) class User(db.Model, UserMixin): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(80)) class OAuth(OAuthConsumerMixin, db.Model): user_id = db.Column(db.Integer, db.ForeignKey(User.id)) user = db.relationship(User) blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) # configure login manager @login_manager.user_loader def load_user(userid): return User.query.get(userid) # create a user object when OAuth succeeds def logged_in(sender, token): assert token assert blueprint == sender resp = sender.session.get("/user") user = User(name=resp.json()["name"]) login_user(user) db.session.add(user) db.session.commit() flask.flash("Signed in successfully") oauth_authorized.connect(logged_in, blueprint) request.addfinalizer(lambda: oauth_authorized.disconnect(logged_in, blueprint)) # mock out the `/user` API call 
responses.add( responses.GET, "https://example.com/user", body='{"name":"josephine"}' ) with record_queries(db.engine) as queries: with app.test_client() as client: with client.session_transaction() as sess: sess["test-service_oauth_state"] = "random-string" # make the request resp = client.get( "/login/test-service/authorized?code=secret-code&state=random-string", base_url="https://a.b.c", ) # check that we redirected the client assert resp.status_code == 302 assert resp.headers["Location"] == "https://a.b.c/oauth_done" assert len(queries) == 5 # check the database users = User.query.all() assert len(users) == 1 user = users[0] assert user.name == "josephine" authorizations = OAuth.query.all() assert len(authorizations) == 1 oauth = authorizations[0] assert oauth.provider == "test-service" assert oauth.token == { "access_token": "foobar", "token_type": "bearer", "scope": [""], } assert oauth.user_id == user.id def test_sqla_flask_login_preload_logged_in_user(app, db, blueprint, request): # need a URL to hit, so that tokens will be loaded, but result is irrelevant responses.add(responses.GET, "https://example.com/noop") login_manager = LoginManager(app) class User(db.Model, UserMixin): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(80)) class OAuth(OAuthConsumerMixin, db.Model): user_id = db.Column(db.Integer, db.ForeignKey(User.id)) user = db.relationship(User) blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) # create some users, and tokens for some of them alice = User(name="Alice") alice_token = {"access_token": "alice123", "token_type": "bearer"} alice_oauth = OAuth(user=alice, token=alice_token, provider="test-service") bob = User(name="Bob") bob_token = {"access_token": "bob456", "token_type": "bearer"} bob_oauth = OAuth(user=bob, token=bob_token, provider="test-service") chuck = User(name="Chuck") # chuck doesn't get a token db.session.add_all([alice, alice_oauth, bob, bob_oauth, chuck]) db.session.commit() # configure login manager @login_manager.user_loader def load_user(userid): return User.query.get(userid) # create a simple view @app.route("/") def index(): return "success" with app.test_request_context("/"): login_user(alice) # hit /noop to load tokens blueprint.session.get("/noop") # now the flask-dance session should have Alice's token loaded assert blueprint.session.token == alice_token with app.test_request_context("/"): # set bob as the logged in user login_user(bob) # hit /noop to load tokens blueprint.session.get("/noop") # now the flask-dance session should have Bob's token loaded assert blueprint.session.token == bob_token with app.test_request_context("/"): # now let's try chuck login_user(chuck) blueprint.session.get("/noop") assert blueprint.session.token == None with app.test_request_context("/"): # no one is logged in -- this is an anonymous user logout_user() with pytest.raises(ValueError): blueprint.session.get("/noop") def test_sqla_flask_login_no_user_required(app, db, blueprint, request): # need a URL to hit, so that tokens will be loaded, but result is irrelevant responses.add(responses.GET, "https://example.com/noop") login_manager = LoginManager(app) class User(db.Model, UserMixin): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(80)) class OAuth(OAuthConsumerMixin, db.Model): user_id = db.Column(db.Integer, db.ForeignKey(User.id)) user = db.relationship(User) blueprint.storage = 
SQLAlchemyStorage( OAuth, db.session, user=current_user, user_required=False ) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) # configure login manager @login_manager.user_loader def load_user(userid): return User.query.get(userid) # create a simple view @app.route("/") def index(): return "success" with app.test_request_context("/"): # no one is logged in -- this is an anonymous user logout_user() # this should *not* raise an error blueprint.session.get("/noop") assert blueprint.session.token == None def test_sqla_delete_token(app, db, blueprint, request): class OAuth(OAuthConsumerMixin, db.Model): pass blueprint.storage = SQLAlchemyStorage(OAuth, db.session) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) # Create an existing OAuth token for the service existing = OAuth( provider="test-service", token={"access_token": "something", "token_type": "bearer", "scope": ["blah"]}, ) db.session.add(existing) db.session.commit() assert len(OAuth.query.all()) == 1 assert blueprint.token == { "access_token": "something", "token_type": "bearer", "scope": ["blah"], } del blueprint.token assert blueprint.token == None assert len(OAuth.query.all()) == 0 def test_sqla_overwrite_token(app, db, blueprint, request): class OAuth(OAuthConsumerMixin, db.Model): pass blueprint.storage = SQLAlchemyStorage(OAuth, db.session) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) # Create an existing OAuth token for the service existing = OAuth( provider="test-service", token={"access_token": "something", "token_type": "bearer", "scope": ["blah"]}, ) db.session.add(existing) db.session.commit() assert len(OAuth.query.all()) == 1 with record_queries(db.engine) as queries: with app.test_client() as client: # reset the session before the request with client.session_transaction() as sess: sess["test-service_oauth_state"] = "random-string" # make the request resp = client.get( "/login/test-service/authorized?code=secret-code&state=random-string", base_url="https://a.b.c", ) # check that we redirected the client assert resp.status_code == 302 assert resp.headers["Location"] == "https://a.b.c/oauth_done" assert len(queries) == 2 # check that the database record was overwritten authorizations = OAuth.query.all() assert len(authorizations) == 1 oauth = authorizations[0] assert oauth.provider == "test-service" assert isinstance(oauth.token, dict) assert oauth.token == { "access_token": "foobar", "token_type": "bearer", "scope": [""], } def test_sqla_cache(app, db, blueprint, request): cache = Cache(app) class OAuth(OAuthConsumerMixin, db.Model): pass blueprint.storage = SQLAlchemyStorage(OAuth, db.session, cache=cache) db.create_all() def done(): db.session.remove() db.drop_all() request.addfinalizer(done) with record_queries(db.engine) as queries: with app.test_client() as client: # reset the session before the request with client.session_transaction() as sess: sess["test-service_oauth_state"] = "random-string" # make the request resp = client.get( "/login/test-service/authorized?code=secret-code&state=random-string", base_url="https://a.b.c", ) # check that we redirected the client assert resp.status_code == 302 assert resp.headers["Location"] == "https://a.b.c/oauth_done" assert len(queries) == 2 expected_token = {"access_token": "foobar", "token_type": "bearer", "scope": [""]} # check the database authorizations = OAuth.query.all() assert len(authorizations) == 1 oauth = authorizations[0] assert 
oauth.provider == "test-service" assert isinstance(oauth.token, dict) assert oauth.token == expected_token # cache should be invalidated assert cache.get("flask_dance_token|test-service|None") is None # first reference to the token should generate SQL queries with record_queries(db.engine) as queries: assert blueprint.token == expected_token assert len(queries) == 1 # should now be in the cache assert cache.get("flask_dance_token|test-service|None") == expected_token # subsequent references should not generate SQL queries with record_queries(db.engine) as queries: assert blueprint.token == expected_token assert len(queries) == 0
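The storage pattern exercised by these tests carries over directly to application code; a minimal sketch (the MyOAuth model name, the example.com URLs, and the credentials are illustrative, not taken from the test suite):

    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy
    from flask_dance.consumer import OAuth2ConsumerBlueprint
    from flask_dance.consumer.storage.sqla import OAuthConsumerMixin, SQLAlchemyStorage

    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
    app.secret_key = "change-me"
    db = SQLAlchemy(app)

    class MyOAuth(OAuthConsumerMixin, db.Model):
        pass  # the mixin supplies the provider and token columns the storage reads and writes

    blueprint = OAuth2ConsumerBlueprint(
        "example-service", __name__,
        client_id="client_id", client_secret="client_secret",
        base_url="https://example.com",
        authorization_url="https://example.com/oauth/authorize",
        token_url="https://example.com/oauth/access_token",
        redirect_url="/oauth_done",
    )
    blueprint.storage = SQLAlchemyStorage(MyOAuth, db.session)
    app.register_blueprint(blueprint, url_prefix="/login")

    with app.app_context():
        db.create_all()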
singingwolfboy/flask-dance
tests/consumer/storage/test_sqla.py
Python
mit
24,167
# -*- coding: utf-8 -*-
__author__ = 'eso'
import sys
sys.path.append('../../')
from tools.petscan import PetScan
import re
import requests
import pywikibot

list_of_pages = [1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1191, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1243, 1244]

header = '''<noinclude><pagequality level="1" user="THEbotIT" /><div class="pagetext">{{Seitenstatus2||[[Staatsvertrag von Saint-Germain-en-Laye]]. In: Staatsgesetzblatt für die Republik Österreich. Jahrgang 1920, S. 995–1244|Staatsvertrag von Saint-Germain-en-Laye|}}{{BlockSatzStart}} </noinclude>'''

footer = '''<noinclude>{{BlockSatzEnd}}{{Zitierempfehlung|Projekt=: ''[[Staatsvertrag von Saint-Germain-en-Laye]]. In: Staatsgesetzblatt für die Republik Österreich. Jahrgang 1920, S. 995–1244''. Österreichische Staatsdruckerei, Wien 1920|Seite=%s}}</div></noinclude>'''

with open('saintgermain.txt', mode='r', encoding='utf8') as rawfile:
    text = re.split('\{\{Seite\|\d{3,4}\|\|Staatsgesetzblatt_\(Austria\)_1920_\d{4}\.jpg\}\}', rawfile.read())

site = pywikibot.Site()
for idx, i in enumerate(list_of_pages):
    if i == 995:
        continue
    if i < 1000:
        lemma = 'Seite:Staatsgesetzblatt (Austria) 1920 0{}.jpg'.format(i)
    else:
        lemma = 'Seite:Staatsgesetzblatt (Austria) 1920 {}.jpg'.format(i)
    page = pywikibot.Page(site, lemma)
    page.text = header + text[idx] + (footer % i)
    page.save(summary='Automatische Konvertierung von PR1 zu PR2', botflag=True)
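A small illustration of the splitting step (the sample string is invented): the regex strips out each {{Seite|...}} proofread-page marker, leaving a list of page-text chunks that the loop above indexes by position.

    import re
    sample = 'intro{{Seite|0996||Staatsgesetzblatt_(Austria)_1920_0996.jpg}}first page'
    re.split(r'\{\{Seite\|\d{3,4}\|\|Staatsgesetzblatt_\(Austria\)_1920_\d{4}\.jpg\}\}', sample)
    # -> ['intro', 'first page']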
the-it/WS_THEbotIT
archive/online/2015/150914_add_pages_Saint_Germain.py
Python
mit
3,440
"""LombardiSpirograph - draw rotationally symmetric drawings in Lombardi style David Eppstein, UC Irvine, March 2010 For usage information type "python LombardiSpirography.py" without any additional arguments. """ from pyx import canvas,path,color from optparse import OptionParser from math import * import sys # ============================================================ # Pre-determined graphs by name # ============================================================ namedGraphs = { "andrasfai4": "11-ad", "andrasfai5": "14-gad", "andrasfai6": "17-dag", "andrasfai7": "20-jdag", "antihole7": "7-cb", "antihole8": "8-dab", "antihole9": "9-cab", "antiprism4": "4-a1-a", "antiprism5": "5-a1-a", "antiprism6": "6-a1-a", "brinkmann": "7-c2-1-b", "c5xc6": "15-c5-c", "cogwheel3": "3-x-1-0", "cogwheel4": "4-x-1-0", "cogwheel5": "5-x-1-0", "cogwheel6": "6-x-1-0", "complete5": "5-ab", "complete6-a": "6-cab", "complete6-b": "5-x-ab", "complete6-c": "3-a02-a", # minimum-crossing drawing "complete7": "7-bac", "complete8-a": "8-dcab", "complete8-b": "7-x-bac", "crown5": "10-ac", "crown6": "6-a04-a", "crown7": "14-cae", "cube": "4-a-a", "cuboctahedron": "4-a1-1-a", "dodecahedron-a": "5-a-1-0-a", "dodecahedron-b": "10-a-b", "desargues": "10-a-c", "durer": "6-a-b", "dyck": "8-a-2-0-c", "f40": "10-a-4-0-a", "grotzsch": "5-x-1-b", "hypercube": "8-c2-a", "icosahedron": "3-a01-01-1-a", "icosidodecahedron": "10-a1-2-b", "k33": "6-ca", "k44": "8-ca", "k55": "10-eac", "k66": "12-eac", "mobiuskantor": "8-a-c", "nauru": "12-a-e", "octahedron": "3-a1-a", "paley13": "13-dac", "pappus": "6-c2-0-a", "prism3": "3-a-a", "prism5": "5-a-a", "prism6": "6-a-a", "petersen": "5-a-b", # "shrikhande": "8-ba1-bc", # requires arcs to pass through vertices "sun3": "3-a1-0", "sun4": "4-ba1-0", "sun5": "5-ba1-0", "sun6": "6-cba1-0", "tetrahedron-a": "3-x-a", "tetrahedron-b": "4-ba", "utility": "6-ca", "wagner": "8-da", "wheel4": "4-x-a", "wheel5": "5-x-a", "wheel6": "6-x-a", } # ============================================================ # Command-line options # ============================================================ parser = OptionParser() parser.add_option("-f","--format", dest="show_format", action="store_true", help = "describe the graph input format and exit") parser.add_option("-n","--names", dest="show_names", action="store_true", help = "show a description of graph names and exit") parser.add_option("-s","--scale", dest="scale", action="store", type="float", default="1.0", help = "size of overall drawing relative to default") parser.add_option("-r","--radius",dest="radius", action="store", type="float", default="1.0", help = "radius of vertices relative to default") parser.add_option("-c","--color",dest="color", action="store", type="string", default="red", help = "vertex color (e.g. blue or 76B3DF)") parser.add_option("-o","--outline", dest="outline", action="store_true", help = "avoid drawing outlines around vertices") options,args = parser.parse_args() def abort(message): print >>sys.stderr,message sys.exit(-1) graphName = "-".join(args).lower() if options.show_format: if graphName: abort("--format option does not take any arguments") print '''The graph should be described as a sequence of alphanumeric words, separated either by spaces or by blank lines. The first word gives the order of symmetry of the drawing (the number of vertices in each concentric layer) and each subsequent word describes the vertices in a single layer of the graph. 
Each word after the first takes the form of a (possibly empty) sequence of letters followed by a (possibly empty) number. The letters describe edges connecting two vertices in the same layer: "a" for a connection between consecutive vertices in the same layer, "b" for a connection between vertices two steps away from each other, etc. The letters should be listed in the order the connections should appear at the vertex, starting from the edge closest to the center of the drawing and progressing outwards. Only connections that span less than half the circle are possible, except that the first layer may have connections spanning exactly half the circle. The numeric part of a word describes the connection from one layer to the next layer. If this number is zero, then vertices in the inner layer are connected to vertices in the next layer radially by straight line segments. Otherwise, pairs of vertices from the inner layer, the given number of steps apart, are connected to single vertices in the outer layer. A nonzero number written with a leading zero (e.g. "01" in place of "1") indicates that, as well as connections with the given number of steps, there should also be a radial connection from the inner layer to the next layer that has vertices aligned with it; this may not necessarily be the layer immediately outward. In the innermost layer, the special word "x" may be used to indicate that the layer consists of a single vertex at the center of the drawing. "x0" indicates that this central vertex is connected both to every vertex in the adjacent layer and also to every vertex in the next layer that is staggered with respect to the inner two layers. ''' sys.exit(0) if options.show_names: if graphName: if graphName not in namedGraphs: print '''Graph name''',graphName,'''is not recognized. Run python LombardiSpirograph --names without any command line arguments to get a list of recognized names.''' else: print graphName,"is equivalent to",namedGraphs[graphName] sys.exit(0) print '''This program has built into it a set of graph names that may be used as the command-line argument to specify the graph to be drawn. They are: ''' graphs = namedGraphs.items() graphs.sort() graphs = [("Name","Description"),("====","===========")] + graphs for name,description in graphs: print " " + name + " "*(20-len(name)) + description sys.exit(0) if not graphName: print '''This program draws rotationally-symmetric graphs in Lombardi style: the edges are circular arcs that meet at equal angles at each vertex. To use it, type python LombardiSpirograph.py [graph] >output.svg to a command line, where [graph] is replaced by a name or description of the graph to be drawn. 
For a list of available graph names, type python LombardiSpirograph.py --names For help with the input format for graph descriptions, type python LombardiSpirograph.py --format For a list of other command line options, type python LombardiSpirograph.py --help ''' sys.exit(0) # ============================================================ # Command line parsing # ============================================================ if graphName in namedGraphs: graphName = namedGraphs[graphName] try: # Split command line argument into symmetry and level descriptors nameComponents = graphName.split("-") symmetry = int(nameComponents[0]) vertexDescriptors = nameComponents[1:] levels = len(vertexDescriptors) # Parse out the X for the descriptor at the inner level central = [False]*levels radialzero = False if vertexDescriptors[0] == "x": vertexDescriptors[0] = "" central[0] = True elif vertexDescriptors[0] == "x0": vertexDescriptors[0] = "" central[0] = True radialzero = True # Parse out the letters for the circulant at each level circulant = [None]*levels for i in range(levels): circulant[i] = [ord(x) - ord('a') + 1 for x in vertexDescriptors[i] if x >= "a" and x < "x"] vertexDescriptors[i] = vertexDescriptors[i][len(circulant[i]):] # Parse out the numbers for which other vertex at this level # connects to the same vertex at the next level connector = [0]*levels radial = [False]*levels for i in range(levels): if vertexDescriptors[i]: connector[i] = int(vertexDescriptors[i]) if connector[i] and vertexDescriptors[i][0] == "0": radial[i] = True if radialzero: radial[0] = True except: abort('''Unable to parse command line arguments. For usage type "python LombardiSpirography.py help"''') # ============================================================ # Sanity checks # ============================================================ if connector[-1]: abort("Outer level should not specify connector to next level") threshold = symmetry for c in circulant: for offset in c: if offset * 2 > threshold: abort("Circulant specification goes too far") threshold = symmetry - 1 for offset in connector: if offset >= symmetry: # if offset * 2 > symmetry: abort("Connector specification goes too far") # ============================================================ # Preliminary calculations # ============================================================ stagger = [False]*levels s = False for i in range(levels): stagger[i] = s if connector[i] % 2: s = not s if central[0] and radial[0]: stagger[0] = True inRadial = [False]*levels for i in range(levels): if radial[i]: conn = [j for j in range(i+1,levels) if stagger[i] == stagger[j]] if not conn: abort("No layer outward of %s with same stagger" % i) radial[i] = conn[0] inRadial[conn[0]] = True indegree = [1]*(levels+1) for i in range(levels): if connector[i]: indegree[i+1] += 1 if inRadial[i]: indegree[i] += 1 indegree[0] = indegree[levels] = 0 # Fudge factor for some angle computations incount = [indegree[i]+1 for i in range(levels)] if circulant[0] and circulant[0][0]*2 == symmetry: incount[0] = 0 degree = [0]*levels for i in range(levels): if central[i]: degree[i] = symmetry else: degree[i] = indegree[i] + indegree[i+1] degree[i] += 2*len(circulant[i]) if i == 0 and symmetry % 2 == 0 and symmetry // 2 in circulant[i]: degree[i] -= 1 for i in range(levels): mid = -1 if degree[i] % 2 == 0: # Even degree: if i == 0: if levels == 1: # Only a single level if len(circulant[i]) % 2 == 1: mid = len(circulant[i])//2 else: if len(circulant[i]) % 2 == 0: mid = len(circulant[i])//2 elif i < 
levels - 1: if len(circulant[i]) % 2 == 1: mid = len(circulant[i])//2 elif len(circulant[i]) % 2 == 0: mid = len(circulant[i])//2 - 1 if mid >= 0 and mid < len(circulant[i]) and circulant[i][mid] != 1: abort("Central circulant edge can only be adjacent") # Test which edges within a single level follow line segments instead of arcs straightCirculant = [[False]*len(circulant[i]) for i in range(levels)] for i in range(levels): for j in range(len(circulant[i])): if 2*(degree[i]*circulant[i][j]+symmetry*(2*j+incount[i])) \ == degree[i]*symmetry: straightCirculant[i][j] = True # ============================================================ # Calculate vertex placements and connector radii # ============================================================ def cot(x): """Cotangent(x). Why is this not in math?""" return tan(pi/2-x) def distance(p,q): """Euclidean distance from p to q""" return ((p.real - q.real)**2 + (p.imag - q.imag)**2)**0.5 radius = [0]*levels # how far from origin to make each layer conrad = [0]*levels # distance point to ctr of curvature of connector straightConnector = [False]*levels for i in range(levels): if central[i]: radius[i] = 0 elif i == 0: radius[i] = symmetry / (2 * pi) # arclen 1 around initial circle elif connector[i-1] == 0: factor = (1 + 2 * pi / symmetry) if i > 1 and not circulant[i-1]: if connector[i-2] % 2 != 0: factor = (1 + factor)/2 # hack dodecahedron less exponential elif radius[i-2]: factor = min(factor,2-radius[i-2]/radius[i-1]) # hack pappus and dyck to be less exponential radius[i] = max(1,radius[i-1] * factor) # same radial length as previous arclen else: # calculate some angles # p = inner point at level i-1 # q = unknown location of point at level i # o = origin # O = point on line op on the other side of p # c = center of curvature of connecting arc correction = 1 if radial[i-1]: correction = 2 poq = pi*connector[i-1]/symmetry pcq = (inRadial[i]+1)*pi/degree[i] - correction*pi/degree[i-1] + poq Opq = correction*pi/degree[i-1] + pcq/2 try: radius[i] = radius[i-1] / ((cot(poq)-cot(Opq))*sin(poq)) except: abort("Unable to make connecting arcs from level %s to level %s" \ % (i-1,i)) if abs(pcq) < 0.01: straightConnector[i-1] = True else: scr = distance(radius[i-1],radius[i]*e**(1j*poq))/2 conrad[i-1] = scr/sin(pcq/2) if i > 0 and radius[i] < radius[i-1]: abort("Inverted levels") # ============================================================ # Generate graphical objects # ============================================================ def rotations(level): if central[level]: return [1j] return [1j*e**(pi*1j*i/symmetry) for i in range(int(stagger[level]),2*symmetry,2)] def vertices(): """Return sequence of complex numbers representing vertex positions""" for i in range(levels): for r in rotations(i): yield radius[i]*r def segments(): """Return sequence of vertex pairs to be connected by line segments""" if circulant[0] and circulant[0][0]*2 == symmetry: # cross-edges in center for i in range(circulant[0][0]): r = radius[0]*e**(pi*2j*i/symmetry) yield 1j*r,-1j*r for i in range(levels): if indegree[i] == 1: for r in rotations(i): yield radius[i-1]*r,radius[i]*r for j in range(len(circulant[i])): if straightCirculant[i][j]: p = radius[i] q = p * e**(pi*2j*circulant[i][j]/symmetry) for r in rotations(i): yield p*r,q*r if straightConnector[i]: p = radius[i] q = radius[i+1] poq = e**(1j*pi*connector[i]/symmetry) for r in rotations(i): yield p*r,q*poq*r yield p*r,q*r/poq if radial[i]: for r in rotations(radial[i]): yield radius[i]*r,radius[radial[i]]*r maxRadius = 
radius[-1] def circulants(): """Return sequence of all arcs within a single level and compute maxRadius as we do""" global maxRadius for i in range(levels): if central[i]: continue rot = rotations(i) p = radius[i] * rot[0] for j in range(len(circulant[i])): if straightCirculant[i][j]: continue step = circulant[i][j] if step*2 == symmetry: continue q = radius[i] * rot[step] d = distance(q,p) theta = pi*step/symmetry # angle p-origin-circle center psi = pi*(2*j+incount[i])/degree[i] # angle o-p-cc bulgy = theta + psi > pi/2 if bulgy: psi -= pi/2 else: psi += pi/2 rad = abs(d/(2 * sin(pi - theta - psi))) phi = pi/2 - theta - psi # angle q-p-cc if bulgy: rtoc = radius[i]*cos(theta) - rad*sin(phi) maxRadius = max(maxRadius,rtoc+rad) for k in range(symmetry): r = e**(pi*2j*k/symmetry) if not bulgy: yield (p*r,q*r,rad) elif theta + psi < pi/2: yield (q*r,p*r,rad) else: yield (p*r,q*r,-rad) def connectors(): """Return sequence of all arcs from one level to the next""" for i in range(levels): if connector[i] and not straightConnector[i]: p = radius[i] q = radius[i+1] poq = e**(1j*pi*connector[i]/symmetry) for r in rotations(i): if conrad[i] > 0: yield q*poq*r,p*r,conrad[i] yield p*r,q*r/poq,conrad[i] else: yield p*r,q*poq*r,-conrad[i] yield q*r/poq,p*r,-conrad[i] def arcs(): """Return sequence of vertex,vertex,radius triples""" for arc in circulants(): yield arc for arc in connectors(): yield arc # ============================================================ # Draw the layout # ============================================================ # Dummy pass through arcs to find max radius for a in arcs(): pass scale = 25 * options.scale vertexRadius = 5 * options.radius * options.scale bbox = maxRadius*scale + vertexRadius + 1 bbox = 1+int(bbox) print '''<?xml version="1.0" standalone="no"?> <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> <svg width="%s" height="%s" viewBox="-%s -%s %s %s" xmlns="http://www.w3.org/2000/svg" version="1.1">''' % \ (2*bbox+1,2*bbox+1,0,0,2*bbox+1,2*bbox+1) def place(p): """Convert unit-free complex to pixel-scaled real coordinates""" return p.real*scale+bbox+1,p.imag*scale+bbox+1 print ' <g style="fill:none; stroke:black">' for p,q in segments(): print ' <line x1="%0.2f" y1="%0.2f" x2="%0.2f" y2="%0.2f" />' % \ (place(p)+place(q)) for p,q,r in arcs(): la = 0 if r < 0: # flag for large arc la = 1 r = -r print ' <path d="M %0.2f,%0.2f A %0.2f,%0.2f 1 %s %s %0.2f,%0.2f" />' % \ (place(p)+(scale*r,scale*r,la,la)+place(q)) print ' </g>' print ' <g style="fill:%s; stroke:%s">' % (options.color, (options.outline and "none" or "black")) for v in vertices(): print ' <circle cx="%0.2f" cy="%0.2f" r="%s" />' % \ (place(v)+(vertexRadius,)) print ' </g>' print '</svg>'
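As a worked reading of the descriptor format documented in the --format help (taken from the built-in table, where petersen maps to "5-a-b"): the leading 5 requests five-fold symmetry with two layers; the inner word "a" joins consecutive inner vertices into a pentagon and, lacking a numeric part, connects each inner vertex straight out to the next layer; the outer word "b" joins outer vertices two steps apart into a pentagram. Rendering follows the usage text above, e.g. "python LombardiSpirograph.py petersen > petersen.svg".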
MHenderson/graph-visualisations
lib/lombardi/LombardiSpirograph.py
Python
mit
18,845
# -*- coding: utf-8 -*-
import os

import pygame
from pygame.locals import *


class Sprite(pygame.sprite.Sprite):

    def __init__(self, SpriteName):
        pygame.sprite.Sprite.__init__(self)
        self.Name = SpriteName
        self.rect = 0
        self.image = 0

    def getRect(self):
        return self.rect

    def getImg(self):
        return self.image

    def load_image(self, name, colorkey=None):
        #fullname = os.path.join('data', 'images')
        fullname = name + '.png'
        try:
            image = pygame.image.load(fullname)
        except pygame.error, message:
            print 'Cannot load image:', fullname
            raise SystemExit, message
        image = image.convert()
        if colorkey is not None:
            if colorkey is -1:
                colorkey = image.get_at((0, 0))
            image.set_colorkey(colorkey, RLEACCEL)
        return image, image.get_rect()


class spritesheet(object):

    def __init__(self, filename):
        try:
            self.sheet = pygame.image.load(filename).convert()
        except pygame.error, message:
            print 'Unable to load spritesheet image:', filename
            raise SystemExit, message

    # Load a specific image from a specific rectangle
    def image_at(self, rectangle, colorkey=None):
        "Loads image from x,y,x+offset,y+offset"
        rect = pygame.Rect(rectangle)
        image = pygame.Surface(rect.size).convert()
        image.blit(self.sheet, (0, 0), rect)
        if colorkey is not None:
            if colorkey is -1:
                colorkey = image.get_at((0, 0))
            image.set_colorkey(colorkey, pygame.RLEACCEL)
        return image, rect

    # Load a whole bunch of images and return them as a list
    def images_at(self, rects, colorkey=None):
        "Loads multiple images, supply a list of coordinates"
        # forward the colorkey so every frame gets the same transparency handling
        return [self.image_at(rect, colorkey) for rect in rects]

    # Load a whole strip of images
    def load_strip(self, rect, image_count, colorkey=None):
        "Loads a strip of images and returns them as a list"
        tups = [(rect[0] + rect[2] * x, rect[1], rect[2], rect[3])
                for x in range(image_count)]
        return self.images_at(tups, colorkey)
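A usage sketch of the strip loader (sheet.png, the 32x32 frame rectangle, and the frame count are placeholders): load_strip slices a horizontal row of equally sized frames out of the sheet and returns one (surface, rect) pair per frame.

    ss = spritesheet('sheet.png')
    frames = ss.load_strip((0, 0, 32, 32), 8, colorkey=-1)  # eight 32x32 frames from the top row
    first_surface, first_rect = frames[0]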
vtungn/HackaPanzer
Sprite.py
Python
mit
2,212
# how many routes are in a 20x20 lattice grid?
import time

start_time = time.clock()

field = []
sum = 0
for n in range(0, 21):
    field.append([1])
    for i in range(1, 21):
        sum = field[n][i-1]
        if n > 0:
            sum += field[n-1][i]
        field[n].append(sum)

print(field[20][20])
print("--- %s seconds ---" % (time.clock() - start_time))
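The table being filled is Pascal's triangle, so the printed value should equal the central binomial coefficient C(40, 20) = 137846528820; a quick cross-check (math.comb requires Python 3.8+):

    import math
    assert math.comb(40, 20) == 137846528820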
Selen93/ProjectEuler
Python/Problem 15/Problem 15.py
Python
mit
349
"""empty message Revision ID: 3cee55aae20 Revises: None Create Date: 2015-08-30 22:42:28.320071 """ # revision identifiers, used by Alembic. revision = '3cee55aae20' down_revision = None from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('results', sa.Column('id', sa.Integer(), nullable=False), sa.Column('url', sa.String(), nullable=True), sa.Column('result_all', postgresql.JSON(), nullable=True), sa.Column('result_no_stop_words', postgresql.JSON(), nullable=True), sa.PrimaryKeyConstraint('id') ) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_table('results') ### end Alembic commands ###
theMentalItchGuy/NLP
migrations/versions/3cee55aae20_.py
Python
mit
847
#!/usr/bin/env python2 # Imports import json import datetime from bin import monitor, register from ClientSettings import ClientSettings from ClientSettings import constants import dataCollector import settings import time import sys Monitor = monitor.Monitor() message1 = "Client script running on version: {0}".format(ClientSettings.VERSION) message2 = "Your version is still maintained: {0}".format('True') class StartMonitor: def __init__(self): print message1, message2 print 'Initialising...' def monitor(self): while 1: MonitorData = self.collect_data() MonitorData = self.check_need_to_log(MonitorData) self.write_data(MonitorData) self.upload_data() self.update_count_requests() self.print_data(MonitorData) print @staticmethod def collect_data(): MonitorData = dataCollector.get_all_data() return MonitorData @staticmethod def print_data(MonitorData): print '\-----------------------------------System Statistics--------------------------------------\\' print ' Your Network load is at the moment sent: {0} Bytes, and received: {1} Bytes.'.format(MonitorData[3][0], MonitorData[3][1]) print ' Your CPU load is at the moment: {0}%.'.format(MonitorData[2]) print ' Your RAM usage is at the moment: {0}%.'.format(MonitorData[4]) print ' Your DISK usage is at the moment: {0}%.'.format(MonitorData[5]) print ' You currently have {0} read, and {1} written.'.format(MonitorData[6][0], MonitorData[6][1]) print ' Your ServerId is: {0}.'.format(MonitorData[1]) print ' Your HostName is: {0}.'.format(MonitorData[0]) print ' Last reported message ID is: {0}.'.format(MonitorData[8]) print ' Last reported message is: {0}.'.format(MonitorData[7]) print ' This is request: {0}.'.format(constants.REQUEST_COUNT) print '\------------------------------------------------------------------------------------------\\' @staticmethod def write_data(MonitorData): with open(settings.JSON_FILE, 'r+') as f: json_data = json.load(f) json_data["RequestDetails"]["Time"]["RequestSent"] = str(time.time()) json_data["Server"]["ServerDetails"]["NetworkLoad"]["Sent"] = MonitorData[3][0] json_data["Server"]["ServerDetails"]["NetworkLoad"]["Received"] = MonitorData[3][1] json_data["Server"]["ServerDetails"]["ServerName"] = MonitorData[0] json_data["Server"]["ServerDetails"]["CPU_Usage"] = MonitorData[2] json_data["Server"]["ServerDetails"]["ServerKey"] = MonitorData[1] json_data["Server"]["ServerDetails"]["Ram_Usage"] = MonitorData[4] json_data["Server"]["ServerDetails"]["Disk_Usage"] = MonitorData[5] json_data["Server"]["ServerDetails"]["Disk_Load"]["Read"] = MonitorData[6][0] json_data["Server"]["ServerDetails"]["Disk_Load"]["Write"] = MonitorData[6][1] if MonitorData[8]: if Monitor.getLastLogID() < float(MonitorData[8]): json_data["Server"]["Messages"]["Log"] = MonitorData[7] json_data["Server"]["Messages"]["AlertID"] = MonitorData[8] json_data["Server"]["Messages"]["Alert"] = True else: json_data["Server"]["Messages"]["Alert"] = False f.seek(0) f.write(json.dumps(json_data)) f.truncate() @staticmethod def upload_data(): # print 'Sending json.' 
Monitor.SendJsonToServer() @staticmethod def update_count_requests(): constants.REQUEST_COUNT += 1 return constants.REQUEST_COUNT def check_need_to_log(self, MonitorData): if not MonitorData: pass CPU = MonitorData[2] RAM = MonitorData[4] if float(CPU) > 75: MonitorData = self.log_message(target='CPU', spike=CPU, MonitorData=MonitorData) elif float(RAM) > 75: MonitorData = self.log_message(target='RAM', spike=RAM, MonitorData=MonitorData) else: MonitorData = MonitorData MonitorData[7] = 'None' MonitorData[8] = 0 self.log_message(target='None', spike=0, MonitorData=MonitorData) return MonitorData @staticmethod def log_message(target, spike, MonitorData): if target == 'CPU': message = 'There has been a CPU usage spike of: {0}%!'.format(spike) elif target == 'RAM': message = 'There has been a RAM usage spike of: {0}%!'.format(spike) elif target == 'None': message = '' else: message = 'There is an unexpected spike, we are not sure where it is coming from, ' \ 'but the value is: {0}'.format(spike) MonitorData[7] = message LastID = Monitor.getLastLogID() LastID += 1 MonitorData[8] = LastID return MonitorData if __name__ == '__main__': Arguments = sys.argv try: if Arguments[1].lower() == '-r': print 'Registering...' register = register.Register() key = register.register_agent() with open(ClientSettings.FILE_DIR + 'details.json', 'r+') as f: json_data = json.load(f) json_data["ServerDetails"]["ServerKey"] = key f.seek(0) f.write(json.dumps(json_data)) f.truncate() else: constants.REGISTER = False except IndexError: constants.REGISTER = False StartMonitor = StartMonitor() StartMonitor.monitor()
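Invocation follows from the __main__ block above: running the script once with the -r flag registers the agent and writes the returned ServerKey into details.json, and running it again with no arguments starts the monitoring loop (the script is written for Python 2, so it needs a Python 2 interpreter).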
Evert-Arends/AuroraPlusClient
run.py
Python
mit
5,859
import importlib
import json

from .base import MongoEnginericsAdapter


class ApistarWSGIAdapter(MongoEnginericsAdapter):

    def __init__(self, *args, **kwargs):
        self.engine = importlib.import_module('apistar')
        self._wsgi = importlib.import_module('apistar.frameworks.wsgi')
        super(ApistarWSGIAdapter, self).__init__(*args, **kwargs)

    def attach(self, ctrl):
        def find(query: self.engine.http.QueryParams):
            return ctrl.find(query)

        def update(item_id, updates: self.engine.http.Body):
            return ctrl.update(item_id, json.loads(updates))

        def create(body: self.engine.http.Body):
            return ctrl.create(json.loads(body))

        def find_one(item_id):
            return ctrl.find_one(item_id)

        def delete(item_id):
            return ctrl.delete(item_id)

        return self.engine.Include('/{}'.format(ctrl.name), [
            self.engine.Route('/', 'GET', find),
            self.engine.Route('/', 'POST', create),
            self.engine.Route('/{item_id}', 'GET', find_one),
            self.engine.Route('/{item_id}', 'PUT', update),
            self.engine.Route('/{item_id}', 'DELETE', delete),
        ])

    def get_app(self):
        routes = [self.attach(ctrl()) for ctrl in self._controllers]
        return self._wsgi.WSGIApp(routes=routes)
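For a controller whose name attribute is 'users', attach() wires the following routes (read directly from the Include above; item_id is the path parameter and request bodies are parsed as JSON):

    GET    /users/            -> find
    POST   /users/            -> create
    GET    /users/{item_id}   -> find_one
    PUT    /users/{item_id}   -> update
    DELETE /users/{item_id}   -> delete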
monumentum/mongoenginerics
mongoenginerics/adapter/apistar.py
Python
mit
1,346
# -*- coding: utf-8 -*- """ Module for making calls to EPO-OPS REST-API. This module contain classes and functions to get data from [EPO-OPS API](http://www.epo.org/searching-for-patents/technical/espacenet/ops.html) """ import logging import re import time from base64 import b64encode from collections import namedtuple from datetime import datetime, timedelta import requests from .constants import AUTH_URL, URL_PREFIX, VALID_ENDPOINTS, \ VALID_IDTYPES from epo_utils.exceptions import FetchFailed, QuotaPerHourExceeded, \ QuotaPerWeekExceeded from epo_utils.ops import Services, ReferenceType try: import requests_cache except ImportError: _HAS_CACHE = False else: _HAS_CACHE = True from .documents import DocumentID class APIInput: """ Encapsulation of API-input. Provides ID-formatting and interface to :class:`epo_utils.documents.DocumentID`. Attributes ---------- id_type : str ID-format (epodoc, docdb, original). number : str Document number. kind : str Document kind code. country : str Country code. date : str Date as YYYYMMDD-string. """ def __init__(self, id_type, number, kind=None, country=None, date=None): if id_type not in VALID_IDTYPES: raise ValueError('invalid id_type: {}'.format(id_type)) if date is not None: date = str(date) try: datetime.strptime(date, '%Y%m%d') except ValueError: raise ValueError('date must be in YYYYMMDD-format') else: if len(date) != 8: raise ValueError('date must be in YYYYMMDD-format') if country is not None and not country.strip(): raise ValueError('country cant be empty if provided') if kind is not None and not kind.strip(): raise ValueError('kind cant be empty if provided') self.id_type = id_type self.number = str(number) self.kind = kind self.country = country self.date = date @classmethod def from_document_id(cls, document_id): """ Convert instance of :class:`epo_utils.documents.DocumentID` to `APIInput`. Parameters ---------- document_id : epo_utils.documents.DocumentID Document-ID to translate. Returns ------- APIInput """ if not isinstance(document_id, DocumentID): raise ValueError('document_id must be DocumentID-instance') return cls(document_id.id_type, document_id.doc_number, document_id.kind, document_id.country, document_id.date) def to_id(self): """ Format as valid API-input ID. Returns ------- str """ if (',' in self.number or '.' in self.number or '/' in self.number) \ and self.id_type != 'classification': number = '({})'.format(self.number) else: number = self.number parts = [part for part in [self.country, number, self.kind, self.date] if part is not None] if self.id_type == 'original': id_ = '.'.join(parts).replace(' ', '%20') elif self.id_type == 'docdb': id_ = '.'.join(parts) elif self.id_type == 'epodoc': if self.date is not None: id_ = ''.join(parts[:-1]) id_ += '.' + self.date else: id_ = ''.join(parts) elif self.id_type == 'classification': return number else: raise ValueError('invalid id_type: {}'.format(self.id_type)) return id_ def __repr__(self): module = self.__class__.__module__ class_name = self.__class__.__name__ return '<{0}.{1}: {2}>'.format(module, class_name, self.to_id()) class Token(namedtuple('Token', ['token', 'expires'])): """ Wrapper around access-token. """ class EPOClient: """ Client to call EPO-OPS REST-API using `requests`. Features auto-throttling based on OPS throttling headers and automatic retries on server-side error codes. Parameters ---------- accept_type : str Http accept type. key : str, optional EPO OPS user key. secret : str, optional EPO OPS user secret. 
cache : bool If True, try to use `requests_cache` for caching. Default False. cache_kwargs : dict, optional. Passed to :py:func:`requests_cache.install_cache` as keyword arguments if provided. max_retries : int Number of allowed retries at 500-responses. retry_timeout : float, int Timeout in seconds between calls when retrying at 500-responses. Attributes ---------- secret : str key : str token : Token or None quota_per_hour_used : int quota_per_week_used : int """ HAS_FULLTEXT = {'EP'} def __init__(self, accept_type='xml', key=None, secret=None, cache=False, cache_kwargs=None, max_retries=1, retry_timeout=10): try: _check_epoclient_input(accept_type, key, secret, cache, cache_kwargs, max_retries, retry_timeout) except AssertionError as e: raise ValueError(str(e)) if accept_type.startswith('application/'): self.accept_type = accept_type else: self.accept_type = 'application/{}'.format(accept_type) if cache and _HAS_CACHE: logging.info('Installs cache.') requests_cache.install_cache(**(cache_kwargs or dict())) elif cache: raise ValueError('cache is set to True but requests_cache ' 'is not available.') self.secret = secret self.key = key self.max_retries = max_retries self.retry_timeout = retry_timeout self.quota_per_hour_used = 0 self.quota_per_week_used = 0 if all([secret, key]): logging.debug('Auth provided.') self.token = self.authenticate() else: logging.debug('Auth not provided') self.token = None self._last_call = { 'search': None, 'retrieval': None, 'inpadoc': None, 'images': None, 'other': None } self._next_call = self._last_call.copy() def fetch(self, service, ref_type, api_input, endpoint='', options=None, extra_headers=None): """ Generic function to fetch data from the EPO-OPS API. Parameters ---------- service : epo_utils.ops.Services OPS-service to fetch from. ref_type : epo_utils.ops.ReferenceType OPS-reference type of data to fetch. api_input : APIInput, list[APIInput] Input to API-call. endpoint : str API-endpoint to call. options : list, optional API-call constitents. extra_headers : dict, optional Additional or custom headers to be used. use_post : bool If True, POST will be used for request. Returns ------- requests.Response """ if not isinstance(ref_type, ReferenceType): raise ValueError('invalid ref_type: {}'.format(ref_type)) if not isinstance(service, Services): raise ValueError('invalid service: {}'.format(service)) if endpoint not in VALID_ENDPOINTS: raise ValueError('invalid endpoint: {}'.format(endpoint)) try: input_text = ','.join(i.to_id() for i in api_input) except TypeError: input_text = api_input.to_id() id_types = {api_input.id_type} else: id_types = {i.id_type for i in api_input} if len(id_types) > 1: raise ValueError('non-matching id-types') options = options or list() url = build_ops_url(service, ref_type, id_types.pop(), endpoint, options) headers = self._make_headers(extra_headers) logging.debug('Makes request to: {}\nheaders: {}'.format(url, headers)) logging.info('fetches {}'.format(input_text)) try: response = self.post('retrieval', url, input_text, headers=headers) except requests.HTTPError as e: if e.response.status_code == requests.codes.not_found: logging.error('{} not found'.format(input_text)) raise FetchFailed(input_text) else: raise logging.info('Fetch succeeded.') return response def search(self, query, fetch_range, service=Services.PublishedSearch, endpoint='', extra_headers=None): """ Post a GET-search query. Parameters ---------- query : str Query string. fetch_range : tuple[int, int] Get entries `fetch_range[0]` to `fetch_range[1]`. 
service : Services Which service to use for search. endpoint : str, list[str] Endpoint(s) to search. extra_headers : dict, optional Additional or custom headers to be used. Returns ------- requests.Response """ if not isinstance(service, Services): raise ValueError('invalid service: {}'.format(service)) if not isinstance(endpoint, (list, tuple)): endpoint = [endpoint] if not all(e in VALID_ENDPOINTS for e in endpoint): invalid = filter(lambda e: e not in VALID_ENDPOINTS, endpoint) raise ValueError('invalid endpoint: {}'.format(next(invalid))) if not len(fetch_range) == 2 \ and all(isinstance(i, int) for i in fetch_range): raise ValueError('invalid fetch_range: {}'.format(fetch_range)) headers = self._make_headers( {'Accept': 'application/exchange+xml', 'X-OPS-Range': '{}-{}'.format(*fetch_range)} ) headers.update(extra_headers or dict()) url = build_ops_url(service, options=endpoint) logging.info('Sends query: {}'.format(query)) response = self.post('search', url, headers=headers, data={'q': query}) logging.info('Query successful.') return response def authenticate(self): """ If EPO-OPS customer key and secret is available get access-token. Returns ------- token : Token Token and expiration time. """ if not all([self.secret, self.key]): return None logging.info('Attempts to authenticate.') # Post base 64-encoded credentials to get access-token. credentials = '{0}:{1}'.format(self.key, self.secret) encoded_creds = b64encode(credentials.encode('ascii')).decode('ascii') headers = {'Authorization': 'Basic {}'.format(encoded_creds)} payload = {'grant_type': 'client_credentials'} response = requests.post(AUTH_URL, headers=headers, data=payload) response.raise_for_status() logging.info('Authentication succeeded.') # Parse response. content = response.json() token = content['access_token'] expires_in = int(content['expires_in']) expires = datetime.now() + timedelta(seconds=expires_in) token = Token(token, expires) return token def post(self, service, *args, **kwargs): """ Makes an auto-throttled POST to the OPS-API. Parameters ---------- service : str OPS-system called. *args Positional arguments passed to :py:`requests.post` **kwargs Keyword arguments passed to :py:`requests.post` Returns ------- requests.Response """ logging.debug( '{} POST\nargs: {}\nkwargs: {}'.format(service,args, kwargs)) response = self._retry(self._throttled_call, service, requests.post, *args, **kwargs) return response def get(self, service, *args, **kwargs): """ Makes an auto-throttled GET-call to the OPS-API. Parameters ---------- service : str OPS-system called. *args Positional arguments passed to :py:`requests.get` **kwargs Keyword arguments passed to :py:`requests.get` Returns ------- requests.Response """ logging.debug( '{} GET\nargs: {}\nkwargs: {}'.format(service, args, kwargs)) response = self._retry(self._throttled_call, service, requests.get, *args, **kwargs) return response def _retry(self, request, *args, **kwargs): """ Wrap `request` with retries at 500-responses. Parameters ---------- request : Callable Function which calls the OPS-API using `*args` and `**kwargs`. *args Positional arguments passed to `request` **kwargs Keyword arguments passed to :py:`request` Returns ------- Any result from `request` """ for attempts_left in range(self.max_retries + 1, -1, -1): try: result = request(*args, **kwargs) except requests.HTTPError as e: if e.response.status_code >= 500 and attempts_left > 0: logging.info( 'Server error ({} attempts left). 
Timeouts and retries ' 'in {}.'.format(attempts_left, self.retry_timeout)) time.sleep(self.retry_timeout) else: raise else: break return result def _throttled_call(self, service, request, *args, **kwargs): """ Wrap `request` with auto-throttle. Parameters ---------- service : str OPS-service to call. request : Callable Function which calls the OPS-API using `*args` and `**kwargs`. *args Positional arguments passed to `request` **kwargs Keyword arguments passed to :py:`request` Returns ------- requests.Response """ logging.debug('Throttle with: {}'.format(service)) if service not in self._last_call: raise ValueError('Invalid service: {}'.format(service)) next_call = self._next_call[service] now = datetime.now() if next_call is not None and now < next_call: diff = next_call - now time.sleep(diff.seconds + diff.microseconds / 1e6) self._last_call[service] = datetime.now() response = request(*args, **kwargs) try: response.raise_for_status() except requests.HTTPError as error: if error.response.status_code == requests.codes.forbidden: raise_for_quota_rejection(error.response) raise error # Non-quota related rejection. # The OPS-API sets its request-limit by minute, which is updated # for each call. Therefore, the throttling delay is set to # 60 sec / calls per minute. throttle_header = response.headers['X-Throttling-Control'] pattern = r'{}=([a-z]+):(\d+)'.format(service) color, n_str = re.search(pattern, throttle_header).groups() n_per_min = int(n_str) delay = 60.0 / n_per_min # Delay in seconds. seconds = int(delay) milliseconds = int((delay - seconds) * 1e3) next_delta = timedelta(seconds=seconds, milliseconds=milliseconds) self._next_call[service] = self._last_call[service] + next_delta # Update quota used. q_per_h = int(response.headers['X-IndividualQuotaPerHour-Used']) q_per_w = int(response.headers['X-RegisteredQuotaPerWeek-Used']) self.quota_per_hour_used = q_per_h self.quota_per_week_used = q_per_w return response def _make_headers(self, extras=None): """ Prepare request headers. Parameters ---------- extras : dict, optional Extra headers which should be used. Returns ------- dict """ headers = {'Accept': self.accept_type} if self.token is not None: if self.token is None or datetime.now() > self.token.expires: # Refresh token if is expired or missing. self.token = self.authenticate() headers['Authorization'] = 'Bearer {}'.format(self.token.token) headers.update(extras or dict()) return headers def build_ops_url(service, reference_type=None, id_type=None, endpoint=None, options=None): """ Prepare an url for calling the OPS-API. If `only_input_format` is False the URL will be formatted as:: :py:const:`URL_PREFIX`/service/reference-type/inputformat/input/[endpoint]/[constituent(s)] Otherwise it will be formatted:: :py:const:`URL_PREFIX`/service/reference-type/inputformat/[endpoint]/[constituent(s)] Parameters ---------- service : Services OPS-service. reference_type : ReferenceType, optional Reference type to call. endpoint : str, optional Optional endpoint. options : list, optional Optional constituents. Returns ------- url : str Formatted url. 
""" url_parts = [ URL_PREFIX, service.value if service is not None else None, reference_type.value if reference_type is not None else None, id_type if input is not None else None, endpoint, ','.join(options) if options is not None else None ] present_parts = filter(None, url_parts) url = '/'.join(present_parts) logging.debug('Built url: {}'.format(url)) return url def raise_for_quota_rejection(response): """ Check the response for "X-Rejection-Reason"-header and raise if quota exceeded. Parameters ---------- response : requests.Response Response-object to check. Returns ------- None If quota isn't exceeded. Raises ------ QuotaPerWeekExceeded If rejection header is "RegisteredQuotaPerWeek". QuotaPerHourExceeded If rejection header is "IndividualQuotaPerHour" """ if response.status_code != requests.codes.forbidden: return rejection = response.headers.get('X-Rejection-Reason', None) if rejection is None: return if rejection == 'RegisteredQuotaPerWeek': logging.error('quota per week exceeded') raise QuotaPerWeekExceeded(response.text) elif rejection == 'IndividualQuotaPerHour': logging.error('quota per hour exceeded') raise QuotaPerHourExceeded(response.text) else: # Anonymous user-headers skipped since anonymous use will be # discontinued and this package does not support anyways. return def _check_epoclient_input(accept_type, key, secret, cache, cache_kwargs, max_retries, retry_timeout): """ Check input for :class:`EPOClient`. Parameters ---------- accept_type : str Http accept type. key : str, optional EPO OPS user key. secret : str, optional EPO OPS user secret. cache : bool If True, try to use `requests_cache` for caching. Default False. cache_kwargs : dict, optional. Passed to :py:func:`requests_cache.install_cache` as keyword arguments if provided. max_retries : int Number of allowed retries at 500-responses. retry_timeout : float Timeout in seconds between calls when retrying at 500-responses. Raises ------- AssertionError If input is bad. """ assert isinstance(accept_type, str), 'accept_type must be str' assert isinstance(key, str), 'key must be str' assert isinstance(secret, str), 'secret must be str' assert isinstance(cache, bool), 'cache must be boolean' assert isinstance(cache_kwargs, dict) or cache_kwargs is None, \ 'cache_kwargs must be dict or None' assert isinstance(max_retries, int) and max_retries >= 0, \ 'max_retries must be non-negative integer' assert isinstance(retry_timeout, (float, int)) and max_retries >= 0,\ 'retry_timeout must be non-negative number'
clicumu/epo_utils
epo_utils/api.py
Python
mit
20,708
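For readability, a minimal usage sketch of the epo_utils API shown above follows. It is hypothetical: the key/secret values are placeholders, and the Services.Published and ReferenceType.Publication members and the 'biblio' endpoint are assumed names for illustration (only Services.PublishedSearch is confirmed by the source).

# Hypothetical usage sketch of epo_utils.api; the credentials, Services.Published,
# ReferenceType.Publication and the 'biblio' endpoint are assumptions.
from epo_utils.api import APIInput, EPOClient
from epo_utils.ops import Services, ReferenceType

client = EPOClient(accept_type='xml', key='MY_OPS_KEY', secret='MY_OPS_SECRET')

# CQL search over published documents, returning results 1-25.
search_response = client.search('ti=graphene', fetch_range=(1, 25))

# Fetch bibliographic data for one epodoc-formatted publication number.
document = APIInput('epodoc', 'EP1000000')
fetch_response = client.fetch(Services.Published, ReferenceType.Publication,
                              document, endpoint='biblio')
print(search_response.status_code, fetch_response.status_code)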
import logging
import message
import os
import random
import datetime
import time
import collections

from google.appengine.ext.webapp import template

try:
    import simplejson as json
except ImportError:
    import json

from abstract_app import AbstractApp


class CmdrData(AbstractApp):

    # A user who has authorized your app has checked in. This runs inside
    # AppEngine's task queue, and contains the check-in payload for you to
    # process.
    def checkinTaskQueue(self, client, checkin_json):
        logging.debug('Current checkin: %s', checkin_json)
        user_id = checkin_json['user']['id']
        categories = checkin_json['venue']['categories']
        category_name = find_primary_category(categories)

        now = datetime.datetime.now()
        tsd = datetime.timedelta(days=7)
        t = now - tsd
        epoch_seconds = int(time.mktime(t.timetuple()))
        limit = 100
        parameters = {'limit': limit, 'afterTimestamp': epoch_seconds}
        week_checkins = client.users.checkins(user_id, parameters)
        logging.debug('Received the following JSON response from 4sq: %s',
                      week_checkins)

        checkins = week_checkins['checkins']['items']
        frequency = collections.defaultdict(int)
        for c in checkins:
            categories = c['venue']['categories']
            frequency[find_primary_category(categories)] += 1

        message_text = message.GetText(category_name, frequency[category_name])
        if message_text:
            self.makeContentInfo(
                checkin_json=checkin_json,
                content=json.dumps({}),
                text=message_text,
                reply=True)


def find_primary_category(categories):
    for category in categories:
        if category['primary']:
            return category['name']
PullRequestFive/CmdrData
cmdrdata/cmdrdata.py
Python
mit
1,690
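The heart of checkinTaskQueue above is a per-category frequency count over the last week of check-ins. Below is a self-contained sketch of just that counting step, using a made-up payload in place of the Foursquare API response.

# Self-contained sketch of the category-frequency logic from checkinTaskQueue;
# the sample check-in payload is invented for illustration.
import collections

def find_primary_category(categories):
    for category in categories:
        if category['primary']:
            return category['name']

week_checkins = [
    {'venue': {'categories': [{'name': 'Coffee Shop', 'primary': True}]}},
    {'venue': {'categories': [{'name': 'Coffee Shop', 'primary': True}]}},
    {'venue': {'categories': [{'name': 'Gym', 'primary': True}]}},
]

frequency = collections.defaultdict(int)
for checkin in week_checkins:
    frequency[find_primary_category(checkin['venue']['categories'])] += 1

print(dict(frequency))  # {'Coffee Shop': 2, 'Gym': 1}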
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding unique constraint on 'Ratings', fields ['user', 'page'] db.create_unique('api_ratings', ['user_id', 'page_id']) def backwards(self, orm): # Removing unique constraint on 'Ratings', fields ['user', 'page'] db.delete_unique('api_ratings', ['user_id', 'page_id']) models = { 'api.blacklistitem': { 'Meta': {'unique_together': "(('user', 'url'),)", 'object_name': 'BlackListItem'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 5, 11, 0, 0)'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'port': ('django.db.models.fields.IntegerField', [], {'default': '80'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'api.chatmessage': { 'Meta': {'object_name': 'ChatMessage'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'author'", 'to': "orm['auth.User']"}), 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '2000'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '300'}) }, 'api.domain': { 'Meta': {'object_name': 'Domain'}, 'agg_score': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}), 'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}) }, 'api.eyehistory': { 'Meta': {'object_name': 'EyeHistory'}, 'domain': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}), 'end_event': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}), 'end_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'favIconUrl': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}), 'favicon_url': ('django.db.models.fields.TextField', [], {'default': "''"}), 'humanize_time': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['api.Page']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'src': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}), 'start_event': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}), 'start_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000'}), 'total_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'api.eyehistorymessage': { 'Meta': {'ordering': "['-post_time']", 'object_name': 'EyeHistoryMessage'}, 'eyehistory': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['api.EyeHistory']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'highlight': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['api.Highlight']", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300'}), 'parent_comment': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'post_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, 'api.eyehistoryraw': { 'Meta': {'object_name': 'EyeHistoryRaw'}, 'domain': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}), 'end_event': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}), 'end_time': ('django.db.models.fields.DateTimeField', [], {}), 'favIconUrl': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}), 'favicon_url': ('django.db.models.fields.TextField', [], {'default': "''"}), 'humanize_time': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'src': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}), 'start_event': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}), 'start_time': ('django.db.models.fields.DateTimeField', [], {}), 'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000'}), 'total_time': ('django.db.models.fields.IntegerField', [], {}), 'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'api.highlight': { 'Meta': {'object_name': 'Highlight'}, 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'highlight': ('django.db.models.fields.CharField', [], {'max_length': '10000'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['api.Page']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'api.mutelist': { 'Meta': {'object_name': 'MuteList'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'null': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'word': ('django.db.models.fields.URLField', [], {'max_length': '300', 'null': 'True'}) }, 'api.page': { 'Meta': {'object_name': 'Page'}, 'agg_score': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'default': "''"}), 'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['api.Domain']"}), 'favIconUrl': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}), 'favicon_url': ('django.db.models.fields.TextField', [], {'default': "''"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'img_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}), 'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000'}), 'url': 
('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}) }, 'api.popularhistory': { 'Meta': {'unique_together': "(('user', 'popular_history'),)", 'object_name': 'PopularHistory'}, 'avg_time_ago': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'avg_time_spent_score': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'eye_hists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['api.EyeHistory']", 'symmetrical': 'False'}), 'humanize_avg_time': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'messages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['api.EyeHistoryMessage']", 'symmetrical': 'False'}), 'num_comment_score': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'popular_history': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['api.PopularHistoryInfo']"}), 'top_score': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'total_time_ago': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'total_time_spent': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'unique_visitor_score': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}), 'visitors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'pophist_visitors'", 'symmetrical': 'False', 'to': "orm['auth.User']"}) }, 'api.popularhistoryinfo': { 'Meta': {'object_name': 'PopularHistoryInfo'}, 'description': ('django.db.models.fields.TextField', [], {'default': "''"}), 'domain': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100'}), 'favIconUrl': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}), 'favicon_url': ('django.db.models.fields.TextField', [], {'default': "''"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'img_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['api.Page']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000'}), 'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}) }, 'api.ratings': { 'Meta': {'unique_together': "(('user', 'page'),)", 'object_name': 'Ratings'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['api.Page']"}), 'score': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'api.summary': { 'Meta': {'object_name': 'Summary'}, 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_editor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['api.Page']"}), 'summary': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000'}) }, 'api.summaryhistory': { 'Meta': 
{'object_name': 'SummaryHistory'}, 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_summary': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000'}), 'previous_summary': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000'}), 'summary': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['api.Summary']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}) }, 'api.whitelistitem': { 'Meta': {'unique_together': "(('user', 'url'),)", 'object_name': 'WhiteListItem'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 5, 11, 0, 0)'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'port': ('django.db.models.fields.IntegerField', [], {'default': '80'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': 
"'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['api']
haystack/eyebrowse-server
api/migrations/0046_auto__add_unique_ratings_user_page.py
Python
mit
16,679
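The migration above does one thing: add (and, on rollback, drop) a unique constraint on api_ratings(user_id, page_id). The sqlite3 sketch below (deliberately not using South or Django) shows the behaviour that constraint enforces.

# Standalone sketch of the effect of the unique constraint added by the
# migration above; table and column names are taken from the migration.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE api_ratings '
             '(id INTEGER PRIMARY KEY, user_id INTEGER, page_id INTEGER, score INTEGER)')
conn.execute('CREATE UNIQUE INDEX api_ratings_user_page '
             'ON api_ratings (user_id, page_id)')

conn.execute('INSERT INTO api_ratings (user_id, page_id, score) VALUES (1, 10, 5)')
try:
    # A second rating by the same user on the same page is now rejected.
    conn.execute('INSERT INTO api_ratings (user_id, page_id, score) VALUES (1, 10, 3)')
except sqlite3.IntegrityError as exc:
    print('rejected duplicate rating:', exc)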
""" https://open.kattis.com/problems/easiest """ import sys def sum_digits(number): sum_of_digits = 0 while number: sum_of_digits, number = sum_of_digits + number % 10, number // 10 return sum_of_digits for line in sys.stdin: n = int(line) if n == 0: break p = 11 while True: if sum_digits(n) == sum_digits(n * p): print(p) break p += 1
cstewart90/kattis-python
easiest/easiest.py
Python
mit
428
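A worked example of the digit-sum search in easiest.py above. The find_p helper is introduced here purely for illustration; it wraps the same loop that starts at p = 11.

# Worked example of the digit-sum search from easiest.py; find_p is a
# hypothetical helper name, not part of the original file.
def sum_digits(number):
    sum_of_digits = 0
    while number:
        sum_of_digits, number = sum_of_digits + number % 10, number // 10
    return sum_of_digits

def find_p(n):
    p = 11
    while sum_digits(n) != sum_digits(n * p):
        p += 1
    return p

assert sum_digits(1234) == 10
assert find_p(2) == 55   # 2 * 55 = 110, and 1 + 1 + 0 == 2 == sum_digits(2)
print(find_p(2))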
#!/usr/bin/python import pygame import random import math from lib.GameObjects import Player, Enemy from lib.Globals import Vars, FixPath, UnitType, TileImage from lib.Pathing import MovementPath from lib.EventHandler import JoyHandler, KeyHandler from lib.Effects import Explosion from lib.Menu import * from lib.Logger import * class LevelState(): MAINMENU = 0 MENU = 1 GAME = 2 GAMEOVER = 3 ## TODO: Level storage and loading. class LevelBase(): def __init__(self, level_id=None): self.LevelID = level_id self.KeyHandle = KeyHandler(str(self.LevelID)) self.JoyHandle = JoyHandler(str(self.LevelID)) self.Background = None self.GameOver = False self.GameOverTimer = 2 * 60 def reset(self): self.GameOver = False self.GameOverTimer = 2 * 60 def init_controls(self): raise NotImplementedError def update(self): raise NotImplementedError def draw(self, screen): raise NotImplementedError class LevelControl(): def __init__(self): ## Sorted list of LevelData() objects self.LoadedLevels = [] self.CurrentLevel = None self.LevelState = None self.MainMenu = SimpleMenu() self.MainMenu.set_title('Main Menu') self.MainMenu.add_item(10, 'Exit', self.m_exit) self.LevelMenu = SimpleMenu() self.LevelMenu.set_title('Paused') self.LevelMenu.add_item(1, 'Resume', self.m_resume) self.LevelMenu.add_item(2, 'Exit to main menu', self.show_main_menu) self.GameOverMenu = SimpleMenu() self.GameOverMenu.set_title('Game Over') self.GameOverMenu.add_item(1, 'Return to main menu', self.show_main_menu) self.GameOverMenu.add_item(2, 'Exit', self.m_exit) Info('LevelControl init finished.') self.show_main_menu() def show_level_menu(self): self.LevelState = LevelState.MENU self.LevelMenu.reset() vars = Vars() vars.CurrentHandler = self.LevelMenu.KeyHandle vars.CurrentHandler_js = self.LevelMenu.JoyHandle bg = pygame.Surface(pygame.display.get_surface().get_size()) self.CurrentLevel.draw(bg) self.LevelMenu.set_background(bg) def show_game_over_menu(self): self.LevelState = LevelState.GAMEOVER self.GameOverMenu.reset() vars = Vars() vars.CurrentHandler = self.GameOverMenu.KeyHandle vars.CurrentHandler_js = self.GameOverMenu.JoyHandle bg = pygame.Surface(pygame.display.get_surface().get_size()) self.CurrentLevel.draw(bg) self.GameOverMenu.set_background(bg) def debug_dump(self): Debug(" == Dumping loaded levels ==") for l in self.LoadedLevels: Debug(str(l.LevelID)) Debug(" == End Dump ==") def m_resume(self): self.LevelState = LevelState.GAME vars = Vars() vars.CurrentHandler = self.CurrentLevel.KeyHandle vars.CurrentHandler_js = self.CurrentLevel.JoyHandle def show_main_menu(self): self.LevelState = LevelState.MAINMENU self.MainMenu.reset() vars = Vars() vars.CurrentHandler = self.MainMenu.KeyHandle vars.CurrentHandler_js = self.MainMenu.JoyHandle vars.CurrentHandler.add_keydown_handle(pygame.K_a, self.debug_dump) def m_exit(self): vars = Vars() vars.Running = False def start_level(self, levelid): Debug("Attempting to start level '" + str(levelid) + "'") self.LevelState = LevelState.GAME vars = Vars() for l in self.LoadedLevels: if l.LevelID == levelid: self.CurrentLevel = l vars.CurrentHandler = l.KeyHandle vars.CurrentHandler_js = l.JoyHandle self.CurrentLevel.reset() self.CurrentLevel.init_controls() self.CurrentLevel.KeyHandle.add_keydown_handle(pygame.K_ESCAPE, self.show_level_menu) self.CurrentLevel.JoyHandle.add_joydown_handle(9, self.show_level_menu) self.LevelState = LevelState.GAME def preload_level(self, levelObj): if isinstance(levelObj, LevelBase): self.LoadedLevels.append(levelObj) 
self.MainMenu.add_item(len(self.LoadedLevels) + 1, levelObj.LevelID, self.start_level, levelObj.LevelID) Debug("Loading level '" + str(levelObj.LevelID) + "'\n\tTotal loaded: " + str(len(self.LoadedLevels))) else: raise TypeError("Level is not correct type in load_level()!") def update(self): if self.LevelState is not LevelState.GAMEOVER and self.LevelState is LevelState.GAME and self.CurrentLevel is not None and (self.CurrentLevel.GameOver and self.CurrentLevel.GameOverTimer <= 0): Warn("Setting level state to GameOver") self.show_game_over_menu() if self.LevelState is LevelState.MAINMENU: self.MainMenu.update() elif self.LevelState is LevelState.MENU: self.LevelMenu.update() elif self.LevelState is LevelState.GAME: if self.CurrentLevel is not None: self.CurrentLevel.update() else: raise RuntimeError("CurrentLevel is not set!") elif self.LevelState is LevelState.GAMEOVER: self.GameOverMenu.update() def draw(self, screen): if self.LevelState is LevelState.GAME: if self.CurrentLevel is not None: self.CurrentLevel.draw(screen) else: Warn("CurrentLevel is None! returning to the main menu.") self.LevelState = LevelState.MAINMENU self.MainMenu.draw(screen) elif self.LevelState is LevelState.MENU: self.LevelMenu.draw(screen) elif self.LevelState is LevelState.MAINMENU: self.MainMenu.draw(screen) elif self.LevelState is LevelState.GAMEOVER: self.GameOverMenu.draw(screen) else: raise NotImplementedError("Whoops. From LevelControl.draw().") class DefaultLevel(LevelBase): def __init__(self): LevelBase.__init__(self, 'Default Level') ## Actual level stuff now self.SpawnInterval = 120 self.NextSpawn = 0 self.Enemies = pygame.sprite.Group() self.Player = Player() self.Projectiles = pygame.sprite.Group() ## Load the background and pre-calculate its dimensions self.Background = TileImage('png/Backgrounds/purple.png') self.rand = random.Random() self.init_controls() def reset(self): super().reset() self.Enemies.empty() self.Projectiles.empty() self.Player.reset() self.init_controls() def init_controls(self): self.KeyHandle.clear_all() self.KeyHandle.add_keyhold_handle(pygame.K_SPACE, self.Player.ToggleFire) self.KeyHandle.add_keyhold_handle(pygame.K_LEFT, self.Player.MoveLeft) self.KeyHandle.add_keyhold_handle(pygame.K_RIGHT, self.Player.MoveRight) self.KeyHandle.add_keyhold_handle(pygame.K_UP, self.Player.MoveUp) self.KeyHandle.add_keyhold_handle(pygame.K_DOWN, self.Player.MoveDown) self.KeyHandle.add_keydown_handle(pygame.K_b, self.Player.FireBomb) self.JoyHandle.clear_all() self.JoyHandle.add_joyhold_handle('hatposx', self.Player.MoveRight) self.JoyHandle.add_joyhold_handle('hatnegx', self.Player.MoveLeft) self.JoyHandle.add_joyhold_handle('hatposy', self.Player.MoveUp) self.JoyHandle.add_joyhold_handle('hatnegy', self.Player.MoveDown) self.JoyHandle.add_joyhold_handle(0, self.Player.ToggleFire) self.JoyHandle.add_joydown_handle(2, self.Player.FireBomb) def update(self): if self.NextSpawn <= 0: x = self.rand.randint( pygame.display.get_surface().get_rect().left + 50, pygame.display.get_surface().get_rect().right - 50 ) y = 0 mirror = False if x % 2 == 0: mirror = True e = Enemy() p = MovementPath(e) p.load_path(FixPath('data/CurveDownPath.dat'), (x, y), 2, mirror) e.set_path(p) self.Enemies.add(e) self.NextSpawn = self.SpawnInterval else: self.NextSpawn -= 1 self.Enemies.update() if not self.GameOver: self.Player.update() self.Projectiles.update() ## TODO: projectile collision from non-player vehicles collisions = pygame.sprite.groupcollide(self.Enemies, self.Player.Projectiles, True, False) for sp in 
collisions: self.Projectiles.add(Explosion(UnitType.PLAYER, sp.rect.center)) if not self.Player.dead: for enemy in self.Enemies: col = pygame.sprite.spritecollideany(self.Player, enemy.Projectiles) if col != None: Debug("{}".format(self.Player.rect.center)) self.Projectiles.add(Explosion(UnitType.ENEMY, self.Player.rect.center)) self.Player.dead = True self.GameOver = True else: self.GameOverTimer -= 1 if self.GameOverTimer < 0: #Warn("GAME OVER") self.LevelState = LevelState.GAMEOVER def draw(self, screen): screen.blit(self.Background, (0,0)) #self.Enemies.draw(screen) for e in self.Enemies: e.draw(screen) self.Player.draw(screen) self.Projectiles.draw(screen)
zorchenhimer/NoudaEngine
lib/Level.py
Python
mit
9,721
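LevelControl above is essentially a state machine: update() and draw() dispatch on the current LevelState. The pygame-free sketch below shows that dispatch pattern in isolation; the string return values are illustrative only.

# Minimal, pygame-free sketch of the LevelState dispatch used by
# LevelControl.update() above.
class LevelState:
    MAINMENU = 0
    MENU = 1
    GAME = 2
    GAMEOVER = 3

class TinyLevelControl:
    def __init__(self):
        self.state = LevelState.MAINMENU

    def update(self):
        # Dispatch on the current state, mirroring LevelControl.update().
        if self.state == LevelState.MAINMENU:
            return 'updating main menu'
        elif self.state == LevelState.MENU:
            return 'updating pause menu'
        elif self.state == LevelState.GAME:
            return 'updating current level'
        elif self.state == LevelState.GAMEOVER:
            return 'updating game-over menu'

control = TinyLevelControl()
print(control.update())          # updating main menu
control.state = LevelState.GAME
print(control.update())          # updating current level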
""" homeassistant.components.device_tracker.actiontec ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Device tracker platform that supports scanning an Actiontec MI424WR (Verizon FIOS) router for device presence. This device tracker needs telnet to be enabled on the router. Configuration: To use the Actiontec tracker you will need to add something like the following to your config/configuration.yaml. If you experience disconnects you can modify the home_interval variable. device_tracker: platform: actiontec host: YOUR_ROUTER_IP username: YOUR_ADMIN_USERNAME password: YOUR_ADMIN_PASSWORD # optional: home_interval: 10 Variables: host *Required The IP address of your router, e.g. 192.168.1.1. username *Required The username of an user with administrative privileges, usually 'admin'. password *Required The password for your given admin account. home_interval *Optional If the home_interval is set then the component will not let a device be AWAY if it has been HOME in the last home_interval minutes. This is in addition to the 3 minute wait built into the device_tracker component. """ import logging from datetime import timedelta from collections import namedtuple import re import threading import telnetlib import homeassistant.util.dt as dt_util from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD from homeassistant.helpers import validate_config from homeassistant.util import Throttle, convert from homeassistant.components.device_tracker import DOMAIN # Return cached results if last scan was less then this time ago MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5) # interval in minutes to exclude devices from a scan while they are home CONF_HOME_INTERVAL = "home_interval" _LOGGER = logging.getLogger(__name__) _LEASES_REGEX = re.compile( r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})' + r'\smac:\s(?P<mac>([0-9a-f]{2}[:-]){5}([0-9a-f]{2}))') # pylint: disable=unused-argument def get_scanner(hass, config): """ Validates config and returns a DD-WRT scanner. """ if not validate_config(config, {DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]}, _LOGGER): return None scanner = ActiontecDeviceScanner(config[DOMAIN]) return scanner if scanner.success_init else None Device = namedtuple("Device", ["mac", "ip", "last_update"]) class ActiontecDeviceScanner(object): """ This class queries a an actiontec router for connected devices. Adapted from DD-WRT scanner. """ def __init__(self, config): self.host = config[CONF_HOST] self.username = config[CONF_USERNAME] self.password = config[CONF_PASSWORD] minutes = convert(config.get(CONF_HOME_INTERVAL), int, 0) self.home_interval = timedelta(minutes=minutes) self.lock = threading.Lock() self.last_results = [] # Test the router is accessible data = self.get_actiontec_data() self.success_init = data is not None _LOGGER.info("actiontec scanner initialized") if self.home_interval: _LOGGER.info("home_interval set to: %s", self.home_interval) def scan_devices(self): """ Scans for new devices and return a list containing found device ids. """ self._update_info() return [client.mac for client in self.last_results] def get_device_name(self, device): """ Returns the name of the given device or None if we don't know. """ if not self.last_results: return None for client in self.last_results: if client.mac == device: return client.ip return None @Throttle(MIN_TIME_BETWEEN_SCANS) def _update_info(self): """ Ensures the information from the Actiontec MI424WR router is up to date. Returns boolean if scanning successful. 
""" _LOGGER.info("Scanning") if not self.success_init: return False with self.lock: exclude_targets = set() exclude_target_list = [] now = dt_util.now() if self.home_interval: for host in self.last_results: if host.last_update + self.home_interval > now: exclude_targets.add(host) if len(exclude_targets) > 0: exclude_target_list = [t.ip for t in exclude_targets] actiontec_data = self.get_actiontec_data() if not actiontec_data: return False self.last_results = [] for client in exclude_target_list: if client in actiontec_data: actiontec_data.pop(client) for name, data in actiontec_data.items(): device = Device(data['mac'], name, now) self.last_results.append(device) self.last_results.extend(exclude_targets) _LOGGER.info("actiontec scan successful") return True def get_actiontec_data(self): """ Retrieve data from Actiontec MI424WR and return parsed result. """ try: telnet = telnetlib.Telnet(self.host) telnet.read_until(b'Username: ') telnet.write((self.username + '\n').encode('ascii')) telnet.read_until(b'Password: ') telnet.write((self.password + '\n').encode('ascii')) prompt = telnet.read_until( b'Wireless Broadband Router> ').split(b'\n')[-1] telnet.write('firewall mac_cache_dump\n'.encode('ascii')) telnet.write('\n'.encode('ascii')) telnet.read_until(prompt) leases_result = telnet.read_until(prompt).split(b'\n')[1:-1] telnet.write('exit\n'.encode('ascii')) except EOFError: _LOGGER.exception("Unexpected response from router") return except ConnectionRefusedError: _LOGGER.exception("Connection refused by router," + " is telnet enabled?") return None devices = {} for lease in leases_result: match = _LEASES_REGEX.search(lease.decode('utf-8')) if match is not None: devices[match.group('ip')] = { 'ip': match.group('ip'), 'mac': match.group('mac').upper() } return devices
EricRho/home-assistant
homeassistant/components/device_tracker/actiontec.py
Python
mit
6,633
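get_actiontec_data() above logs into the router over telnet and parses the firewall mac_cache_dump output with _LEASES_REGEX. The standalone sketch below shows just the parsing step; the sample lease line is made up, the regex is copied from the component.

# Standalone sketch of the lease-line parsing done in get_actiontec_data();
# the sample line is invented for illustration.
import re

_LEASES_REGEX = re.compile(
    r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})' +
    r'\smac:\s(?P<mac>([0-9a-f]{2}[:-]){5}([0-9a-f]{2}))')

sample = '192.168.1.23 mac: 00:1a:2b:3c:4d:5e'
match = _LEASES_REGEX.search(sample)
if match is not None:
    device = {'ip': match.group('ip'), 'mac': match.group('mac').upper()}
    print(device)  # {'ip': '192.168.1.23', 'mac': '00:1A:2B:3C:4D:5E'}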
from ..sqlclear import SQLClearCommand as Command
skibblenybbles/django-commando
commando/django/core/management/commands/sqlclear.py
Python
mit
50
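Some context for the one-line module above: Django's management-command loader imports the class named Command from management/commands/<name>.py, so re-exporting SQLClearCommand under that name is enough to override manage.py sqlclear. The stand-in below is hypothetical and only illustrates the same pattern.

# Hypothetical stand-in (not the real commando class): any BaseCommand subclass
# exposed as `Command` in management/commands/sqlclear.py replaces the stock command.
from django.core.management.base import BaseCommand

class SQLClearCommand(BaseCommand):
    help = 'Illustrative stand-in for the re-exported SQLClearCommand'

    def handle(self, *args, **options):
        self.stdout.write('custom sqlclear logic would run here')

Command = SQLClearCommand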
#!/usr/bin/env ipython # -*- coding: utf-8 -*- from datetime import datetime, time, timedelta import numpy as np import console_colors as ccl from scipy.io.netcdf import netcdf_file from ShiftTimes import ShiftCorrection, ShiftDts import os, argparse import h5py from h5py import File as h5 from numpy import ( mean, median, nanmean, nanmedian, std, nan, isnan, min, max, zeros, ones, size, loadtxt ) from os.path import isfile, isdir if 'DISPLAY' in os.environ: # to avoid crash when running remotely from pylab import figure, savefig, close, find, pause import matplotlib.patches as patches import matplotlib.transforms as transforms #from read_NewTable import tshck, tini_icme, tend_icme, tini_mc, tend_mc, n_icmes, MCsig #from z_expansion_gulisano import z as z_exp _ERROR_ = ccl.Rn+' ### ERROR ###: '+ccl.W def flags2nan(VAR, FLAG): cond = VAR < FLAG VAR = np.array(VAR) VAR[~cond] = np.nan return VAR def date_to_utc(fecha): utc = datetime(1970, 1, 1, 0, 0, 0, 0) sec_utc = (fecha - utc).total_seconds() return sec_utc def selecc_data(data, tshk): time = data[0] #[s] utc sec rate = data[1] day = 86400. # [seg] utc = datetime(1970, 1, 1, 0, 0, 0, 0) tshk_utc = (tshk - utc).total_seconds() ti = tshk_utc - 10.*day # [seg] utc tf = tshk_utc + 30.*day cond = (time > ti) & (time < tf) time = (time[cond] - tshk_utc) / day # [days] since shock rate = rate[cond] return (time, rate) def selecc_window(data, tini, tend): time = data[0] #[s] utc sec y = data[1] day = 86400. # [seg] utc = datetime(1970, 1, 1, 0, 0, 0, 0) tini_utc = (tini - utc).total_seconds() # [s] utc sec tend_utc = (tend - utc).total_seconds() # [s] utc sec ti = tini_utc # [seg] utc tf = tend_utc cond = (time > ti) & (time < tf) time = (time[cond] - tini_utc) / day # [days] since 'ti' y = y[cond] return (time, y) def enoughdata(var, fgap): n = len(var) ngood = len(find(~isnan(var))) fdata = 1.*ngood/n # fraccion de data sin gaps if fdata>=(1.-fgap): return True else: return False def averages_and_std(n_icmes, t_shck, ti_icme, dTday, nbin, t_utc, VAR, fgap): day = 86400. 
nok=0; nbad=0 adap = [] for i in range(n_icmes): dT = (ti_icme[i] - t_shck[i]).total_seconds()/day # [day] if dT>dTday: dt = dT/nbin t, var = selecc_window( [t_utc, VAR], t_shck[i], ti_icme[i] ) if enoughdata(var, fgap): # pido q haya mas del 80% NO sean gaps adap += [adaptar(nbin, dt, t, var)] nok +=1 else: continue else: print " i:%d ---> Este evento es muy chico!, dT/day:%g" % (i, dT) nbad +=1 VAR_adap = zeros(nbin*nok).reshape(nok, nbin) for i in range(nok): VAR_adap[i,:] = adap[i][1] VAR_avrg = zeros(nbin) VAR_std = zeros(nbin) ndata = zeros(nbin) for i in range(nbin): cond = ~isnan(VAR_adap.T[i,:]) ndata[i] = len(find(cond)) # nro de datos != flag VAR_avrg[i] = mean(VAR_adap.T[i,cond]) # promedio entre los valores q no tienen flag VAR_std[i] = std(VAR_adap.T[i,cond]) # std del mismo conjunto de datos tnorm = adap[0][0] return [nok, nbad, tnorm, VAR_avrg, VAR_std, ndata] def adaptar(n, dt, t, r): #n = int(5./dt) # nro de puntos en todo el intervalo de ploteo tt = zeros(n) rr = zeros(n) for i in range(n): tmin = i*dt tmax = (i+1.)*dt cond = (t>tmin) & (t<tmax) tt[i] = mean(t[cond]) rr[i] = mean(r[cond]) return [tt/(n*dt), rr] def adaptar(nwndw, dT, n, dt, t, r): #n = int(5./dt) # nro de puntos en todo el intervalo de ploteo tt = zeros(n) rr = zeros(n) _nbin_ = n/(1+nwndw[0]+nwndw[1]) # nro de bins en la sheath for i in range(n): tmin = (i-nwndw[0]*_nbin_)*dt tmax = tmin + dt cond = (t>tmin) & (t<tmax) tt[i] = mean(t[cond])#; print "tt:", t[i]; pause(1) rr[i] = mean(r[cond]) return [tt/dT, rr] # tiempo normalizado x la duracion de la sheath #@profile def adaptar_ii(nwndw, dT, n, dt, t, r, fgap): tt = zeros(n) rr = zeros(n) _nbin_ = n/(1+nwndw[0]+nwndw[1]) # nro de bins en la sheath/mc cc = (t>0.) & (t<dT) # intervalo de la sheath/mc #print " r[cc]: ", r[cc] if len(r[cc])==0: # no hay data en esta ventana rr = nan*ones(n) enough = False else: enough = enoughdata(r[cc], fgap) # [bool] True si hay mas del 80% de data buena. if not(enough): rr = nan*ones(n) # si no hay suficiente data, este evento no aporta for i in range(n): tmin = (i-nwndw[0]*_nbin_)*dt tmax = tmin + dt cond = (t>=tmin) & (t<=tmax) #tt[i] = mean(t[cond])#; print "tt:", t[i]; pause(1) # bug tt[i] = tmin + .5*dt # bug corregido if enough: #cc = ~isnan(r[cond]) # no olvidemos filtrar los gaps #rr[i] = mean(r[cond][cc]) rr[i] = nanmean(r[cond]) return enough, [tt/dT, rr] # tiempo normalizado x la duracion de la sheath/mc/etc #@profile def selecc_window_ii(nwndw, data, tini, tend): time = data[0] #[s] utc sec y = data[1] day = 86400. # [seg] utc = datetime(1970, 1, 1, 0, 0, 0, 0) tini_utc = (tini - utc).total_seconds() # [s] utc sec tend_utc = (tend - utc).total_seconds() # [s] utc sec dt = tend_utc - tini_utc ti = tini_utc - nwndw[0]*dt # [seg] utc tf = tend_utc + nwndw[1]*dt cond = (time > ti) & (time < tf) time = (time[cond] - tini_utc) / day # [days] since 'ti' y = y[cond] return (time, y) def averages_and_std_ii(nwndw, SELECC, #MCsig, MCwant, n_icmes, tini, tend, dTday, nbin, t_utc, VAR): day = 86400. 
nok=0; nbad=0 adap = [] for i in range(n_icmes): dT = (tend[i] - tini[i]).total_seconds()/day # [day] if ((dT>dTday) & SELECC[i]):# (MCsig[i]>=MCwant)): dt = dT*(1+nwndw[0]+nwndw[1])/nbin t, var = selecc_window_ii( nwndw, # nro de veces hacia atras y adelante [t_utc, VAR], tini[i], tend[i] ) adap += [adaptar(nwndw, dT, nbin, dt, t, var)] # rebinea usando 'dt' como el ancho de nuevo bineo nok +=1 else: print " i:%d ---> Filtramos este evento!, dT/day:%g" % (i, dT) nbad +=1 VAR_adap = zeros(nbin*nok).reshape(nok, nbin) for i in range(nok): VAR_adap[i,:] = adap[i][1] VAR_avrg = zeros(nbin) VAR_medi = zeros(nbin) VAR_std = zeros(nbin) ndata = zeros(nbin) for i in range(nbin): cond = ~isnan(VAR_adap.T[i,:]) ndata[i] = len(find(cond)) # nro de datos != flag VAR_avrg[i] = mean(VAR_adap.T[i,cond]) # promedio entre los valores q no tienen flag VAR_medi[i] = median(VAR_adap.T[i,cond])# mediana entre los valores q no tienen flag VAR_std[i] = std(VAR_adap.T[i,cond]) # std del mismo conjunto de datos tnorm = adap[0][0] return [nok, nbad, tnorm, VAR_avrg, VAR_medi, VAR_std, ndata] def mvs_for_each_event(VAR_adap, nbin, nwndw, Enough, verbose=False): nok = size(VAR_adap, axis=0) mvs = zeros(nok) # valores medios por cada evento binsPerTimeUnit = nbin/(1+nwndw[0]+nwndw[1]) # nro de bines por u. de tiempo start = nwndw[0]*binsPerTimeUnit # en este bin empieza la estructura (MC o sheath) for i in range(nok): aux = VAR_adap[i, start:start+binsPerTimeUnit] # (*) cc = ~isnan(aux) # pick good-data only #if len(find(cc))>1: if Enough[i]: # solo imprimo los q tienen *suficiente data* if verbose: print ccl.G + "id %d/%d: %r"%(i+1, nok, aux[cc]) + ccl.W mvs[i] = mean(aux[cc]) else: mvs[i] = nan #(*): esta es la serie temporal (de esta variable) para el evento "i" pause(1) return mvs def diff_dates(tend, tini): n = len(tend) diffs = np.nan*np.ones(n) for i in range(n): ok = type(tend[i]) == type(tini[i]) == datetime # ambos deben ser fechas! if ok: diffs[i] = (tend[i] - tini[i]).total_seconds() else: diffs[i] = np.nan return diffs #[sec] def write_variable(fout, varname, dims, var, datatype, comments): dummy = fout.createVariable(varname, datatype, dims) dummy[:] = var dummy.units = comments def calc_beta(Temp, Pcc, B): # Agarramos la definicion de OMNI, de: # http://omniweb.gsfc.nasa.gov/ftpbrowser/magnetopause/Reference.html # http://pamela.roma2.infn.it/index.php # Beta = [(4.16*10**-5 * Tp) + 5.34] * Np/B**2 (B in nT) # beta = ((4.16*10**-5 * Temp) + 5.34) * Pcc/B**2 return beta def thetacond(ThetaThres, ThetaSh): """ Set a lower threshold for shock orientation, using Wang's catalog of shocks. NOTE: Near 180Â means very close to the nose! """ if ThetaThres<=0.: print ccl.Rn + ' ----> BAD WANG FILTER!!: ThetaThres<=0.' print ' ----> Saliendo...' 
+ ccl.Rn raise SystemExit #return ones(len(ThetaSh), dtype=bool) else: return (ThetaSh > ThetaThres) def wangflag(ThetaThres): if ThetaThres<0: return 'NaN' else: return str(ThetaThres) def makefig(medVAR, avrVAR, stdVAR, nVAR, tnorm, SUBTITLE, YLIMS, YLAB, fname_fig): fig = figure(1, figsize=(13, 6)) ax = fig.add_subplot(111) ax.plot(tnorm, avrVAR, 'o-', color='black', markersize=5, label='mean') ax.plot(tnorm, medVAR, 'o-', color='red', alpha=.5, markersize=5, markeredgecolor='none', label='median') inf = avrVAR + stdVAR/np.sqrt(nVAR) sup = avrVAR - stdVAR/np.sqrt(nVAR) ax.fill_between(tnorm, inf, sup, facecolor='gray', alpha=0.5) trans = transforms.blended_transform_factory( ax.transData, ax.transAxes) rect1 = patches.Rectangle((0., 0.), width=1.0, height=1, transform=trans, color='blue', alpha=0.3) ax.add_patch(rect1) ax.legend(loc='upper right') ax.grid() ax.set_ylim(YLIMS) TITLE = SUBTITLE ax.set_title(TITLE) ax.set_xlabel('time normalized to MC passage time [1]', fontsize=14) ax.set_ylabel(YLAB, fontsize=20) savefig(fname_fig, format='png', dpi=180, bbox_inches='tight') close() def makefig_ii(mc, sh, YLIMS, YLAB, **kws): """ - ftext{bool}: if False, we put the text in the title. Otherwise, we put the text inside the figure, using `TEXT_LOC`{dict} as positions - TEXT_LOC{dict}: coordinates for the text inside the figure. The `TEXT_LOC['sh']`{2-tuple} are the positions for the left part, and `TEXT_LOC['mc']`{2-tuple} for the right part. """ #--- kws ftext = kws.get('ftext', False) TEXT = kws.get('TEXT', None) TEXT_LOC = kws.get('TEXT_LOC', None) fname_fig = kws.get('fname_fig', None) #------------------------------------- fmc,fsh = 3.0, 1.0 # escaleos temporales #--- if figure is not given, create one if 'fig' in kws: fig, ax = kws['fig'], kws['ax'] else: fig = figure(1, figsize=(13, 6)) ax = fig.add_subplot(111) # catch the name of the observable if 'varname' in kws: varname = kws['varname'] else: varname = fname_fig[:-4].split('_')[-1] if(varname == 'Temp'): mc.med /= 1.0e4; sh.med /= 1.0e4 mc.avr /= 1.0e4; sh.avr /= 1.0e4 mc.std_err /= 1.0e4; sh.std_err /= 1.0e4 YLIMS[0] /= 1.0e4; YLIMS[1] /= 1.0e4 if ftext: TEXT_LOC['mc'][1] /= 1.0e4 TEXT_LOC['sh'][1] /= 1.0e4 # curvas del mc time = fsh+fmc*mc.tnorm cc = time>=fsh ax.plot(time[cc], mc.avr[cc], 'o-', color='black', markersize=5) ax.plot(time[cc], mc.med[cc], 'o-', color='red', alpha=.8, markersize=5, markeredgecolor='none') # sombra del mc inf = mc.avr + mc.std_err/np.sqrt(mc.nValues) sup = mc.avr - mc.std_err/np.sqrt(mc.nValues) ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5) trans = transforms.blended_transform_factory( ax.transData, ax.transAxes) rect1 = patches.Rectangle((fsh, 0.), width=fmc, height=1, transform=trans, color='blue', alpha=0.3) ax.add_patch(rect1) # curvas del sheath time = fsh*sh.tnorm cc = time<=fsh ax.plot(time[cc], sh.avr[cc], 'o-', color='black', markersize=5) ax.plot(time[cc], sh.med[cc], 'o-', color='red', alpha=.8, markersize=5, markeredgecolor='none') # sombra del sheath inf = sh.avr + sh.std_err/np.sqrt(sh.nValues) sup = sh.avr - sh.std_err/np.sqrt(sh.nValues) ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5) #trans = transforms.blended_transform_factory( # ax.transData, ax.transAxes) rect1 = patches.Rectangle((0., 0.), width=fsh, height=1, transform=trans, color='orange', alpha=0.3) ax.add_patch(rect1) ax.tick_params(labelsize=17) ax.grid() ax.set_ylim(YLIMS) if ftext: ax.text(TEXT_LOC['mc'][0], TEXT_LOC['mc'][1], TEXT['mc'], fontsize=22) 
ax.text(TEXT_LOC['sh'][0], TEXT_LOC['sh'][1], TEXT['sh'], fontsize=22) else: if TEXT is not None: ax.set_title( 'left: '+TEXT['sh']+'\n' 'right: '+TEXT['mc'] ) else: pass # no text anywhere ax.set_ylabel(YLAB, fontsize=27) # if `varname` has any of these strings, plot in log-scale. if any([(nm in varname) for nm in \ ('beta','Temp', 'rmsB', 'rmsBoB', 'ratio')]): ax.set_yscale('log') else: ax.set_yscale('linear') ax.legend(loc='best', fontsize=20) if 'fig' not in kws: # if figure not given, save to disk ax.set_xlim(-2.0, 7.0) ax.set_xlabel('time normalized to sheath/MC passage [1]', fontsize=25) savefig(fname_fig, format='png', dpi=100, bbox_inches='tight') close() return None else: # return changes of passed figure return fig, ax #--- chekea q el archivo no repita elementos de la 1ra columna def check_redundancy(fname, name): f = open(fname, 'r') dummy = {} for line in f: ll = line.split(' ') varname = ll[0] dummy[varname] = 0 dummy_names = dummy.keys() dummy_set = set(dummy_names) redundancy = len(dummy_set)<len(dummy_names) overwriting = name in dummy_set if redundancy or overwriting: return True else: return False class general: def __init__(self): self.name = 'name' class dummy1: def __init__(self,): pass class dummy2 (object): """ can be used: >>> dd = dummy2() >>> dd['name'] = [3,4,5] >>> dd['name2'].time = [0,1,2,3,4] """ def __init__(self): self.this = {} def __getitem__(self, idx): if not idx in self.this.keys(): self.this[idx] = dummy1() return self.this[idx] def set(self, name, attname, value): if not name in self.this.keys(): self.this[name] = dummy1() setattr(self.this[name], attname, value) def keys(self,): return self.this.keys() class boundaries: def __init__(self): print self.__dict__ print dict(self) def ff(self): self.fg = 0.2 def nans(sh): return np.nan*np.ones(sh) def grab_time_domain(adap, check=False): """ Search for a valid time domain for this `varname` and return. If `check`==True, it checks that all time domains are the same (for all `varname`s) unless a difference of 10 times the numerical epsilon. """ na = len(adap) # grab all posible time domains found = False for i in range(na): for name in adap[i].keys(): if not(found): tarr = adap[i][name][0] if tarr is not None: found = True if found: # we found a valid time domain (`tarr`) if check: # assume time array is 'np.float32' eps32 = np.finfo(np.float32).eps for i in range(na): for name in adap[i].keys(): tarr_ = adap[i][name][0] if tarr_ is not None: # they differ at most in its # numerical epsilon ok = (tarr_-tarr<=eps32) assert ok.prod(),\ " we have more than 1 valid time domain!!:\n%r\n\n%r"%( tarr_, tarr) return tarr #--- didn't find any valid time domain try: # hung in debug mode import pdb; pdb.set_trace() except ImportError: # ok, get out! raise SystemExit( 'shut! none are valid time domains:\n %r'%t_array ) class events_mgr(object): def __init__(self, gral, FILTER, CUTS, bd, nBin, fgap, tb, z_exp, structure='mc', fparam='mc_V', verbose=True): """ structure: can be 'sh.mc', 'sh.i', 'mc', 'i', refering to sheath-of-mc, sheath-of-icme, mc, and icme, respectively. This is to use the proper mean values calculated in each structure. 
""" self.fparam = fparam self.structure = structure self.data_name = gral.data_name self.FILTER = FILTER self.CUTS = CUTS self.bd = bd self.nBin = nBin self.fgap = fgap self.tb = tb self.z_exp = z_exp self.dir_plots = gral.dirs['dir_plots'] self.dir_ascii = gral.dirs['dir_ascii'] self.gral = gral self._dirs_ = gral.dirs self.verbose = verbose #self.f_sc = netcdf_file(gral.fnames[gral.data_name], 'r') self.f_events = netcdf_file(gral.fnames['table_richardson'], 'r') print " -------> archivos input leidos!" #--- put False to all possible data-flags (all CR detector-names # must be included in 'self.CR_observs') self.names_ok = ('Auger_BandMuons', 'Auger_BandScals', 'Auger_scals', \ 'McMurdo', 'ACE', 'ACE_o7o6', 'ACE1sec') for name in self.names_ok: read_flag = 'read_'+name setattr(self, read_flag, False) # True: if files are already read #--- names of CR observatories self.CR_observs = ( #must **include** the 'load_data_..()' methods 'Auger_scals', 'Auger_BandMuons', 'Auger_BandScals',\ 'McMurdo') #--- just a check for load_data_.. methods for att_name in dir(events_mgr): # iterate on all methods if att_name.startswith('load_data_'): att_suffix = att_name.replace('load_data_', '') assert att_suffix in self.names_ok,\ " [-] ERROR: one of the methods '%s' is NOT taken into account in 'self.CR_observs' (%s) " % (att_name, att_suffix) self.data_name_ = str(self.data_name) # nombre de la data input inicial (*1) self.IDs_locked = False # (*2) """ (*1): si despues cambia 'self.data_name', me voy a dar cuenta en la "linea" FLAG_001. (*2): lock in lock_IDs(). True: if the id's of the events have been fixed/locked, so that later analysis is resctricted only with theses locked id's. """ #++++++++++ CORRECTION OF BORDERS ++++++++++ # IMPORTANTE: # Solo valido para los "63 eventos" (MCflag='2', y visibles en ACE) # NOTA: dan saltos de shock mas marcados con True. # TODO: make a copy/deepcopy of `tb` and `bd`, so that we don't # bother the rest of data_names (i.e. Auger_scals, Auger_BandMuons, # etc.) if FILTER['CorrShift']: ShiftCorrection(ShiftDts, tb.tshck) ShiftCorrection(ShiftDts, tb.tini_icme) ShiftCorrection(ShiftDts, tb.tend_icme) ShiftCorrection(ShiftDts, tb.tini_mc) ShiftCorrection(ShiftDts, tb.tend_mc) ShiftCorrection(ShiftDts, bd.tini) ShiftCorrection(ShiftDts, bd.tend) def run_all(self, _data_handler): #----- seleccion de eventos self.filter_events() print "\n ---> filtrado de eventos (n:%d): OK\n" % (self.n_SELECC) #----- load data y los shiftimes "omni" self.load_files_and_timeshift_ii(_data_handler) #----- rebineo y promedios self.rebine() self.rebine_final() #----- hacer ploteos self.make_plots() #----- archivos "stuff" self.build_params_file() #@profile def rebine(self, collect_only=False): """ rebineo de c/evento """ nvars = self.nvars #len(VARS) n_icmes = self.tb.n_icmes bd = self.bd VARS = self.VARS nbin = self.nBin['total'] nwndw = [self.nBin['before'], self.nBin['after']] day = 86400. 
#---- quiero una lista de los eventos-id q van a incluirse en c/promedio :-) IDs = {} Enough, nEnough = {}, {} self.__ADAP__ = ADAP = [] # conjunto de varios 'adap' (uno x c/variable) for varname in VARS.keys(): IDs[varname] = [] Enough[varname] = [] nEnough[varname] = 0 # counter # recorremos los eventos: nok, nbad = 0, 0 nnn = 0 # nro de evento q pasan el filtro a-priori self.out = {} if collect_only: self.out['events_data'] = {} # bag to save data from events ok = np.zeros(n_icmes,dtype=np.bool) # all `False` by default for i in range(n_icmes): try: #no todos los elementos de 'tend' son fechas (algunos eventos no tienen fecha definida) # this 'i'-event must be contained in our data-base ok[i] = date_to_utc(bd.tini[i]) >= self.t_utc[0] #True ok[i] &= date_to_utc(bd.tend[i]) <= self.t_utc[-1] if self.IDs_locked: ok[i] &= i in self.restricted_IDs except: # e.g. if `bd.{tini,tend}[i]` is NaN ok[i] = False for i in range(n_icmes): #np.set_printoptions(4) # nro de digitos a imprimir al usar numpy.arrays if not (ok[i] & self.SELECC[i]): #---FILTRO--- (*1) print ccl.Rn, " id:%d ---> ok, SELECC: "%i, ok[i], self.SELECC[i], ccl.W nbad +=1 continue dT = (bd.tend[i] - bd.tini[i]).total_seconds()/day # [day] ADAP += [ {} ] # agrego un diccionario a la lista nnn += 1 print ccl.Gn + " id:%d ---> dT/day:%g" % (i, dT) + ccl.W print self.tb.tshck[i] nok +=1 if collect_only: # evdata is just a pointer evdata = self.out['events_data']['id_%03d'%i] = dummy2() #{} # recorremos las variables: for varname in VARS.keys(): dt = dT*(1+nwndw[0]+nwndw[1])/nbin t, var = self.grab_window( nwndw=nwndw, #rango ploteo data=[self.t_utc, VARS[varname]['value']], tini=bd.tini[i], tend=bd.tend[i], vname=varname, # for ACE 1sec ) if collect_only: evdata.set(varname, 'time', t) evdata.set(varname, 'data', var) #--- read average CR rates before shock/disturbance if self.data_name in self.CR_observs: # is it CR data? rate_pre = getattr(self, 'rate_pre_'+self.data_name) var = 100.*(var - rate_pre[i]) / rate_pre[i] #--- rebinea usando 'dt' como el ancho de nuevo bineo out = adaptar_ii( nwndw = nwndw, dT = dT, n = nbin, dt = dt, t = t, r = var, fgap = self.fgap ) enough = out[0] # True: data con menos de 100*'fgap'% de gap Enough[varname] += [ enough ] ADAP[nok-1][varname] = out[1] # out[1] = [tiempo, variable] if enough: #import pdb; pdb.set_trace() IDs[varname] += [i] nEnough[varname] += 1 #NOTE: `ADAP` points to `self.__ADAP__` print " ----> len.ADAP: %d" % len(ADAP) self.__nok__ = nok self.__nbad__ = nbad self.out['nok'] = nok self.out['nbad'] = nbad self.out['IDs'] = IDs self.out['nEnough'] = nEnough self.out['Enough'] = Enough def lock_IDs(self): """ This assumes that 'IDs' has only *one* key. That is, len(IDs)=1 !! """ IDs = self.out['IDs'] varname = IDs.keys()[0] self.restricted_IDs = IDs[varname] self.IDs_locked = True def rebine_final(self): """ rebineo de c/evento ... PARTE FINAL """ nvars = self.nvars #len(VARS) VARS = self.VARS nbin = self.nBin['total'] nwndw = [self.nBin['before'], self.nBin['after']] day = 86400. 
## salidas del 'self.rebine()' ADAP = self.__ADAP__ Enough = self.out['Enough'] nEnough = self.out['nEnough'] IDs = self.out['IDs'] nok = self.out['nok'] nbad = self.out['nbad'] stuff = {} #[] # Hacemos un lugar para la data rebineada (posible uso post-analisis) if self.data_name==self.data_name_: self.rebined_data = {} # creamos el diccionario UNA sola vez for varname in VARS.keys(): if self.verbose: print ccl.On + " -------> procesando: %s" % VARS[varname]['label'] print " nEnough/nok/(nok+nbad): %d/%d/%d " % (nEnough[varname], nok, nok+nbad) + ccl.W VAR_adap = zeros((nok, nbin)) # perfiles rebineados (*) # (*): uno de estos por variable # recorro los 'nok' eventos q pasaron el filtro de arriba: for i in range(nok): VAR_adap[i,:] = ADAP[i][varname][1] # valores rebineados de la variable "j" para el evento "i" self.rebined_data[varname] = VAR_adap # valores medios de esta variable para c/evento avrVAR_adap = mvs_for_each_event(VAR_adap, nbin, nwndw, Enough[varname], self.verbose) if self.verbose: print " ---> (%s) avrVAR_adap[]: \n" % varname, avrVAR_adap VAR_avrg = zeros(nbin) VAR_avrgNorm = zeros(nbin) VAR_medi = zeros(nbin) VAR_std = zeros(nbin) ndata = zeros(nbin) # recorremos bin a bin, para calular media, mediana, error, etc... for i in range(nbin): cond = ~np.isnan(VAR_adap.T[i,:]) # filtro eventos q no aportan data en este bin ndata[i] = len(find(cond)) # nro de datos != nan VAR_avrg[i] = np.mean(VAR_adap.T[i,cond]) # promedio entre los valores q no tienen flag VAR_avrgNorm[i] = np.mean(VAR_adap.T[i,cond]/avrVAR_adap[cond]) VAR_medi[i] = np.median(VAR_adap.T[i,cond])# mediana entre los valores q no tienen flag VAR_std[i] = np.std(VAR_adap.T[i,cond]) # std del mismo conjunto de datos stuff[varname] = [VAR_avrg, VAR_medi, VAR_std, ndata, avrVAR_adap] # NOTA: chekar q 'ADAP[j][varname][0]' sea igual para TODOS los # eventos 'j', y para TODOS los 'varname'. self.out['dVARS'] = stuff self.out['tnorm'] = grab_time_domain(ADAP, check=True) """def __getattr__(self, attname): if attname[:10]=='load_data_': return self.attname""" def load_files_and_timeshift_ii(self, _data_handler, obs_check=None): """ INPUT ----- * _data_handler: class that handles the i/o of the database related to 'data_name'. * obs_check: if not None, is a list of strings related to the names of the observables of our interest. The idea is to make sure that we are asking for variables that are included in our database `self.VARS`. """ read_flag = 'read_'+self.data_name # e.g. self.read_Auger if not(read_flag in self.__dict__.keys()): # do i know u? setattr(self, read_flag, False) #True: if files are already read #--- read data and mark flag as read! if not( getattr(self, read_flag) ): attname = 'load_data_'+self.data_name dh = _data_handler( input=self.gral.fnames[self.data_name], ) # point to the method that selects data from # a given window self.grab_window = dh.grab_block # {method} # grab/point-to data from disk #NOTE: if self.FILTER['CorrShift']==True, then `self.tb` and # `self.bd` will be shifted! 
out = dh.load(data_name=self.data_name, tb=self.tb, bd=self.bd) # attribute data pointers to `self` for nm, value in out.iteritems(): # set `t_utc` and `VAR` to `self` setattr(self,nm,value) # check that we are grabbing observables of our # interest if obs_check is not None: for nm in obs_check: nm_ = nm+'.'+self.data_name assert nm_ in self.VARS.keys(),\ " %s is not database list: %r"%(nm_, self.VARS.keys()) self.nvars = len(self.VARS.keys()) # mark as read self.read_flag = True # True: ya lei los archivos input #--- check weird case assert self.data_name in self.names_ok,\ _ERROR_+" not on my list!: %s" % self.data_name+\ "\n Must be one of these: %r" % [self.names_ok] def make_plots(self): """ #---- generar figuras y asciis de los perfiles promedio/mediana """ nBin = self.nBin fgap = self.fgap MCwant = self.FILTER['MCwant'] ThetaThres = self.CUTS['ThetaThres'] if self.FILTER['vsw_filter']: v_lo, v_hi = self.CUTS['v_lo'], self.CUTS['v_hi'] else: v_lo, v_hi = 0.0, 0.0 #estos valores significan q no hay filtro if self.FILTER['z_filter_on']: z_lo, z_hi = self.CUTS['z_lo'], self.CUTS['z_hi'] else: z_lo, z_hi = 0.0, 0.0 if self.FILTER['B_filter']: B_lo, B_hi = self.CUTS['B_lo'], self.CUTS['B_hi'] else: B_lo, B_hi = 0.0, 0.0 #estos valores significan q no hay filtro if self.FILTER['filter_dR.icme']: dR_lo, dR_hi = self.CUTS['dR_lo'], self.CUTS['dR_hi'] else: dR_lo, dR_hi = 0.0, 0.0 #estos valores significan q no hay filtro nbin = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime'] # [1] nro de bines q quiero en mi perfil promedio #-------------------- prefijos: # prefijo para filtro Wang: if self.FILTER['wang']: WangFlag = str(ThetaThres) else: WangFlag = 'NaN' # prefijo gral para los nombres de los graficos: if self.FILTER['CorrShift']: prexShift = 'wShiftCorr' else: prexShift = 'woShiftCorr' #------------------------------- # nombres genericos... 
DIR_FIGS = '%s/MCflag%s/%s' % (self.dir_plots, MCwant['alias'], prexShift) DIR_FIGS += '/' + self._dirs_['suffix'] DIR_ASCII = '%s/MCflag%s/%s' % (self.dir_ascii, MCwant['alias'], prexShift) DIR_ASCII += '/' + self._dirs_['suffix'] os.system('mkdir -p %s' % DIR_FIGS) # si no existe, lo creamos os.system('mkdir -p %s' % DIR_ASCII) # (bis) print ccl.On + " -------> creando: %s" % DIR_FIGS + ccl.W print ccl.On + " -------> creando: %s" % DIR_ASCII + ccl.W FNAMEs = 'MCflag%s_%dbefore.%dafter_fgap%1.1f' % (MCwant['alias'], nBin['before'], nBin['after'], fgap) FNAMEs += '_Wang%s' % (WangFlag) if self.FILTER['vsw_filter']: FNAMEs += '_vlo.%03.1f.vhi.%04.1f' % (v_lo, v_hi) if self.FILTER['z_filter_on']: FNAMEs += '_zlo.%2.2f.zhi.%2.2f' % (z_lo, z_hi) if self.FILTER['B_filter']: FNAMEs += '_Blo.%2.2f.Bhi.%2.2f' % (B_lo, B_hi) if self.FILTER['filter_dR.icme']: FNAMEs += '_dRlo.%2.2f.dRhi.%2.2f' % (dR_lo, dR_hi) if not self.FILTER['vsw_filter']: FNAMEs += '_' # flag for post-processing, indicating # there was no splitting FNAME_ASCII = '%s/%s' % (DIR_ASCII, FNAMEs) FNAME_FIGS = '%s/%s' % (DIR_FIGS, FNAMEs) fname_nro = DIR_ASCII+'/'+'n.events_'+FNAMEs+'.txt' #'w': write mode #'a': append mode #---FLAG_001 if self.data_name==self.data_name_: fnro = open(fname_nro, 'w') else: fnro = open(fname_nro, 'a') # si uso otra data input, voy anotando el nro # de eventos al final del archivo 'fname_nro' #------------------------------------------------------------------- nvars = len(self.VARS) for varname in self.VARS.keys(): fname_fig = '%s_%s.png' % (FNAME_FIGS, varname) #self.VARS[i][1]) print ccl.Rn+ " ------> %s" % fname_fig ylims = self.VARS[varname]['lims'] #self.VARS[i][2] ylabel = self.VARS[varname]['label'] #self.VARS[i][3] average = self.out['dVARS'][varname][0] mediana = self.out['dVARS'][varname][1] #self.out['dVARS'][i][4] std_err = self.out['dVARS'][varname][2] nValues = self.out['dVARS'][varname][3] # number of values aporting to each data bin N_selec = self.out['nok'] #self.out['dVARS'][varname][0] N_final = self.out['nEnough'][varname] #nEnough[i] SUBTITLE = '# of selected events: %d \n\ events w/80%% of data: %d \n\ bins per time unit: %d \n\ MCflag: %s \n\ WangFlag: %s' % (N_selec, N_final, nBin['bins_per_utime'], MCwant['alias'], WangFlag) makefig(mediana, average, std_err, nValues, self.out['tnorm'], SUBTITLE, ylims, ylabel, fname_fig) fdataout = '%s_%s.txt' % (FNAME_ASCII, varname) #self.VARS[i][1]) dataout = np.array([self.out['tnorm'] , mediana, average, std_err, nValues]) print " ------> %s\n" % fdataout + ccl.W np.savetxt(fdataout, dataout.T, fmt='%12.5f') #-------- grabamos nro de eventos selecc para esta variable line = '%s %d %d\n' % (varname, N_final, N_selec) fnro.write(line) print ccl.Rn + " --> nro de eventos seleccionados: " + fname_nro + ccl.W fnro.close() #--- salidas (a parte de los .png) self.DIR_ASCII = DIR_ASCII self.FNAMEs = FNAMEs def build_params_file(self): """ Construye archivo q tiene cosas de los eventos seleccionados: - valores medios de los observables (B, Vsw, Temp, beta, etc) - los IDs de los eventos - duracion de los MCs y las sheaths """ DIR_ASCII = self.DIR_ASCII FNAMEs = self.FNAMEs #-------------------------------------------- begin: NC_FILE print "\n*********************************** begin: NC_FILE" #------- generamos registro de id's de los # eventos q entraron en los promedios. # Nota: un registro por variable. 
fname_out = DIR_ASCII+'/'+'_stuff_'+FNAMEs+'.nc' #'./test.nc' #---FLAG_001 if self.data_name==self.data_name_: fout = netcdf_file(fname_out, 'w') print "\n ----> generando: %s\n" % fname_out else: fout = netcdf_file(fname_out, 'a') # modo 'a': si uso otra data input, voy anotando el nro # de eventos al final del archivo 'fname_out' print "\n ----> anexando en: %s\n" % fname_out IDs = self.out['IDs'] for varname in self.VARS.keys(): print " ----> " + varname n_events = len(IDs[varname]) dimname = 'nevents_'+varname fout.createDimension(dimname, n_events) print " n_events: ", n_events prom = self.out['dVARS'][varname][4] cc = np.isnan(prom) print " nprom (all) : ", prom.size prom = prom[~cc] print " nprom (w/o nan): ", prom.size dims = (dimname,) write_variable(fout, varname, dims, prom, 'd', 'average_values per event') #---------- IDs de esta variable ids = map(int, IDs[varname]) vname = 'IDs_'+varname write_variable(fout, vname, dims, ids, 'i', 'event IDs that enter in this parameter average') #---------- duracion de la estructura dtsh = np.zeros(len(ids)) dtmc = np.zeros(len(ids)) for i in range(len(ids)): id = ids[i] dtsh[i] = self.dt_sh[id] dtmc[i] = self.dt_mc[id] vname = 'dt_sheath_'+varname write_variable(fout, vname, dims, dtsh, 'd', '[days]') vname = 'dt_mc_'+varname write_variable(fout, vname, dims, dtmc, 'd', '[days]') fout.close() print "**************************************** end: NC_FILE" #---------------------------------------------- end: NC_FILE def filter_events(self): structure = self.structure tb = self.tb FILTER = self.FILTER dTday = self.CUTS['dTday'] day = 86400. AU_o_km = 1./(150.0e6) sec_o_day = 86400. #------------------------------------ EVENTS's PARAMETERS #MCsig = array(f_events.variables['MC_sig'].data)# 2,1,0: MC, rotation, irregular #Vnsh = array(f_events.variables['wang_Vsh'].data) # veloc normal del shock ThetaSh = np.array(self.f_events.variables['wang_theta_shock'].data) # orientacion de la normal del shock i_V = self.f_events.variables[structure+'_V'].data.copy() # velocidad de icme i_B = self.f_events.variables[structure+'_B'].data.copy() # B del icme i_dt = self.f_events.variables[structure+'_dt'].data.copy() # B del icme i_dR = i_dt*(i_V*AU_o_km*sec_o_day) # values of the observables to use for filtering vfparam = get_fparam(self.f_events, self.fparam) #RatePre_Names = [] #--- seteamos miembros de 'self' q se llamen 'rate_pre_...' for vname in self.f_events.variables.keys(): if vname.startswith('rate_pre_'): #RatePre_Names += [ vname ] # save them to make checks later var = self.f_events.variables[vname].data.copy() setattr(self, vname, var) # asignamos 'rate_pre_...' 
a 'self' """ self.rate_pre = self.f_events.variables['rate_pre_McMurdo'].data.copy() self.rate_pre_Auger=self.f_events.variables['rate_pre_Auger'].data.copy() """ self.Afd = self.f_events.variables['A_FD'].data.copy() #------------------------------------ #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #++++++++++++++++++ begin: SELECCION DE EVENTOS ++++++++++++++++++++++ #------- filter dates BETW1998_2006 = np.ones(tb.n_icmes, dtype=bool) if FILTER['choose_1998-2006']: _until_jan98 = range(0, 26) # all events up to Jan/98 _after_dec06 = range(307, tb.n_icmes) # all after Dec/2006 for i in (_until_jan98 + _after_dec06): BETW1998_2006[i] = False # 'False' to exclude events #------- seleccionamos MCs con label-de-catalogo (lepping=2, etc) MC_FLAG = np.ones(tb.n_icmes, dtype=bool) for i in range(tb.n_icmes): MC_FLAG[i] = tb.MCsig[i] in FILTER['MCwant']['flags'] #------- excluimos eventos de 2MCs EVENTS_with_2MCs= (26, 148, 259, 295) MCmultiple = FILTER['Mcmultiple'] #False #True para incluir eventos multi-MC MCmulti = np.ones(tb.n_icmes, dtype=bool) # False para eventos multi-MC (SI, escribi bien) if(~FILTER['Mcmultiple']): for i in EVENTS_with_2MCs: MCmulti[i] &= False #------- orientacion del shock (catalogo Wang) if FILTER['wang']: ThetaThres = self.CUTS['ThetaThres'] ThetaCond = thetacond(ThetaThres, ThetaSh) # set lower threshold #------- duration of sheaths self.dt_mc = diff_dates(tb.tend_mc, tb.tini_mc)/day # [day] self.dt_sh = diff_dates(tb.tini_mc, tb.tshck)/day # [day] dt = diff_dates(self.bd.tend, self.bd.tini)/day DURATION = dt > dTday # sheaths>0 #------- speed of icmes if FILTER['vsw_filter']: v_lo = self.CUTS['v_lo'] v_hi = self.CUTS['v_hi'] SpeedCond = (vfparam>=v_lo) & (vfparam<v_hi) #------- z expansion (a. gulisano) z_exp = self.z_exp if FILTER['z_filter_on']: z_lo = self.CUTS['z_lo'] z_hi = self.CUTS['z_hi'] z_cond = (z_exp>=z_lo) & (z_exp<z_hi) #------- <B> of icmes if FILTER['B_filter']: B_lo = self.CUTS['B_lo'] B_hi = self.CUTS['B_hi'] BfieldCond = (i_B>=B_lo) & (i_B<B_hi) #------- size of icmes if FILTER['filter_dR.icme']: dR_lo = self.CUTS['dR_lo'] dR_hi = self.CUTS['dR_hi'] """print " ---> i_dR: \n", i_dR print " ---> i_dt: \n", i_dt raw_input()""" dRicmeCond = (i_dR>=dR_lo) & (i_dR<dR_hi) #------- filtro total SELECC = np.ones(tb.n_icmes, dtype=bool) SELECC &= BETW1998_2006 # nos mantenemos en este periodo de anios SELECC &= MCmulti # nubes multiples SELECC &= MC_FLAG # catalogo de nubes SELECC &= DURATION # no queremos sheaths q duran 1hr xq solo aportan ruido if FILTER['wang']: SELECC &= ThetaCond # cerca a 180 es nariz del shock if FILTER['vsw_filter']: SELECC &= SpeedCond if FILTER['z_filter_on']: SELECC &= z_cond if FILTER['B_filter']: SELECC &= BfieldCond if FILTER['filter_dR.icme']: SELECC &= dRicmeCond self.SELECC = SELECC self.n_SELECC = len(find(SELECC)) #self.aux['SELECC'] = self.SELECC #+++++++++++++++++ end: SELECCION DE EVENTOS ++++++++++++++++++++ #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ if self.n_SELECC<=0: print ccl.Rn + "\n --------> FATAL ERROR!!!: self.n_SELECC=<0" print " exiting....... \n" + ccl.W raise SystemExit def get_fparam(finp, fparam='mc_V'): """ you can implement more acceptable fparam values, that can imply an operation of several keys of the finp.variable.keys() for instance. 
""" # keys of the usual .nc file _keys_of_netcdf_file = ['sh.mc_V', 'mc_V', 'sh.mc_B', 'mc_B'] _keys_of_netcdf_file += ['sh.i_V', 'i_V', 'sh.i_B', 'i_B'] # check if it's a valid `fparam` && extract if fparam in _keys_of_netcdf_file: values = finp.variables[fparam].data.copy() else: raise SystemExit('\n [-] Unrecognized fparam value: '+fparam+'\n') return values class RichTable(object): def __init__(s, fname_rich): s.fname_rich = fname_rich s.tshck = [] s.tini_icme, s.tend_icme = [], [] s.tini_mc, s.tend_mc = [], [] s.Qicme = [] s.MCsig = [] s.Dst = [] def read(s): print "\n ---> reading Richardson's table: %s" % s.fname_rich frich = open(s.fname_rich, 'r') print " file read." ll, n = [], 0 for line in frich: ll += [line.split(',')] n +=1 print " lineas leidas: %d" % n for i in range(1,n): #------ fecha shock s.tshck += [datetime.strptime(ll[i][1][1:20],"%Y-%m-%d %H:%M:%S")] #------ fecha ini icme ss = ll[i][2][1:11].split() # string de la fecha ini-icme HH = int(ss[1][0:2]) MM = int(ss[1][2:4]) mm = int(ss[0].split('/')[0]) dd = int(ss[0].split('/')[1]) if mm==s.tshck[i-1].month: yyyy = s.tshck[i-1].year else: yyyy = s.tshck[i-1].year + 1 s.tini_icme += [datetime(yyyy, mm, dd, HH, MM)] #------ fecha fin icme ss = ll[i][3][1:11].split() HH = int(ss[1][0:2]) MM = int(ss[1][2:4]) mm = int(ss[0].split('/')[0]) dd = int(ss[0].split('/')[1]) if mm==s.tshck[i-1].month: yyyy = s.tshck[i-1].year elif s.tshck[i-1].month==12: yyyy = s.tshck[i-1].year + 1 s.tend_icme += [datetime(yyyy, mm, dd, HH, MM)] #------ fechas MCs if ll[i][6]=='': s.tini_mc += [nan] s.tend_mc += [nan] else: hrs_ini = int(ll[i][6]) # col6 es inicio del MC dummy = ll[i][7].split('(') # col7 es fin del MC ndummy = len(dummy) if ndummy==1: hrs_end = int(ll[i][7]) else: hrs_end = int(ll[i][7].split('(')[0][1:]) s.tini_mc += [ s.tini_icme[i-1] + timedelta(hours=hrs_ini) ] s.tend_mc += [ s.tend_icme[i-1] + timedelta(hours=hrs_end) ] # calidad de ICME boundaries s.Qicme += [ ll[i][10] ] # quality of ICME boundaries # flag de MC s.MCsig += [ ll[i][15] ] #if ll[i][15]=='2H': # MCsig += [ 2 ] #else: # MCsig += [ int(ll[i][15]) ] # MC flag # s.Dst += [ int(ll[i][16]) ] # Dst #-------------------------------------- s.MCsig = np.array(s.MCsig) s.Dst = np.array(s.Dst) s.n_icmes = len(s.tshck) # """ col0 : id col1 : disturbance time col2 : ICME start col3 : ICME end col4 : Composition start col5 : Composition end col6 : MC start col7 : MC end col8 : BDE col9 : BIF col10: Quality of ICME boundaries (1=best) col11: dV --> 'S' indica q incluye shock col12: V_ICME col13: V_max col14: B col15: MC flag --> '0', '1', '2', '2H': irregular, B-rotation, MC, or MC of "Huttunen etal05" respectively. col16: Dst col17: V_transit col18: LASCO_CME --> time of associated event, generally the CME observed by SOHO/LASCO. A veces tiene 'H' por Halo. """ def Add2Date(date, days, hrs=0, BadFlag=np.nan): """ Mapping to add `days` and `hrs` to a given `datetime` object. NOTE: `days` can be fractional. """ if type(date) is not datetime: return BadFlag return date + timedelta(days=days, hours=hrs) def utc2date(t): date_utc = datetime(1970, 1, 1, 0, 0, 0, 0) date = date_utc + timedelta(days=(t/86400.)) return date def date2utc(date): date_utc = datetime(1970, 1, 1, 0, 0, 0, 0) utcsec = (date - date_utc).total_seconds() # [utc sec] return utcsec def ACEepoch2utc(AceEpoch): return AceEpoch + 820454400.0 class arg_to_datetime(argparse.Action): """ argparse-action to handle command-line arguments of the form "dd/mm/yyyy" (string type), and converts it to datetime object. 
""" def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs is not None: raise ValueError("nargs not allowed") super(arg_to_datetime, self).__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): #print '%r %r %r' % (namespace, values, option_string) dd,mm,yyyy = map(int, values.split('/')) value = datetime(yyyy,mm,dd) setattr(namespace, self.dest, value) class arg_to_utcsec(argparse.Action): """ argparse-action to handle command-line arguments of the form "dd/mm/yyyy" (string type), and converts it to UTC-seconds. """ def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs is not None: raise ValueError("nargs not allowed") super(arg_to_utcsec, self).__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): #print '%r %r %r' % (namespace, values, option_string) dd,mm,yyyy = map(int, values.split('/')) value = (datetime(yyyy,mm,dd)-datetime(1970,1,1)).total_seconds() setattr(namespace, self.dest, value) class My2DArray(object): """ wrapper around numpy array with: - flexible number of rows - records the maximum nrow requested NOTE: This was test for 1D and 2D arrays. """ def __init__(self, shape, dtype=np.float32): self.this = np.empty(shape, dtype=dtype) setattr(self, '__array__', self.this.__array__) def resize_rows(self, nx_new=None): """ Increment TWICE the size of axis=0, **without** losing data. """ sh_new = np.copy(self.this.shape) nx = self.this.shape[0] if nx_new is None: sh_new[0] = 2*sh_new[0] elif nx_new<=nx: return 0 # nothing to do else: sh_new[0] = nx_new tmp = self.this.copy() #print "----> tmp: ", tmp.shape new = np.zeros(sh_new) new[:nx] = tmp self.this = new """ for some reason (probably due to numpy implementation), if we don't do this, the: >>> print self.__array__() stucks truncated to the original size that was set in __init__() time. So we need to tell numpy our new resized shape! """ setattr(self, '__array__', self.this.__array__) def __get__(self, instance, owner): return self.this def __getitem__(self, i): return self.this[i] def __setitem__(self, i, value): """ We can safely use: >>> ma[n:n+m,:] = [...] assuming n+m is greater than our size in axis=0. """ stop = i if type(i)==slice: stop = i.stop elif type(i)==tuple: if type(i[0])==slice: """ in case: ma[n:n+m,:] = ... """ stop = i[0].stop else: stop = i[0] #--- if requested row exceeds limits, duplicate # our size in axis=0 if stop>=self.this.shape[0]: nx_new = self.this.shape[0] while nx_new<=stop: nx_new *= 2 self.resize_rows(nx_new) self.this[i] = value #--- register the maximum nrow requested. # NOTE here we are referring to size, and *not* row-index. self.max_nrow_used = stop+1 # (row-size, not row-index) def __getattr__(self, attnm): return getattr(self.this, attnm) def ACEepoch2date(ace_epoch): """ ace_epoch: seconds since 1/1/96 """ date = datetime(1996,1,1) + timedelta(seconds=ace_epoch) return date def date2ACEepoch(date): ace_o = datetime(1996,1,1) return (date - ace_o).total_seconds() #+++++++++++++++++++++++++++++++++ if __name__=='__main__': print " ---> this is a library!\n" #EOF
jimsrc/seatos
shared_lib/shared_funcs.py
Python
mit
54,269
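The My2DArray wrapper defined near the end of shared_funcs.py above grows its first axis on demand: __setitem__ doubles the number of rows whenever an assignment targets a row index beyond the current size, and max_nrow_used records the largest row count requested so far. A minimal usage sketch of that behaviour (hypothetical, not part of the repository; assumes numpy is available and My2DArray has been imported from shared_funcs):

    # Minimal sketch of the auto-resizing behaviour of My2DArray (defined above).
    import numpy as np

    ma = My2DArray((4, 3), dtype=np.float32)  # starts with 4 rows, 3 columns
    ma[0, :] = [1.0, 2.0, 3.0]                # ordinary assignment, no resize needed
    ma[9, :] = [7.0, 8.0, 9.0]                # row 9 exceeds 4 rows -> axis 0 doubles until it fits

    print(ma.shape)            # (16, 3): attribute access is forwarded to the underlying ndarray
    print(ma.max_nrow_used)    # 10: the largest row *count* (not index) requested so far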
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mathathon.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
randpdevs/chopper
manage.py
Python
mit
252
dictionary = {"GEEKS", "FOR", "QUIZ", "GO"} N, M = 3, 3 board = [['G','I','Z'], ['U','E','K'], ['Q','S','E']] class Graph: class Vertex: def __int__(self, v): self.val = v self.adj = [] def findWords(board=board): def search(node, word, visited): if node not in visited: visited.append(node) word.append(node.val) for adjNode in node.adj: search(node, word, visited) if word not in dictionary: word.pop() result = [] g = creategraph(board) for u in g.vertices(): visited = [] visited.append(u) word = "" for adj in u.adj: search(adj, word, visited) if word in dictionary: result.append(word) return result if __name__=="__main__": print(findWords())
carlb15/Python
boggle.py
Python
mit
785
#!/usr/bin/python # coding: utf8 from __future__ import absolute_import from geocoder.osm import Osm from geocoder.w3w import W3W from geocoder.bing import Bing from geocoder.here import Here from geocoder.yahoo import Yahoo from geocoder.baidu import Baidu from geocoder.tomtom import Tomtom from geocoder.arcgis import Arcgis from geocoder.ottawa import Ottawa from geocoder.yandex import Yandex from geocoder.google import Google from geocoder.mapbox import Mapbox from geocoder.maxmind import Maxmind from geocoder.location import Location from geocoder.opencage import OpenCage from geocoder.geonames import Geonames from geocoder.mapquest import Mapquest from geocoder.distance import Distance from geocoder.geolytica import Geolytica from geocoder.freegeoip import FreeGeoIP from geocoder.canadapost import Canadapost from geocoder.w3w_reverse import W3WReverse from geocoder.here_reverse import HereReverse from geocoder.bing_reverse import BingReverse from geocoder.yandex_reverse import YandexReverse from geocoder.mapbox_reverse import MapboxReverse from geocoder.google_reverse import GoogleReverse from geocoder.google_timezone import Timezone from geocoder.google_elevation import Elevation from geocoder.mapquest_reverse import MapquestReverse from geocoder.opencage_reverse import OpenCageReverse def get(location, **kwargs): """Get Geocode :param ``location``: Your search location you want geocoded. :param ``provider``: The geocoding engine you want to use. :param ``method``: Define the method (geocode, method). """ provider = kwargs.get('provider', 'bing').lower().strip() method = kwargs.get('method', 'geocode').lower().strip() options = { 'osm': {'geocode': Osm}, 'here': { 'geocode': Here, 'reverse': HereReverse, }, 'baidu': {'geocode': Baidu}, 'yahoo': {'geocode': Yahoo}, 'tomtom': {'geocode': Tomtom}, 'arcgis': {'geocode': Arcgis}, 'ottawa': {'geocode': Ottawa}, 'mapbox': { 'geocode': Mapbox, 'reverse': MapboxReverse, }, 'maxmind': {'geocode': Maxmind}, 'geonames': {'geocode': Geonames}, 'freegeoip': {'geocode': FreeGeoIP}, 'w3w': { 'geocode': W3W, 'reverse': W3WReverse, }, 'yandex': { 'geocode': Yandex, 'reverse': YandexReverse, }, 'mapquest': { 'geocode': Mapquest, 'reverse': MapquestReverse, }, 'geolytica': {'geocode': Geolytica}, 'canadapost': {'geocode': Canadapost}, 'opencage': { 'geocode': OpenCage, 'reverse': OpenCageReverse, }, 'bing': { 'geocode': Bing, 'reverse': BingReverse, }, 'google': { 'geocode': Google, 'reverse': GoogleReverse, 'timezone': Timezone, 'elevation': Elevation, }, } if isinstance(location, (list, dict)) and method == 'geocode': raise ValueError("Location should be a string") if provider not in options: raise ValueError("Invalid provider") else: if method not in options[provider]: raise ValueError("Invalid method") return options[provider][method](location, **kwargs) def distance(*args, **kwargs): """Distance tool measures the distance between two or multiple points. :param location: (min 2x locations) Your search location you want geocoded. :param units: (default=kilometers) Unit of measurement. > kilometers > miles > feet > meters """ return Distance(*args, **kwargs) def location(location, **kwargs): """Parser for different location formats """ return Location(location, **kwargs) def google(location, **kwargs): """Google Provider :param location: Your search location you want geocoded. 
:param method: (default=geocode) Use the following: > geocode > reverse > batch > timezone > elevation """ return get(location, provider='google', **kwargs) def mapbox(location, **kwargs): """Mapbox Provider :param location: Your search location you want geocoded. :param proximity: Search nearby [lat, lng] :param method: (default=geocode) Use the following: > geocode > reverse > batch """ return get(location, provider='mapbox', **kwargs) def yandex(location, **kwargs): """Yandex Provider :param location: Your search location you want geocoded. :param lang: Chose the following language: > ru-RU — Russian (by default) > uk-UA — Ukrainian > be-BY — Belarusian > en-US — American English > en-BR — British English > tr-TR — Turkish (only for maps of Turkey) :param kind: Type of toponym (only for reverse geocoding): > house - house or building > street - street > metro - subway station > district - city district > locality - locality (city, town, village, etc.) """ return get(location, provider='yandex', **kwargs) def w3w(location, **kwargs): """what3words Provider :param location: Your search location you want geocoded. :param key: W3W API key. :param method: Chose a method (geocode, method) """ return get(location, provider='w3w', **kwargs) def baidu(location, **kwargs): """Baidu Provider :param location: Your search location you want geocoded. :param key: Baidu API key. :param referer: Baidu API referer website. """ return get(location, provider='baidu', **kwargs) def ottawa(location, **kwargs): """Ottawa Provider :param location: Your search location you want geocoded. """ return get(location, provider='ottawa', **kwargs) def elevation(location, **kwargs): """Elevation - Google Provider :param location: Your search location you want to retrieve elevation data. """ return get(location, method='elevation', provider='google', **kwargs) def timezone(location, **kwargs): """Timezone - Google Provider :param location: Your search location you want to retrieve timezone data. :param timestamp: Define your own specified time to calculate timezone. """ return get(location, method='timezone', provider='google', **kwargs) def reverse(location, provider='google', **kwargs): """Reverse Geocoding :param location: Your search location you want to reverse geocode. :param key: (optional) use your own API Key from Bing. :param provider: (default=google) Use the following: > google > bing """ return get(location, method='reverse', provider=provider, **kwargs) def bing(location, **kwargs): """Bing Provider :param location: Your search location you want geocoded. :param key: (optional) use your own API Key from Bing. :param method: (default=geocode) Use the following: > geocode > reverse """ return get(location, provider='bing', **kwargs) def yahoo(location, **kwargs): """Yahoo Provider :param ``location``: Your search location you want geocoded. """ return get(location, provider='yahoo', **kwargs) def geolytica(location, **kwargs): """Geolytica (Geocoder.ca) Provider :param location: Your search location you want geocoded. """ return get(location, provider='geolytica', **kwargs) def opencage(location, **kwargs): """Opencage Provider :param ``location``: Your search location you want geocoded. :param ``key``: (optional) use your own API Key from OpenCage. """ return get(location, provider='opencage', **kwargs) def arcgis(location, **kwargs): """ArcGIS Provider :param ``location``: Your search location you want geocoded. 
""" return get(location, provider='arcgis', **kwargs) def here(location, **kwargs): """HERE Provider :param location: Your search location you want geocoded. :param app_code: (optional) use your own Application Code from HERE. :param app_id: (optional) use your own Application ID from HERE. :param method: (default=geocode) Use the following: > geocode > reverse """ return get(location, provider='here', **kwargs) def nokia(location, **kwargs): """HERE Provider :param location: Your search location you want geocoded. :param app_code: (optional) use your own Application Code from HERE. :param app_id: (optional) use your own Application ID from HERE. :param method: (default=geocode) Use the following: > geocode > reverse """ return get(location, provider='here', **kwargs) def tomtom(location, **kwargs): """TomTom Provider :param location: Your search location you want geocoded. :param key: (optional) use your own API Key from TomTom. """ return get(location, provider='tomtom', **kwargs) def mapquest(location, **kwargs): """MapQuest Provider :param location: Your search location you want geocoded. :param key: (optional) use your own API Key from MapQuest. :param method: (default=geocode) Use the following: > geocode > reverse """ return get(location, provider='mapquest', **kwargs) def osm(location, **kwargs): """OSM Provider :param location: Your search location you want geocoded. :param url: Custom OSM Server URL location (ex: http://nominatim.openstreetmap.org/search) """ return get(location, provider='osm', **kwargs) def maxmind(location='me', **kwargs): """MaxMind Provider :param location: Your search IP Address you want geocoded. :param location: (optional) if left blank will return your current IP address's location. """ return get(location, provider='maxmind', **kwargs) def freegeoip(location, **kwargs): """FreeGeoIP Provider :param location: Your search IP Address you want geocoded. :param location: (optional) if left blank will return your current IP address's location. """ return get(location, provider='freegeoip', **kwargs) def ip(location, **kwargs): """IP Address lookup :param location: Your search IP Address you want geocoded. :param location: (optional) if left blank will return your current IP address's location. """ return get(location, provider='maxmind', **kwargs) def canadapost(location, **kwargs): """CanadaPost Provider :param ``location``: Your search location you want geocoded. :param ``key``: (optional) API Key from CanadaPost Address Complete. """ return get(location, provider='canadapost', **kwargs) def postal(location, **kwargs): """CanadaPost Provider :param ``location``: Your search location you want geocoded. :param ``key``: (optional) use your own API Key from CanadaPost Address Complete. """ return get(location, provider='canadapost', **kwargs) def geonames(location, **kwargs): """GeoNames Provider :param ``location``: Your search location you want geocoded. :param ``username``: (required) needs to be passed with each request. """ return get(location, provider='geonames', **kwargs)
miraculixx/geocoder
geocoder/api.py
Python
mit
11,482
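The geocoder/api.py module above is essentially a dispatch table: get() picks a provider class out of the options dict based on the provider and method keyword arguments, and the named helpers (google, osm, reverse, distance, ...) are thin wrappers around it. A hedged usage sketch follows; the concrete result object is whatever the selected provider class returns, which lives in the other geocoder modules and is not shown here:

    # Hypothetical usage of the dispatch layer shown above.
    from geocoder.api import get, reverse

    g = get('Ottawa, ON', provider='osm')              # forward geocoding via the Osm provider
    r = reverse([45.421, -75.697], provider='google')  # reverse geocoding via GoogleReverse

    # Bad inputs fail fast inside get():
    #   get('Ottawa', provider='nope')                   -> ValueError("Invalid provider")
    #   get('Ottawa', provider='osm', method='reverse')  -> ValueError("Invalid method")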
from django.conf.urls.defaults import patterns, url from urls import urlpatterns as main_patterns ALL_VERSIONS_RE = '(?P<version>.+)' urlpatterns = patterns( '', # base view, flake8 complains if it is on the previous line. url('^$', 'djangome.views.redirect_home', {'version': 'latest'}), url('^(?P<term>[\w\-\.]+)$', 'djangome.views.redirect_to_term', {'version': 'latest'}), url('^(?P<term>[\w\-\.]+)/stats$', 'djangome.views.show_term', {'version': 'latest'}), url('^%s/(?P<term>[\w\-\.]+)$' % ALL_VERSIONS_RE, 'djangome.views.redirect_to_term', name='redirect_to_term'), url('^%s/(?P<term>[\w\-\.]+)/stats$' % ALL_VERSIONS_RE, 'djangome.views.show_term', name='show_term'), ) urlpatterns += main_patterns
phalcon/readthedocs.org
readthedocs/core/djangome_urls.py
Python
mit
821
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class TaskIdRange(Model):
    """A range of task IDs that a task can depend on. All tasks with IDs in
    the range must complete successfully before the dependent task can be
    scheduled.

    The start and end of the range are inclusive. For example, if a range has
    start 9 and end 12, then it represents tasks '9', '10', '11' and '12'.

    :param start: The first task ID in the range.
    :type start: int
    :param end: The last task ID in the range.
    :type end: int
    """

    _validation = {
        'start': {'required': True},
        'end': {'required': True},
    }

    _attribute_map = {
        'start': {'key': 'start', 'type': 'int'},
        'end': {'key': 'end', 'type': 'int'},
    }

    def __init__(self, start, end):
        super(TaskIdRange, self).__init__()
        self.start = start
        self.end = end
lmazuel/azure-sdk-for-python
azure-batch/azure/batch/models/task_id_range.py
Python
mit
1,362
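TaskIdRange above is a plain msrest model with two required integer fields; the docstring's example (start 9, end 12 covers tasks '9' through '12') maps directly onto the constructor. A minimal sketch, assuming the azure-batch package is installed:

    # Minimal construction sketch of the model shown above.
    from azure.batch.models import TaskIdRange

    # Inclusive range: tasks '9', '10', '11' and '12' must complete first.
    id_range = TaskIdRange(start=9, end=12)
    print(id_range.start, id_range.end)  # 9 12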
# This file is part of Indico. # Copyright (C) 2002 - 2021 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from wtforms.fields import BooleanField, SelectField, TextAreaField from wtforms.validators import DataRequired, ValidationError from indico.modules.events.models.events import EventType from indico.util.date_time import now_utc from indico.util.i18n import _ from indico.web.forms.base import IndicoForm, generated_data from indico.web.forms.fields import EmailListField, IndicoDateTimeField, IndicoRadioField, TimeDeltaField from indico.web.forms.validators import DateTimeRange, HiddenUnless class ReminderForm(IndicoForm): recipient_fields = ['recipients', 'send_to_participants', 'send_to_speakers'] schedule_fields = ['schedule_type', 'absolute_dt', 'relative_delta'] schedule_recipient_fields = recipient_fields + schedule_fields # Schedule schedule_type = IndicoRadioField(_('Type'), [DataRequired()], choices=[('relative', _('Relative to the event start time')), ('absolute', _('Fixed date/time')), ('now', _('Send immediately'))]) relative_delta = TimeDeltaField(_('Offset'), [HiddenUnless('schedule_type', 'relative'), DataRequired()]) absolute_dt = IndicoDateTimeField(_('Date'), [HiddenUnless('schedule_type', 'absolute'), DataRequired(), DateTimeRange()]) # Recipients recipients = EmailListField(_('Email addresses'), description=_('One email address per line.')) send_to_participants = BooleanField(_('Participants'), description=_('Send the reminder to all participants/registrants ' 'of the event.')) send_to_speakers = BooleanField(_('Speakers'), description=_('Send the reminder to all speakers/chairpersons of the event.')) # Misc reply_to_address = SelectField(_('Sender'), [DataRequired()], description=_('The email address that will show up as the sender.')) message = TextAreaField(_('Note'), description=_('A custom message to include in the email.')) include_summary = BooleanField(_('Include agenda'), description=_("Includes a simple text version of the event's agenda in the email.")) include_description = BooleanField(_('Include description'), description=_("Includes the event's description in the email.")) attach_ical = BooleanField(_('Attach iCalendar file'), description=_('Attach an iCalendar file to the event reminder.')) def __init__(self, *args, **kwargs): self.event = kwargs.pop('event') self.timezone = self.event.timezone super().__init__(*args, **kwargs) self.reply_to_address.choices = (list(self.event .get_allowed_sender_emails(extra=self.reply_to_address.object_data).items())) if self.event.type_ == EventType.lecture: del self.include_summary def validate_recipients(self, field): if not field.data and not self.send_to_participants.data and not self.send_to_speakers.data: raise ValidationError(_('At least one type of recipient is required.')) def validate_send_to_participants(self, field): if not field.data and not self.recipients.data and not self.send_to_speakers.data: raise ValidationError(_('At least one type of recipient is required.')) def validate_send_to_speakers(self, field): if not field.data and not self.recipients.data and not self.send_to_participants.data: raise ValidationError(_('At least one type of recipient is required.')) def validate_schedule_type(self, field): # Be graceful and allow a reminder that's in the past but on the same day. 
# It will be sent immediately but that way we are a little bit more user-friendly if field.data == 'now': return scheduled_dt = self.scheduled_dt.data if scheduled_dt is not None and scheduled_dt.date() < now_utc().date(): raise ValidationError(_('The specified date is in the past')) @generated_data def scheduled_dt(self): if self.schedule_type.data == 'absolute': if self.absolute_dt.data is None: return None return self.absolute_dt.data elif self.schedule_type.data == 'relative': if self.relative_delta.data is None: return None return self.event.start_dt - self.relative_delta.data elif self.schedule_type.data == 'now': return now_utc() @generated_data def event_start_delta(self): return self.relative_delta.data if self.schedule_type.data == 'relative' else None
ThiefMaster/indico
indico/modules/events/reminders/forms.py
Python
mit
5,113
import os from click import confirm from getgist import GetGistCommons class LocalTools(GetGistCommons): """Helpers to deal with local files and local file system""" def __init__(self, filename, assume_yes=False): """ Sets the file name to be used by the instance. :param filename: (str) local file name (ro be read or written) :param assume_yes: (bool) assume yes (or first option) for all prompts return: (None) """ self.cwd = os.getcwd() self.file_path = os.path.expanduser(filename) self.filename = os.path.basename(filename) self.assume_yes = assume_yes def save(self, content): """ Save any given content to the instance file. :param content: (str or bytes) :return: (None) """ # backup existing file if needed if os.path.exists(self.file_path) and not self.assume_yes: message = "Overwrite existing {}? (y/n) " if not confirm(message.format(self.filename)): self.backup() # write file self.output("Saving " + self.filename) with open(self.file_path, "wb") as handler: if not isinstance(content, bytes): content = bytes(content, "utf-8") handler.write(content) self.yeah("Done!") def backup(self): """Backups files with the same name of the instance filename""" count = 0 name = "{}.bkp".format(self.filename) backup = os.path.join(self.cwd, name) while os.path.exists(backup): count += 1 name = "{}.bkp{}".format(self.filename, count) backup = os.path.join(self.cwd, name) self.hey("Moving existing {} to {}".format(self.filename, name)) os.rename(os.path.join(self.cwd, self.filename), backup) def read(self, file_path=None): """ Read the contents of a file. :param filename: (str) path to a file in the local file system :return: (str) contents of the file, or (False) if not found/not a file """ if not file_path: file_path = self.file_path # abort if the file path does not exist if not os.path.exists(file_path): self.oops("Sorry, but {} does not exist".format(file_path)) return False # abort if the file path is not a file if not os.path.isfile(file_path): self.oops("Sorry, but {} is not a file".format(file_path)) return False with open(file_path) as handler: return handler.read()
cuducos/getgist
getgist/local.py
Python
mit
2,630
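LocalTools above wraps a handful of file-system chores: save() writes bytes (prompting to back up an existing file unless assume_yes suppresses the prompt), backup() finds a free .bkp/.bkpN name, and read() returns the file contents or False. A small usage sketch under those assumptions (the getgist package must be importable; the file name is illustrative):

    # Hypothetical usage of the LocalTools helper shown above.
    from getgist.local import LocalTools

    local = LocalTools(".gitignore", assume_yes=True)  # no overwrite prompt
    local.save("*.pyc\n__pycache__/\n")                # str is encoded to bytes before writing
    contents = local.read()                            # returns the text, or False if missing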
from fastapi.testclient import TestClient from docs_src.metadata.tutorial001 import app client = TestClient(app) openapi_schema = { "openapi": "3.0.2", "info": { "title": "ChimichangApp", "description": "\nChimichangApp API helps you do awesome stuff. 🚀\n\n## Items\n\nYou can **read items**.\n\n## Users\n\nYou will be able to:\n\n* **Create users** (_not implemented_).\n* **Read users** (_not implemented_).\n", "termsOfService": "http://example.com/terms/", "contact": { "name": "Deadpoolio the Amazing", "url": "http://x-force.example.com/contact/", "email": "[email protected]", }, "license": { "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0.html", }, "version": "0.0.1", }, "paths": { "/items/": { "get": { "summary": "Read Items", "operationId": "read_items_items__get", "responses": { "200": { "description": "Successful Response", "content": {"application/json": {"schema": {}}}, } }, } } }, } def test_openapi_schema(): response = client.get("/openapi.json") assert response.status_code == 200, response.text assert response.json() == openapi_schema def test_items(): response = client.get("/items/") assert response.status_code == 200, response.text assert response.json() == [{"name": "Katana"}]
tiangolo/fastapi
tests/test_tutorial/test_metadata/test_tutorial001.py
Python
mit
1,611
from marshmallow import post_load
from marshmallow_sqlalchemy import field_for

from models import ma, User, Device, Measurement


class UserSchema(ma.Schema):
    id = field_for(User, 'id', dump_only=True)

    class Meta:
        # Fields to expose
        fields = ('id', 'name', 'username')
        model = User

    @post_load
    def make_user(self, data):
        return User(**data)


class DeviceSchema(ma.Schema):
    id = field_for(Device, 'id', dump_only=True)

    class Meta:
        # Fields to expose
        fields = ('id', 'model', 'manufacturerID')
        model = Device

    @post_load
    def make_device(self, data):
        return Device(**data)


class MeasurementSchema(ma.Schema):
    id = field_for(Measurement, 'id', dump_only=True)

    class Meta:
        # Fields to expose
        model = Measurement

    @post_load
    def make_measurement(self, data):
        return Measurement(**data)
KMSkelton/cgm_flask
schema.py
Python
mit
920
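Each schema above pairs field_for (so id mirrors the SQLAlchemy column but is dump-only) with a @post_load hook that turns validated input back into a model instance. A hedged sketch of the round trip; the exact return shape depends on the marshmallow version in use (older 2.x releases return a (data, errors) tuple rather than the object directly):

    # Hypothetical usage of the schemas defined above.
    user_schema = UserSchema()

    # Deserialization: post_load turns the validated dict into a User instance
    # (on marshmallow 2.x this is wrapped in a (data, errors) result).
    user = user_schema.load({"name": "Ada", "username": "ada"})

    # Serialization: only the fields listed in Meta.fields are exposed,
    # and `id` is included on dump but never accepted on load (dump_only).
    payload = user_schema.dump(user)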
# coding: utf8
"""
yet another logging wrapper

- log level
    logging.CRITICAL, logging.ERROR, logging.WARNING,
    logging.INFO, logging.DEBUG
"""
import logging

from config import PROJECT_ROOT


def configLogger(logFile="spider.log", logLevel=logging.DEBUG, logTree=""):
    '''Configure the log file used by logging and the logging level.'''
    logFile = PROJECT_ROOT + "/log/" + logFile
    logger = logging.getLogger(logTree)
    formatter = logging.Formatter(
        '%(asctime)s %(threadName)s %(levelname)s %(message)s')
    try:
        fileHandler = logging.FileHandler(logFile)
    except IOError, e:
        raise IOError
    else:
        fileHandler.setFormatter(formatter)
        logger.addHandler(fileHandler)
        logger.setLevel(logLevel)
        return logger
zhkzyth/a-super-fast-crawler
logmanager.py
Python
mit
783
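The configLogger helper above just wires a FileHandler (rooted at PROJECT_ROOT/log/) and a thread-aware formatter onto a named logger. A minimal usage sketch, assuming the config module provides PROJECT_ROOT and the log/ directory exists:

    # Hypothetical usage of configLogger (defined above).
    import logging
    from logmanager import configLogger

    logger = configLogger(logFile="crawler.log", logLevel=logging.INFO, logTree="crawler")
    logger.info("spider started")               # written to PROJECT_ROOT/log/crawler.log
    logger.debug("not written: level is INFO")  # filtered out by the INFO threshold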
{ "uidPageMap": { W3Const.w3PropType: W3Const.w3TypePanel, W3Const.w3PropSubUI: [ "uidMSMap" ] }, # Map "uidMSMap": { W3Const.w3PropType: W3Const.w3TypeMap, W3Const.w3PropMap: { W3Const.w3AttrMapLocation: "", W3Const.w3AttrMapKey: "" }, W3Const.w3PropCSS: { "border": "2px solid", "min-width": "800px", "min-height": "600px" } } }
eddiedb6/ej
web/metadata/EJUIMapPage.py
Python
mit
493
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class CloudJob(Model): """An Azure Batch job. :param id: A string that uniquely identifies the job within the account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an account that differ only by case). :type id: str :param display_name: The display name for the job. :type display_name: str :param uses_task_dependencies: Whether tasks in the job can define dependencies on each other. The default is false. :type uses_task_dependencies: bool :param url: The URL of the job. :type url: str :param e_tag: The ETag of the job. This is an opaque string. You can use it to detect whether the job has changed between requests. In particular, you can be pass the ETag when updating a job to specify that your changes should take effect only if nobody else has modified the job in the meantime. :type e_tag: str :param last_modified: The last modified time of the job. This is the last time at which the job level data, such as the job state or priority, changed. It does not factor in task-level changes such as adding new tasks or tasks changing state. :type last_modified: datetime :param creation_time: The creation time of the job. :type creation_time: datetime :param state: The current state of the job. Possible values include: 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', 'deleting' :type state: str or ~azure.batch.models.JobState :param state_transition_time: The time at which the job entered its current state. :type state_transition_time: datetime :param previous_state: The previous state of the job. This property is not set if the job is in its initial Active state. Possible values include: 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', 'deleting' :type previous_state: str or ~azure.batch.models.JobState :param previous_state_transition_time: The time at which the job entered its previous state. This property is not set if the job is in its initial Active state. :type previous_state_transition_time: datetime :param priority: The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. :type priority: int :param constraints: The execution constraints for the job. :type constraints: ~azure.batch.models.JobConstraints :param job_manager_task: Details of a Job Manager task to be launched when the job is started. :type job_manager_task: ~azure.batch.models.JobManagerTask :param job_preparation_task: The Job Preparation task. The Job Preparation task is a special task run on each node before any other task of the job. :type job_preparation_task: ~azure.batch.models.JobPreparationTask :param job_release_task: The Job Release task. The Job Release task is a special task run at the end of the job on each node that has run any other task of the job. :type job_release_task: ~azure.batch.models.JobReleaseTask :param common_environment_settings: The list of common environment variable settings. 
These environment variables are set for all tasks in the job (including the Job Manager, Job Preparation and Job Release tasks). Individual tasks can override an environment setting specified here by specifying the same setting name with a different value. :type common_environment_settings: list[~azure.batch.models.EnvironmentSetting] :param pool_info: The pool settings associated with the job. :type pool_info: ~azure.batch.models.PoolInformation :param on_all_tasks_complete: The action the Batch service should take when all tasks in the job are in the completed state. noAction - do nothing. The job remains active unless terminated or disabled by some other means. terminateJob - terminate the job. The job's terminateReason is set to 'AllTasksComplete'. The default is noAction. Possible values include: 'noAction', 'terminateJob' :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete :param on_task_failure: The action the Batch service should take when any task in the job fails. A task is considered to have failed if has a failureInfo. A failureInfo is set if the task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the task, for example due to a resource file download error. noAction - do nothing. performExitOptionsJobAction - take the action associated with the task exit condition in the task's exitConditions collection. (This may still result in no action being taken, if that is what the task specifies.) The default is noAction. Possible values include: 'noAction', 'performExitOptionsJobAction' :type on_task_failure: str or ~azure.batch.models.OnTaskFailure :param metadata: A list of name-value pairs associated with the job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. :type metadata: list[~azure.batch.models.MetadataItem] :param execution_info: The execution information for the job. :type execution_info: ~azure.batch.models.JobExecutionInformation :param stats: Resource usage statistics for the entire lifetime of the job. 
:type stats: ~azure.batch.models.JobStatistics """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, 'url': {'key': 'url', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, 'state': {'key': 'state', 'type': 'JobState'}, 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, 'previous_state': {'key': 'previousState', 'type': 'JobState'}, 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, 'priority': {'key': 'priority', 'type': 'int'}, 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, 'stats': {'key': 'stats', 'type': 'JobStatistics'}, } def __init__(self, id=None, display_name=None, uses_task_dependencies=None, url=None, e_tag=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, priority=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, pool_info=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, execution_info=None, stats=None): self.id = id self.display_name = display_name self.uses_task_dependencies = uses_task_dependencies self.url = url self.e_tag = e_tag self.last_modified = last_modified self.creation_time = creation_time self.state = state self.state_transition_time = state_transition_time self.previous_state = previous_state self.previous_state_transition_time = previous_state_transition_time self.priority = priority self.constraints = constraints self.job_manager_task = job_manager_task self.job_preparation_task = job_preparation_task self.job_release_task = job_release_task self.common_environment_settings = common_environment_settings self.pool_info = pool_info self.on_all_tasks_complete = on_all_tasks_complete self.on_task_failure = on_task_failure self.metadata = metadata self.execution_info = execution_info self.stats = stats
AutorestCI/azure-sdk-for-python
azure-batch/azure/batch/models/cloud_job.py
Python
mit
9,338
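CloudJob above is another msrest model: every parameter is optional and simply copied onto the instance, with _attribute_map driving (de)serialization to the REST wire names (displayName, usesTaskDependencies, ISO-8601 timestamps, and so on). A minimal construction sketch, assuming azure-batch is installed; the field values and the PoolInformation keyword are illustrative assumptions, not taken from this file:

    # Hypothetical sketch of constructing the model shown above.
    from azure.batch.models import CloudJob, PoolInformation

    job = CloudJob(
        id='nightly-etl',
        display_name='Nightly ETL',
        priority=100,                          # -1000 (lowest) .. 1000 (highest)
        uses_task_dependencies=True,
        pool_info=PoolInformation(pool_id='etl-pool'),
        on_all_tasks_complete='terminateJob',
    )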
from django.shortcuts import render from django.http import HttpResponse, HttpResponseForbidden, \ HttpResponseRedirect, HttpResponseBadRequest, HttpResponseNotAllowed import logging log = logging.getLogger('client_app') # Create your views here. from .oauthclient import * OAUTH_URL = 'http://192.168.243.5:39000' # ой всё, ни слова про DNS CLIENT_ID = 'test_client_lab01' CLIENT_URL = 'http://192.168.243.5:39001' def index(request): log_request(request) if request.method == 'GET': # check if we have code in parameters code = request.GET.get('code', None) if code: # get token gp = GrantPlugin(code) client = OAuthClient(OAUTH_URL, gp, CLIENT_ID, 'clientsecret', CLIENT_URL) try: tclient = client.make_token_client() except ExpiredException as ee: tokens = str(ee) verify_status = 'None, auth_code expired!' else: tokens = tclient.auth_plugin.token_raw try: verify_status = tclient.verify() except ExpiredException as ee: verify_status = str(ee) # let's render template response = render(request, 'index.html', {'grant': code, 'tokens': tokens, 'verify_status': verify_status, }) else: # let's redirect for authorization data = {} data['response_type'] = 'code' data['client_id'] = CLIENT_ID data['redirect_uri'] = CLIENT_URL redirect_uri = OAUTH_URL + '/auth' + query_string(data) response = HttpResponseRedirect(redirect_uri) # first check if we have auth_grant in request else: response = HttpResponseNotAllowed() log.debug('response:\n' + str(response.serialize())) return response def rdict(request): if request.method == 'GET': return request.GET elif request.method == 'POST': return request.POST return None def log_request(request): log.debug(str(request)) log.debug('GET: ' + str(request.GET)) log.debug('POST: ' + str(request.POST)) log.debug('Cookies:\n' + repr(request.COOKIES)) def query_string(params): str = '?' for k in params: if len(str) > 1: str = str + '&' str = str + k + '=' + params[k] return str
Boris-Barboris/rsoi
lab01/client/client_app/views.py
Python
mit
2,533
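The index view above builds the authorization redirect by hand with the small query_string helper, which joins a dict into a "?key=value&..." suffix without any URL-encoding. For example, in the context of that module:

    # What query_string (defined above) produces for the OAuth redirect.
    data = {'response_type': 'code', 'client_id': CLIENT_ID, 'redirect_uri': CLIENT_URL}
    print(OAUTH_URL + '/auth' + query_string(data))
    # e.g. http://192.168.243.5:39000/auth?response_type=code&client_id=test_client_lab01&redirect_uri=http://192.168.243.5:39001
    # (key order may vary on older Python versions, and values are not percent-encoded)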
from __future__ import annotations import cmath import numpy as np import psutil import ray import scipy.special as ssp from pymwm.utils import cylinder_utils from pymwm.waveguide import Database, Sampling, Waveguide from .samples import Samples, SamplesForRay, SamplesLowLoss, SamplesLowLossForRay class Cylinder(Waveguide): """A class defining a cylindrical waveguide.""" def __init__(self, params): """Init Cylinder class. Args: params: A dict whose keys and values are as follows: 'core': A dict of the setting parameters of the core: 'shape': A string indicating the shape of the core. 'size': A float indicating the radius of the circular cross section [um]. 'fill': A dict of the parameters of the core Material. 'clad': A dict of the parameters of the clad Material. 'bounds': A dict indicating the bounds of database.interpolation and its keys and values are as follows: 'wl_max': A float indicating the maximum wavelength [um] 'wl_min': A float indicating the minimum wavelength [um] 'wl_imag': A float indicating the maximum value of abs(c / f_imag) [um] where f_imag is the imaginary part of the frequency. 'modes': A dict of the settings for calculating modes: 'wl_max': A float indicating the maximum wavelength [um] (default: 5.0) 'wl_min': A float indicating the minimum wavelength [um] (default: 0.4) 'wl_imag': A float indicating the maximum value of abs(c / f_imag) [um] where f_imag is the imaginary part of the frequency. (default: 5.0) 'dw': A float indicating frequency interval [rad c / 1um]=[2.99792458e14 rad / s] (default: 1 / 64). 'num_n': An integer indicating the number of orders of modes. 'num_m': An integer indicating the number of modes in each order and polarization. 'ls': A list of characters chosen from "h" (horizontal polarization) and "v" (vertical polarization). 
""" super().__init__(params) self.u_pec, self.jnu_pec, self.jnpu_pec = self.u_jnu_jnpu_pec( self.num_n, self.num_m ) def get_alphas(self, alpha_list: list[tuple[str, int, int]]) -> dict: alphas: dict = {"h": [], "v": []} for alpha in [("E", 0, m) for m in range(1, self.num_m + 1)]: if alpha in alpha_list: alphas["v"].append(alpha) for alpha in [ ("E", n, m) for n in range(1, self.num_n) for m in range(1, self.num_m + 1) ]: if alpha in alpha_list: alphas["h"].append(alpha) alphas["v"].append(alpha) for alpha in [("M", 0, m) for m in range(1, self.num_m + 1)]: if alpha in alpha_list: alphas["h"].append(alpha) for alpha in [ ("M", n, m) for n in range(1, self.num_n) for m in range(1, self.num_m + 1) ]: if alpha in alpha_list: alphas["h"].append(alpha) alphas["v"].append(alpha) return alphas def betas_convs_samples(self, params: dict) -> tuple[dict, dict, Samples]: im_factor = self.clad.im_factor self.clad.im_factor = 1.0 self.clad_params["im_factor"] = 1.0 p_modes = params["modes"].copy() num_n_0 = p_modes["num_n"] num_m_0 = p_modes["num_m"] betas: dict = {} convs: dict = {} success = False catalog = Database().load_catalog() num_n_max = catalog["num_n"].max() num_m_max = catalog["num_m"].max() if not np.isnan(num_n_max): for num_n, num_m in [ (n, m) for n in range(num_n_0, num_n_max + 1) for m in range(num_m_0, num_m_max + 1) ]: p_modes["num_n"] = num_n p_modes["num_m"] = num_m smp = Samples(self.r, self.fill_params, self.clad_params, p_modes) try: betas, convs = smp.database.load() success = True break except IndexError: continue if not success: p_modes["num_n"] = num_n_0 p_modes["num_m"] = num_m_0 betas, convs, smp = self.do_sampling(p_modes) if im_factor != 1.0: self.clad.im_factor = im_factor self.clad_params["im_factor"] = im_factor betas, convs, smp = self.do_sampling_for_im_factor(betas, convs, p_modes) return betas, convs, smp def do_sampling(self, p_modes: dict) -> tuple[dict, dict, Samples]: num_n_0 = p_modes["num_n"] num_m_0 = p_modes["num_m"] smp = Samples(self.r, self.fill_params, self.clad_params, p_modes) ray.shutdown() try: ray.init() p_modes_id = ray.put(p_modes) pool = ray.util.ActorPool( SamplesForRay.remote( self.r, self.fill_params, self.clad_params, p_modes_id ) for _ in range(psutil.cpu_count()) ) xs_success_wr_list: list[tuple[np.ndarray, np.ndarray]] = list( pool.map(lambda a, arg: a.wr_sampling.remote(arg), range(num_n_0)) ) num_wr = xs_success_wr_list[0][0].shape[0] args = [] for n in range(num_n_0): xs_array, _ = xs_success_wr_list[n] for iwr in range(num_wr): args.append((n, iwr, xs_array[iwr])) xs_success_wi_list: list[tuple[np.ndarray, np.ndarray]] = list( pool.map(lambda a, arg: a.wi_sampling.remote(arg), args) ) num_wi = xs_success_wi_list[0][0].shape[0] xs_success_list: list[tuple[np.ndarray, np.ndarray]] = [] for n in range(num_n_0): xs_array = np.zeros((num_wr, num_wi, 2 * num_m_0 + 1), dtype=complex) success_array = np.zeros((num_wr, num_wi, 2 * num_m_0 + 1), dtype=bool) for iwr in range(num_wr): i = num_wr * n + iwr xs_i, success_i = xs_success_wi_list[i] xs_array[iwr] = xs_i success_array[iwr] = success_i xs_success_list.append((xs_array, success_array)) finally: ray.shutdown() betas, convs = smp.betas_convs(xs_success_list) smp.database.save(betas, convs) return betas, convs, smp def do_sampling_for_im_factor( self, betas: dict, convs: dict, p_modes: dict ) -> tuple[dict, dict, SamplesLowLoss]: smp = SamplesLowLoss(self.r, self.fill_params, self.clad_params, p_modes) try: betas, convs = smp.database.load() except IndexError: num_n = p_modes["num_n"] 
num_m = p_modes["num_m"] args = [] for iwr in range(len(smp.ws)): for iwi in range(len(smp.wis)): xis_list = [] for n in range(num_n): xis = [] for i in range(num_m + 1): xis.append(betas[("M", n, i + 1)][iwr, iwi] ** 2) for i in range(num_m): xis.append(betas[("E", n, i + 1)][iwr, iwi] ** 2) xis_list.append(xis) args.append((iwr, iwi, xis_list)) try: ray.init() p_modes_id = ray.put(p_modes) pool = ray.util.ActorPool( SamplesLowLossForRay.remote( self.r, self.fill_params, self.clad_params, p_modes_id ) for _ in range(psutil.cpu_count()) ) xs_success_list = list( pool.map(lambda a, arg: a.task.remote(arg), args) ) finally: ray.shutdown() betas, convs = smp.betas_convs(xs_success_list) smp.database.save(betas, convs) return betas, convs, smp def beta(self, w: complex, alpha: tuple[str, int, int]) -> complex: """Return phase constant Args: w: A complex indicating the angular frequency alpha: (pol, n, m) pol: 'M' (TM-like mode) or 'E' (TE-like mode) n: The order of the mode m: The sub order of the mode. Returns: h: The phase constant. """ if self.clad.label == "PEC": return self.beta_pec(w, alpha) wr = w.real wi = w.imag hr: float = self.beta_funcs[(alpha, "real")](wr, wi)[0, 0] hi: float = self.beta_funcs[(alpha, "imag")](wr, wi)[0, 0] # if hr < 0: # hr = 1e-16 # if hi < 0: # hi = 1e-16 return hr + 1j * hi def beta_pec(self, w: complex, alpha: tuple[str, int, int]) -> complex: """Return phase constant of PEC waveguide Args: w: A complex indicating the angular frequency alpha: A tuple (pol, n, m) where pol is 'M' for TM mode or 'E' for TE mode, n is the order of the mode, and m is the number of modes in the order and the polarization. Returns: h: A complex indicating the phase constant. """ w_comp = w.real + 1j * w.imag pol, n, m = alpha if pol == "E": chi = ssp.jnp_zeros(n, m)[-1] elif pol == "M": chi = ssp.jn_zeros(n, m)[-1] else: raise ValueError("pol must be 'E' or 'M") val = cmath.sqrt(self.fill(w_comp) * w_comp ** 2 - chi ** 2 / self.r ** 2) if abs(val.real) > abs(val.imag): if val.real < 0: val *= -1 else: if val.imag < 0: val *= -1 return val def coef(self, h, w, alpha): """Return the coefficients of TE- and TM- components which compose the hybrid mode. Args: h: A complex indicating the phase constant. w: A complex indicating the angular frequency alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or 'E' for TE-like mode, n is the order of the mode, and m is the number of modes in the order and the polarization. 
Returns: a: A complex indicating the coefficient of TE-component b: A complex indicating the coefficient of TM-component """ e1 = self.fill(w) e2 = self.clad(w) pol, n, m = alpha w = w.real + 1j * w.imag h = h.real + 1j * h.imag if e2.real < -1e6: if pol == "E": norm = self.norm(w, h, alpha, 1.0 + 0.0j, 0.0j) ai, bi = 1.0 / norm, 0.0 else: norm = self.norm(w, h, alpha, 0.0j, 1.0 + 0.0j) ai, bi = 0.0, 1.0 / norm else: u = self.samples.u(h ** 2, w, e1) v = self.samples.v(h ** 2, w, e2) knv = ssp.kv(n, v) knpv = ssp.kvp(n, v) jnu = ssp.jv(n, u) jnpu = ssp.jvp(n, u) ci = -n * (u ** 2 + v ** 2) * jnu * knv / (u * v) if pol == "E": ci *= (h / w) ** 2 ci /= e1 * jnpu * v * knv + e2 * knpv * u * jnu norm = self.norm(w, h, alpha, 1.0 + 0.0j, ci) ai = 1.0 / norm bi = ci / norm else: ci /= jnpu * v * knv + knpv * u * jnu norm = self.norm(w, h, alpha, ci, 1.0 + 0.0j) bi = 1.0 / norm ai = ci / norm return ai, bi def norm(self, w, h, alpha, a, b): pol, n, m = alpha en = 1 if n == 0 else 2 if self.clad(w).real < -1e6: radius = self.r if pol == "E": u = ssp.jnp_zeros(n, m)[-1] jnu = ssp.jv(n, u) jnpu = 0.0 else: u = ssp.jn_zeros(n, m)[-1] jnu = 0.0 jnpu = ssp.jvp(n, u) return cmath.sqrt( a ** 2 * np.pi * radius ** 2 / en * (1 - n ** 2 / u ** 2) * jnu ** 2 + b ** 2 * np.pi * radius ** 2 / en * jnpu ** 2 ) u = self.samples.u(h ** 2, w, self.fill(w)) jnu = ssp.jv(n, u) jnpu = ssp.jvp(n, u) v = self.samples.v(h ** 2, w, self.clad(w)) knv = ssp.kv(n, v) knpv = ssp.kvp(n, v) val_u = 2 * np.pi * self.r ** 2 / en val_v = val_u * ((u * jnu) / (v * knv)) ** 2 upart_diag = self.upart_diag(n, u, jnu, jnpu) vpart_diag = self.vpart_diag(n, v, knv, knpv) upart_off = self.upart_off(n, u, jnu) vpart_off = self.vpart_off(n, v, knv) return cmath.sqrt( val_u * ( a * (a * upart_diag + b * upart_off) + b * (b * upart_diag + a * upart_off) ) - val_v * ( a * (a * vpart_diag + b * vpart_off) + b * (b * vpart_diag + a * vpart_off) ) ) @staticmethod def upart_diag(n, u, jnu, jnpu): return jnu * jnpu / u + (jnpu ** 2 + (1 - n ** 2 / u ** 2) * jnu ** 2) / 2 @staticmethod def upart_off(n, u, jnu): return n * (jnu / u) ** 2 @staticmethod def vpart_diag(n, v, knv, knpv): return knv * knpv / v + (knpv ** 2 - (1 + n ** 2 / v ** 2) * knv ** 2) / 2 @staticmethod def vpart_off(n, v, knv): return n * (knv / v) ** 2 def Y( self, w: complex, h: complex, alpha: tuple[str, int, int], a: complex, b: complex, ) -> complex: """Return the effective admittance of the waveguide mode Args: w: A complex indicating the angular frequency h: A complex indicating the phase constant. alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or 'E' for TE-like mode, n is the order of the mode, and m is the number of modes in the order and the polarization. 
a: A complex indicating the coefficient of TE-component b: A complex indicating the coefficient of TM-component Returns: y: A complex indicating the effective admittance """ pol, n, m = alpha e1 = self.fill(w) e2 = self.clad(w) en = 1 if n == 0 else 2 if e2.real < -1e6: if pol == "E": val = h / w else: val = e1 * w / h else: u = self.samples.u(h ** 2, w, e1) jnu = ssp.jv(n, u) jnpu = ssp.jvp(n, u) v = self.samples.v(h ** 2, w, e2) knv = ssp.kv(n, v) knpv = ssp.kvp(n, v) val_u = 2 * np.pi * self.r ** 2 / en val_v = val_u * ((u * jnu) / (v * knv)) ** 2 upart_diag = self.upart_diag(n, u, jnu, jnpu) vpart_diag = self.vpart_diag(n, v, knv, knpv) upart_off = self.upart_off(n, u, jnu) vpart_off = self.vpart_off(n, v, knv) val = val_u * ( h / w * a * (a * upart_diag + b * upart_off) + e1 * w / h * b * (b * upart_diag + a * upart_off) ) - val_v * ( h / w * a * (a * vpart_diag + b * vpart_off) + e2 * w / h * b * (b * vpart_diag + a * vpart_off) ) return val @staticmethod def y_te(w, h): return h / w def y_tm_inner(self, w, h): e = self.fill(w) return e * w / h def y_tm_outer(self, w, h): e = self.clad(w) return e * w / h def fields(self, x, y, w, dir, alpha, h, coef): """Return the electromagnetic field vectors for the specified mode and point Args: x: A float indicating the x coordinate [um] y: A float indicating the y coordinate [um] w: A complex indicating the angular frequency dir: "h" (horizontal polarization) or "v" (vertical polarization) alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or 'E' for TE-like mode, n is the order of the mode, and m is the number of modes in the order and the polarization. h: A complex indicating the phase constant. coef: The coefficients of TE- and TM- components Returns: f_vec: An array of complexes [ex, ey, ez, hx, hy, hz]. 
""" pol, n, m = alpha a, b = coef r = np.hypot(x, y) p = np.arctan2(y, x) u = self.samples.u(h ** 2, w, self.fill(w)) v = self.samples.v(h ** 2, w, self.clad(w)) ur = u * r / self.r vr = v * r / self.r if dir == "h": fr = np.cos(n * p) fp = -np.sin(n * p) else: fr = np.sin(n * p) fp = np.cos(n * p) y_te = Cylinder.y_te(w, h) if r <= self.r: y_tm = self.y_tm_inner(w, h) er_te = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fr er_tm = ssp.jvp(n, ur) * fr er = a * er_te + b * er_tm ep_te = ssp.jvp(n, ur) * fp ep_tm = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fp ep = a * ep_te + b * ep_tm ez = u / (1j * h * self.r) * b * ssp.jv(n, ur) * fr hr = -y_te * a * ep_te - y_tm * b * ep_tm hp = y_te * a * er_te + y_tm * b * er_tm hz = -u / (1j * h * self.r) * y_te * a * ssp.jv(n, ur) * fp else: y_tm = self.y_tm_outer(w, h) val = -u * ssp.jv(n, u) / (v * ssp.kv(n, v)) er_te = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fr * val er_tm = ssp.kvp(n, vr) * fr * val er = a * er_te + b * er_tm ep_te = ssp.kvp(n, vr) * fp * val ep_tm = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fp * val ep = a * ep_te + b * ep_tm ez = -v / (1j * h * self.r) * b * ssp.kv(n, vr) * fr * val hr = -y_te * a * ep_te - y_tm * b * ep_tm hp = y_te * a * er_te + y_tm * b * er_tm hz = v / (1j * h * self.r) * y_te * a * ssp.kv(n, vr) * fp * val ex = er * np.cos(p) - ep * np.sin(p) ey = er * np.sin(p) + ep * np.cos(p) hx = hr * np.cos(p) - hp * np.sin(p) hy = hr * np.sin(p) + hp * np.cos(p) return np.array([ex, ey, ez, hx, hy, hz]) def e_field(self, x, y, w, dir, alpha, h, coef): """Return the electric field vector for the specified mode and point Args: x: A float indicating the x coordinate [um] y: A float indicating the y coordinate [um] w: A complex indicating the angular frequency dir: "h" (horizontal polarization) or "v" (vertical polarization) alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or 'E' for TE-like mode, n is the order of the mode, and m is the number of modes in the order and the polarization. h: A complex indicating the phase constant. coef: The coefficients of TE- and TM- components Returns: e_vec: An array of complexes [ex, ey, ez]. 
""" pol, n, m = alpha a, b = coef r = np.hypot(x, y) p = np.arctan2(y, x) u = self.samples.u(h ** 2, w, self.fill(w)) v = self.samples.v(h ** 2, w, self.clad(w)) ur = u * r / self.r vr = v * r / self.r if dir == "h": fr = np.cos(n * p) fp = -np.sin(n * p) else: fr = np.sin(n * p) fp = np.cos(n * p) if r <= self.r: er_te = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fr er_tm = ssp.jvp(n, ur) * fr er = a * er_te + b * er_tm ep_te = ssp.jvp(n, ur) * fp ep_tm = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fp ep = a * ep_te + b * ep_tm ez = u / (1j * h * self.r) * b * ssp.jv(n, ur) * fr else: val = -u * ssp.jv(n, u) / (v * ssp.kv(n, v)) er_te = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fr * val er_tm = ssp.kvp(n, vr) * fr * val er = a * er_te + b * er_tm ep_te = ssp.kvp(n, vr) * fp * val ep_tm = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fp * val ep = a * ep_te + b * ep_tm ez = -v / (1j * h * self.r) * b * ssp.kv(n, vr) * fr * val ex = er * np.cos(p) - ep * np.sin(p) ey = er * np.sin(p) + ep * np.cos(p) return np.array([ex, ey, ez]) def h_field(self, x, y, w, dir, alpha, h, coef): """Return the magnetic field vectors for the specified mode and point Args: x: A float indicating the x coordinate [um] y: A float indicating the y coordinate [um] w: A complex indicating the angular frequency dir: "h" (horizontal polarization) or "v" (vertical polarization) alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or 'E' for TE-like mode, n is the order of the mode, and m is the number of modes in the order and the polarization. h: A complex indicating the phase constant. coef: The coefficients of TE- and TM- components Returns: h_vec: An array of complexes [hx, hy, hz]. """ pol, n, m = alpha a, b = coef r = np.hypot(x, y) p = np.arctan2(y, x) u = self.samples.u(h ** 2, w, self.fill(w)) v = self.samples.v(h ** 2, w, self.clad(w)) ur = u * r / self.r vr = v * r / self.r if dir == "h": fr = np.cos(n * p) fp = -np.sin(n * p) else: fr = np.sin(n * p) fp = np.cos(n * p) y_te = Cylinder.y_te(w, h) if r <= self.r: y_tm = self.y_tm_inner(w, h) er_te = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fr er_tm = ssp.jvp(n, ur) * fr ep_te = ssp.jvp(n, ur) * fp ep_tm = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fp hr = -y_te * a * ep_te - y_tm * b * ep_tm hp = y_te * a * er_te + y_tm * b * er_tm hz = -u / (1j * h * self.r) * y_te * a * ssp.jv(n, ur) * fp else: y_tm = self.y_tm_outer(w, h) val = -u * ssp.jv(n, u) / (v * ssp.kv(n, v)) er_te = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fr * val er_tm = ssp.kvp(n, vr) * fr * val ep_te = ssp.kvp(n, vr) * fp * val ep_tm = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fp * val hr = -y_te * a * ep_te - y_tm * b * ep_tm hp = y_te * a * er_te + y_tm * b * er_tm hz = v / (1j * h * self.r) * y_te * a * ssp.kv(n, vr) * fp * val hx = hr * np.cos(p) - hp * np.sin(p) hy = hr * np.sin(p) + hp * np.cos(p) return np.array([hx, hy, hz]) @staticmethod def u_jnu_jnpu_pec(num_n, num_m): us = np.empty((2, num_n, num_m)) jnus = np.empty((2, num_n, num_m)) jnpus = np.empty((2, num_n, num_m)) for n in range(num_n): us[0, n] = ssp.jnp_zeros(n, num_m) us[1, n] = ssp.jn_zeros(n, num_m) jnus[0, n] = ssp.jv(n, us[0, n]) jnus[1, n] = np.zeros(num_m) jnpus[0, n] = np.zeros(num_m) jnpus[1, n] = ssp.jvp(n, us[1, n]) return us, jnus, jnpus def coefs(self, hs, w): As = [] Bs = [] for h, s, n, m in zip(hs, self.s_all, self.n_all, self.m_all): pol = "E" if s == 0 else "M" ai, bi = self.coef(h, w, (pol, n, m)) As.append(ai) Bs.append(bi) return np.ascontiguousarray(As), 
np.ascontiguousarray(Bs) def Ys(self, w, hs, As, Bs): vals = [] for h, s, n, a, b in zip(hs, self.s_all, self.n_all, As, Bs): pol = "E" if s == 0 else "M" vals.append(self.Y(w, h, (pol, n, 1), a, b)) return np.array(vals) def props_numpy(self, w): e1 = self.fill(w) e2 = self.clad(w) hs = np.array([self.beta(w, alpha) for alpha in self.alpha_all]) As, Bs = self.coefs(hs, w) Ys = self.Ys(w, hs, As, Bs) if e2.real < -1e6: us = np.zeros_like(hs, dtype=complex) jus = np.zeros_like(hs, dtype=complex) jpus = np.zeros_like(hs, dtype=complex) for i, (h, s, n, m) in enumerate( zip(hs, self.s_all, self.n_all, self.m_all) ): us[i] = self.u_pec[s, n, m - 1] jus[i] = self.jnu_pec[s, n, m - 1] jpus[i] = self.jnpu_pec[s, n, m - 1] vs = (1 - 1j) * np.sqrt(0.5j * (-e2 * w ** 2 + hs ** 2)) * self.r kvs = np.zeros_like(vs) kpvs = np.zeros_like(vs) else: us = self.samples.u(hs ** 2, w, e1) vs = self.samples.v(hs ** 2, w, e2) jus = ssp.jv(self.n_all, us) jpus = ssp.jvp(self.n_all, us) kvs = ssp.kv(self.n_all, vs) kpvs = ssp.kvp(self.n_all, vs) return hs, us, vs, jus, jpus, kvs, kpvs, As, Bs, Ys def props(self, w): e1 = self.fill(w) e2 = self.clad(w) hs = np.array([self.beta(w, alpha) for alpha in self.alpha_all]) us, vs, jus, jpus, kvs, kpvs, As, Bs, Ys = cylinder_utils.props_cython( w, self.r, self.s_all, self.n_all, self.m_all, hs, e1, e2, self.u_pec, self.jnu_pec, self.jnpu_pec, ) return hs, us, vs, jus, jpus, kvs, kpvs, As, Bs, Ys
mnishida/PyMWM
src/pymwm/cylinder/__init__.py
Python
mit
26,692
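The Cylinder docstring above spells out the expected params layout; the following is a minimal, hedged sketch of building such a dict. The numeric values and the empty Material dicts are illustrative assumptions, not values taken from the source.

# Hypothetical usage sketch for the Cylinder class above (values are assumptions).
params = {
    "core": {
        "shape": "cylinder",
        "size": 0.15,   # radius of the circular cross section [um] (assumed value)
        "fill": {},     # parameters of the core Material (structure depends on pymwm's Material class)
    },
    "clad": {},         # parameters of the clad Material (left empty here)
    "modes": {
        "wl_max": 5.0, "wl_min": 0.4, "wl_imag": 5.0,   # docstring defaults
        "dw": 1.0 / 64, "num_n": 6, "num_m": 2, "ls": ["h", "v"],
    },
}
# wg = Cylinder(params)
# h = wg.beta(w=2 * 3.14159 / 1.55, alpha=("M", 0, 1))  # phase constant of the TM01-like mode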
# -*- coding: utf-8 -*-
import logging

from django.utils.deprecation import MiddlewareMixin

from .shortcuts import get_oauth2_app
from .toolkit_settings import API_VERSION, MIDDLEWARE_ACCESS_LOG_FORMAT

logger = logging.getLogger(__name__)


class VersionHeaderMiddleware(MiddlewareMixin):
    """
    Add a X-API-Version header to the response. The version is taken
    from TOOLKIT['API_VERSION'] setting.
    """
    def process_response(self, request, response):
        response['X-API-Version'] = API_VERSION
        return response


class AccessLogMiddleware(MiddlewareMixin):
    LOG_FORMAT = MIDDLEWARE_ACCESS_LOG_FORMAT
    UNKNOWN_APP_NAME = 'unknown'

    def process_response(self, request, response):
        app = get_oauth2_app(request)
        app_name = getattr(app, 'name', self.UNKNOWN_APP_NAME)

        logger.info(
            self.LOG_FORMAT.format(app_name=app_name,
                                   request=request,
                                   response=response)
        )

        return response
luizalabs/django-toolkit
django_toolkit/middlewares.py
Python
mit
1,039
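Both middlewares above follow the standard Django MiddlewareMixin contract, so enabling them is a matter of listing their dotted paths in settings. A hedged sketch follows; the concrete TOOLKIT values and the log format string are illustrative assumptions, not from the source.

# Hypothetical Django settings sketch for the middlewares above (values are assumptions).
MIDDLEWARE = [
    # ... other middleware ...
    'django_toolkit.middlewares.VersionHeaderMiddleware',
    'django_toolkit.middlewares.AccessLogMiddleware',
]

TOOLKIT = {
    'API_VERSION': '1.0.0',  # surfaced in the X-API-Version response header
    # assumed key name; the access-log format is read via toolkit_settings
    'MIDDLEWARE_ACCESS_LOG_FORMAT': '[{app_name}] {request.method} {request.path} {response.status_code}',
}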
""" WSGI config for WeatherForecast project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WeatherForecast.settings") application = get_wsgi_application()
vinicius-ronconi/WeatherForecast
WeatherForecast/wsgi.py
Python
mit
408
from qtpy.QtWidgets import QDialog
from qtpy import QtGui

from addie.utilities import load_ui


class SaveSqDialogMessageDialog(QDialog):

    def __init__(self, main_window=None):
        self.main_window = main_window
        QDialog.__init__(self, parent=main_window)
        self.ui = load_ui('save_sq_information_dialog.ui', baseinstance=self)
        self.init_widgets()

    def init_widgets(self):
        self.ui.message_label.setPixmap(QtGui.QPixmap(":/preview/save_sq_selection_image.png"))
neutrons/FastGR
addie/calculate_gr/save_sq_dialog_message.py
Python
mit
504
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-24 17:43
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.manager


class Migration(migrations.Migration):

    dependencies = [
        ('cadastro', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='usuario',
            managers=[
                ('_default_manager', django.db.models.manager.Manager()),
            ],
        ),
        migrations.AlterField(
            model_name='usuario',
            name='email',
            field=models.EmailField(blank=True, max_length=254, verbose_name='e-mail'),
        ),
        migrations.AlterField(
            model_name='usuario',
            name='groups',
            field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
        ),
        migrations.AlterField(
            model_name='usuario',
            name='last_login',
            field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
        ),
    ]
HerlanAssis/scosi
cadastro/migrations/0002_auto_20160224_1443.py
Python
mit
1,256
#!/usr/bin/env python2 # # wsi_bot_codebook3 # # Version 3 of codebook construction: # # -uses OpenCV for faster operation - but different local descriptors than in the 1st version; # -uses annotation files for defining the regions from where the descriptors are to be # extracted # - try to optimize the codebook with respect to some class labels from __future__ import (absolute_import, division, print_function, unicode_literals) __version__ = 0.1 __author__ = 'Vlad Popovici' import os import argparse as opt import numpy as np import numpy.linalg from scipy.stats import ttest_ind import skimage.draw import skimage.io from skimage.exposure import equalize_adapthist, rescale_intensity import cv2 import cv2.xfeatures2d from sklearn.cluster import MiniBatchKMeans from sklearn.lda import LDA from stain.he import rgb2he from util.storage import ModelPersistence def find_in_list(_value, _list): """ Returns the indexes of all occurrences of value in a list. """ return np.array([i for i, v in enumerate(_list) if v == _value], dtype=int) def main(): p = opt.ArgumentParser(description=""" Extracts features from annotated regions and constructs a codebook of a given size. """) p.add_argument('in_file', action='store', help='a file with image file, annotation file and label (0/1)') p.add_argument('out_file', action='store', help='resulting model file name') #p.add_argument('codebook_size', action='store', help='codebook size', type=int) p.add_argument('-t', '--threshold', action='store', type=int, default=5000, help='Hessian threshold for SURF features.') p.add_argument('-s', '--standardize', action='store_true', default=False, help='should the features be standardized before codebook construction?') p.add_argument('-v', '--verbose', action='store_true', help='verbose?') args = p.parse_args() th = args.threshold all_image_names, all_descriptors = [], [] all_roi = [] y = [] unique_image_names = [] with open(args.in_file, mode='r') as fin: for l in fin.readlines(): l = l.strip() if len(l) == 0: break img_file, annot_file, lbl = [z_ for z_ in l.split()][0:3] # file names: image and its annotation and label y.append(int(lbl)) if args.verbose: print("Image:", img_file) img = cv2.imread(img_file) coords = np.fromfile(annot_file, dtype=int, sep=' ') # x y - values coords = np.reshape(coords, (coords.size/2, 2), order='C') # get the bounding box: xmin, ymin = coords.min(axis=0) xmax, ymax = coords.max(axis=0) if args.verbose: print("\t...H&E extraction") img = img[ymin:ymax+2, xmin:xmax+2, :] # keep only the region of interest img_h, _ = rgb2he(img, normalize=True) # get the H- component img_h = equalize_adapthist(img_h) img_h = rescale_intensity(img_h, out_range=(0,255)) # make sure the dtype is right for image and the mask: OpenCV is sensitive to data type img_h = img_h.astype(np.uint8) if args.verbose: print("\t...building mask") mask = np.zeros(img_h.shape, dtype=np.uint8) r, c = skimage.draw.polygon(coords[:,1]-ymin, coords[:,0]-xmin) # adapt to new image... 
mask[r,c] = 1 # everything outside the region is black if args.verbose: print("\t...feature detection and computation") img_h *= mask feat = cv2.xfeatures2d.SURF_create(hessianThreshold=th) keyp, desc = feat.detectAndCompute(img_h, mask) if args.verbose: print("\t...", str(len(keyp)), "features extracted") all_descriptors.extend(desc) all_image_names.extend([img_file] * len(keyp)) unique_image_names.append(img_file) # end for X = np.hstack(all_descriptors) X = np.reshape(X, (len(all_descriptors), all_descriptors[0].size), order='C') if args.standardize: # make sure each variable (column) is mean-centered and has unit standard deviation Xm = np.mean(X, axis=0) Xs = np.std(X, axis=0) Xs[np.isclose(Xs, 1e-16)] = 1.0 X = (X - Xm) / Xs y = np.array(y, dtype=int) rng = np.random.RandomState(0) acc = [] # will keep accuracy of the classifier vqs = [] # all quantizers, to find the best for k in np.arange(10, 121, 10): # Method: # -generate a codebook with k codewords # -re-code the data # -compute frequencies # -estimate classification on best 10 features if args.verbose: print("\nK-means clustering (k =", str(k), ")") print("\t...with", str(X.shape[0]), "points") #-codebook and re-coding vq = MiniBatchKMeans(n_clusters=k, random_state=rng, batch_size=500, compute_labels=True, verbose=False) # vector quantizer vq.fit(X) vqs.append(vq) #-codeword frequencies frq = np.zeros((len(unique_image_names), k)) for i in range(vq.labels_.size): frq[unique_image_names.index(all_image_names[i]), vq.labels_[i]] += 1.0 for i in range(len(unique_image_names)): if frq[i, :].sum() > 0: frq[i, :] /= frq[i, :].sum() if args.verbose: print("...\tfeature selection (t-test)") pv = np.ones(k) for i in range(k): _, pv[i] = ttest_ind(frq[y == 0, i], frq[y == 1, i]) idx = np.argsort(pv) # order of the p-values if args.verbose: print("\t...classification performance estimation") clsf = LDA(solver='lsqr', shrinkage='auto').fit(frq[:,idx[:10]], y) # keep top 10 features acc.append(clsf.score(frq[:, idx[:10]], y)) acc = np.array(acc) k = np.arange(10, 121, 10)[acc.argmax()] # best k if args.verbose: print("\nOptimal codebook size:", str(k)) # final codebook: vq = vqs[acc.argmax()] # compute the average distance and std.dev. of the points in each cluster: avg_dist = np.zeros(k) sd_dist = np.zeros(k) for k in range(0, k): d = numpy.linalg.norm(X[vq.labels_ == k, :] - vq.cluster_centers_[k, :], axis=1) avg_dist[k] = d.mean() sd_dist[k] = d.std() with ModelPersistence(args.out_file, 'c', format='pickle') as d: d['codebook'] = vq d['shift'] = Xm d['scale'] = Xs d['standardize'] = args.standardize d['avg_dist_to_centroid'] = avg_dist d['stddev_dist_to_centroid'] = sd_dist return True if __name__ == '__main__': main()
vladpopovici/WSItk
WSItk/tools/wsi_bot_codebook3.py
Python
mit
7,170
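The wsi_bot_codebook3 script above reads its in_file line by line and splits each line into an image path, an annotation path and a 0/1 class label before extracting SURF features from the annotated region. A hedged sketch of what such an input file and invocation could look like; all paths and file names are made-up assumptions.

# Hypothetical in_file contents for the script above:
# each line is "<image file> <annotation file> <label 0/1>" (paths are assumptions).
example_in_file = """\
slides/case_001_roi.png slides/case_001_roi.annot 0
slides/case_002_roi.png slides/case_002_roi.annot 1
"""

# Hypothetical command line (file names are assumptions):
#   python wsi_bot_codebook3.py train_list.txt codebook_model.pkl -t 5000 -s -v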
from django.contrib import admin

# Register your models here.
from .models import Engine

admin.site.register(Engine)
electblake/python-lipsumation
lipsumation/engines/admin.py
Python
mit
119
import _plotly_utils.basevalidators


class MinexponentValidator(_plotly_utils.basevalidators.NumberValidator):
    def __init__(
        self, plotly_name="minexponent", parent_name="scattergeo.marker.colorbar", **kwargs
    ):
        super(MinexponentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            min=kwargs.pop("min", 0),
            **kwargs
        )
plotly/plotly.py
packages/python/plotly/plotly/validators/scattergeo/marker/colorbar/_minexponent.py
Python
mit
503
# coding=utf8


class Error(Exception):
    pass


class TitleRequiredError(Error):
    pass


class TextRequiredError(Error):
    pass


class APITokenRequiredError(Error):
    pass


class GetImageRequestError(Error):
    pass


class ImageUploadHTTPError(Error):
    pass


class FileTypeNotSupported(Error):
    pass


class TelegraphUnknownError(Error):
    pass


class TelegraphPageSaveFailed(Error):
    # reason is unknown
    pass


class TelegraphContentTooBigError(Error):
    def __init__(self, message):
        message += ". Max size is 64kb including markup"
        super(TelegraphContentTooBigError, self).__init__(message)


class TelegraphFloodWaitError(Error):
    def __init__(self, message):
        super(TelegraphFloodWaitError, self).__init__(message)
        self.FLOOD_WAIT_IN_SECONDS = int(message.split('FLOOD_WAIT_')[1])


class TelegraphError(Error):
    def __init__(self, message):
        if 'Unknown error' in message:
            raise TelegraphUnknownError(message)
        elif 'Content is too big' in message:
            raise TelegraphContentTooBigError(message)
        elif 'FLOOD_WAIT_' in message:
            raise TelegraphFloodWaitError(message)
        elif 'PAGE_SAVE_FAILED' in message:
            raise TelegraphPageSaveFailed(message)
        else:
            super(TelegraphError, self).__init__(message)
mercuree/html-telegraph-poster
html_telegraph_poster/errors.py
Python
mit
1,361
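TelegraphError above acts as a dispatcher: constructing it with a raw server message re-raises the more specific subclass, so callers can catch exactly the case they care about. A short illustrative sketch of that behaviour (the message string is an assumption):

# Illustrative use of the dispatching TelegraphError class above.
from html_telegraph_poster.errors import TelegraphError, TelegraphFloodWaitError

try:
    raise TelegraphError('FLOOD_WAIT_23')          # re-raised as TelegraphFloodWaitError
except TelegraphFloodWaitError as e:
    wait_seconds = e.FLOOD_WAIT_IN_SECONDS          # 23, parsed from the message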
import os
import sys
import time

sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import pyfbi


@pyfbi.target
def func1():
    time.sleep(1)


def func2():
    time.sleep(2)


@pyfbi.target
def func3():
    time.sleep(3)


with pyfbi.watch():
    [f() for f in (func1, func2, func3)]
pyfbi.show()

with pyfbi.watch(global_watch=True):
    [f() for f in (func1, func2, func3)]
pyfbi.show()
icoxfog417/pyfbi
tests/demo.py
Python
mit
404
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from ._time_series_insights_client import TimeSeriesInsightsClient
__all__ = ['TimeSeriesInsightsClient']
Azure/azure-sdk-for-python
sdk/timeseriesinsights/azure-mgmt-timeseriesinsights/azure/mgmt/timeseriesinsights/aio/__init__.py
Python
mit
574
from item import Item import random from floatingText import FloatingText,FloatingTextManager from playerConsole import PlayerConsole from serializable import Serializable from eventRegistry import Event from eventRegistry import EventRegistry class Inventory(Serializable): INV_SIZE_X = 10 INV_SIZE_Y = 3 BASE_EVENT_TYPE = 'inv_' def __init__(self, **kwargs): self.items = kwargs.get("items",[[None]*3 for _ in range(10)]) self.hotbar = kwargs.get("hotbar",[None]*10) self.hotbarSelection = kwargs.get("hotbarSelection",0) self.update() def addItem(self, item): EventRegistry.registerEvent(Event( Inventory.BASE_EVENT_TYPE + 'add', {'item':item} )) for x in range(Inventory.INV_SIZE_X): if self.hotbar[x] != None and self.hotbar[x].name == item.name: self.hotbar[x].quantity += item.quantity return if self.hotbar[x] == None and isinstance(item, Item): self.hotbar[x] = item return for x in range(Inventory.INV_SIZE_X): for y in range(Inventory.INV_SIZE_Y): if self.items[x][y] != None and self.items[x][y].name == item.name: self.items[x][y].quantity += item.quantity return if self.items[x][y] == None and isinstance(item, Item): self.items[x][y] = item return def removeItem(self, item): EventRegistry.registerEvent(Event( Inventory.BASE_EVENT_TYPE + 'remove', {'item':item} )) for x in range(Inventory.INV_SIZE_X): if self.hotbar[x] == item: self.hotbar[x] = None return for x in range(Inventory.INV_SIZE_X): for y in range(Inventory.INV_SIZE_Y): if self.items[x][y] == item: self.items[x][y] = None return def removeItemQuantity(self, item, quantity): """ NOTE: Takes an item NAME as the 'item' param, not an Item object. """ EventRegistry.registerEvent(Event( Inventory.BASE_EVENT_TYPE + 'remove_quantity', {'item_name':item, 'quantity':quantity} )) quantityRemoved = 0; for x in range(Inventory.INV_SIZE_X): if self.hotbar[x] is not None and self.hotbar[x].name == item: currItem = self.hotbar[x] if currItem.quantity > quantity: currItem.quantity -= quantity quantityRemoved = quantity elif currItem.quantity <= quantity: quantityRemoved += currItem.quantity self.hotbar[x] = None if(quantityRemoved >= quantity): return for x in range(Inventory.INV_SIZE_X): for y in range(Inventory.INV_SIZE_Y): if self.items[x][y] is not None and self.items[x][y].name == item: currItem = self.items[x][y] if currItem.quantity > quantity: currItem.quantity -= quantity quantityRemoved = quantity elif currItem.quantity <= quantity: quantityRemoved += currItem.quantity self.items[x][y] = None if(quantityRemoved >= quantity): return def removeHotbarItem(self,item): EventRegistry.registerEvent(Event( Inventory.BASE_EVENT_TYPE + 'remove', {'item':item} )) for x in range(Inventory.INV_SIZE_X): if self.hotbar[x] == item: self.hotbar[x] = None return def placeItem(self, item, pos): EventRegistry.registerEvent(Event( Inventory.BASE_EVENT_TYPE + 'add', {'item':item} )) if isinstance(item,Item): oldItem = self.items[int(pos[0])][int(pos[1])] self.items[int(pos[0])][int(pos[1])] = item return oldItem def placeItemInHotbar(self, item, pos): EventRegistry.registerEvent(Event( Inventory.BASE_EVENT_TYPE + 'add', {'item':item} )) if isinstance(item,Item): oldItem = self.hotbar[pos[0]] self.hotbar[pos[0]] = item return oldItem def getTotalItemQuantity(self, item): """ Gets total quantity held of a specific item accross all stacks within Inventory """ quantity = 0; for x in range(Inventory.INV_SIZE_X): for y in range(Inventory.INV_SIZE_Y): if self.items[x][y] is not None: if self.items[x][y].name == item: quantity += self.items[x][y].quantity for x in 
range(Inventory.INV_SIZE_X): if self.hotbar[x] is not None: if self.hotbar[x].name == item: quantity += self.hotbar[x].quantity return quantity def addItemToHotbar(item): EventRegistry.registerEvent(Event( Inventory.BASE_EVENT_TYPE + 'add', {'item':item} )) for x in range(INV_SIZE_X): if hotbar[x] == None: hotbar[x] = item return def update(self): for x in range(Inventory.INV_SIZE_X): for y in range(Inventory.INV_SIZE_Y): if self.items[x][y] is not None and self.items[x][y].quantity <= 0: self.items[x][y] = None if self.items[x][y] is not None: self.items[x][y].update() for x in range(Inventory.INV_SIZE_X): if self.hotbar[x] is not None and self.hotbar[x].quantity <= 0: self.hotbar[x] = None if self.hotbar[x] is not None: self.hotbar[x].update()
benjamincongdon/adept
inventory.py
Python
mit
6,093
#coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################

import G2R


class ScSp(G2R.SpSyntax):
    def Show(self, Flag, Attrs, US, UT, Tmp, FS):
        sw = ''
        name, Attrs = self.Check(Flag, Attrs, UT, FS)
        if Attrs['k'] == 'Main':
            sw += ' $ store.chapter='
            sw += "'Chapter." + Attrs['cp'] + Attrs['sc'] + "'\n"
        return sw
dtysky/Gal2Renpy
Gal2Renpy/SpSyntax/ScSp.py
Python
mit
352
import os os.environ.setdefault('DJANGO_SETTINGS_MODULE','tango_with_django_project.settings') import django django.setup() from rango.models import Category, Page def populate(): # First, we will create lists of dictionaries containing the pages # we want to add into each category. # Then we will create a dictionary of dictionaries for our categories. # This might seem a little bit confusing, but it allows us to iterate # through each data structure, and add the data to our models. python_pages = [ {"title": "Official Python Tutorial", "url": "http://docs.python.org/2/tutorial/", "views": 32}, {"title": "How to Think like a Computer Scientist", "url": "http://www.greenteapress.com/thinkpython/", "views": 16}, {"title": "Learn Python in 10 Minutes", "url": "http://www.korokithakis.net/tutorials/python/", "views": 8}] django_pages = [ {"title": "Official Django Tutorial", "url": "https://docs.djangoproject.com/en/1.9/intro/tutorial01/", "views": 32}, {"title": "Django Rocks", "url": "http://www.djangorocks.com/", "views": 16}, {"title": "How to Tango with Django", "url":"http://www.tangowithdjango.com/", "views": 8}] other_pages = [ {"title": "Bottle", "url": "http://bottlepy.org/docs/dev/", "views": 32}, {"title": "Flask", "url": "http://flask.pocoo.org", "views": 16} ] cats = {"Python": {"pages": python_pages, "views":128, "likes":64}, "Django": {"pages": django_pages, "views":64, "likes":32}, "Other Frameworks": {"pages": other_pages, "views":32, "likes":16}, "Python User Group": {"pages": [], "views": 34, "likes": 16}, "Pascal": {"pages": [], "views": 32, "likes": 16}, "Perl": {"pages": [], "views": 32, "likes": 16}, "Php": {"pages": [], "views": 32, "likes": 16}, "Prolog": {"pages": [], "views": 32, "likes": 16}, "Programming": {"pages": [], "views": 32, "likes": 16} } # The code below goes through the cats dictionary, then adds each category, # and then adds all the associated pages for that category. # if you are using Python 2.x then use cats.iteritems() see # http://docs.quantifiedcode.com/python-anti-patterns/readability/ # for more information about how to iterate over a dictionary properly. for cat, cat_data in cats.items(): c = add_cat(cat, cat_data["views"], cat_data["likes"]) for p in cat_data["pages"]: add_page(c, p["title"], p["url"],p["views"]) # print out the categories we have added for c in Category.objects.all(): for p in Page.objects.filter(category=c): print("- {0} -{1}".format(str(c),str(p))) def add_cat(name, views, likes): c = Category.objects.get_or_create(name=name)[0] c.views = views c.likes = likes c.save() return c def add_page(cat, title, url, views=0): p = Page.objects.get_or_create(category=cat, title=title)[0] p.url = url p.views = views p.save() return p # Start execution here! if __name__ == '__main__': print("Starting Rango population script...") populate()
cclai999/rango
tango_with_django_project/populate_rango.py
Python
mit
3,340
from unittest import TestCase

from CodeEval.challenge_7 import challenge


class Challenge7Test(TestCase):
    def test_input_1(self):
        self.assertEqual(5, challenge("- * / 15 - 7 + 1 1 3 + 2 + 1 1"))

    def test_input_2(self):
        self.assertEqual(20, challenge("* + 2 3 4"))
andrewzwicky/puzzles
CodeEval/test_challenge_7.py
Python
mit
289
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

import tensorflow as tf
sess = tf.InteractiveSession()

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

sess.run(tf.global_variables_initializer())

y = tf.matmul(x, W) + b

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

for _ in range(1000):
    batch = mnist.train.next_batch(100)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
StephanH84/reinforcement_learning_explorations
tensorflow/src/MNIST1.py
Python
mit
912
from pqhelper import base, capture, versus # these parts are heavy so keep one common object for the module _state_investigator = base.StateInvestigator() def versus_summaries(turns=2, sims_to_average=2, async_results_q=None): """Return summaries of the likely resutls of each available action.. Arguments: - turns: how many turns to simulate. - in 2013, 1 is fast (seconds), 2 is slow (seconds), 3 who knows - sims_to_average: how many times to run the simulation to get more representative average results of each action. - async_results_q: provide a multiprocessing Queue on which the summaries of each turn will be placed. this is an asynchronous alternative to waiting for the final return value """ board, player, opponent, extra_actions = _state_investigator.get_versus() if extra_actions: extra_actions = 1 # limit value for realistic time if board is None: return tuple() averaged_summaries = list() # default return value is empty # keep a separate advisor for each simulation to average advisors = list() for i in range(sims_to_average): advisor = versus.Advisor() advisor.reset(board, player, opponent, extra_actions) advisors.append(advisor) # provide async sim results per turn; final results as return value for turn in range(turns): # store {action: list of results from each simulation} summaries_by_action = dict() for i in range(sims_to_average): advisor = advisors[i] advisor.simulate_next_turn() for s in advisor.sorted_current_summaries(): summaries_by_action.setdefault(s.action, list()).append(s) # now all sims and analysis for this turn have been completed averaged_summaries = list() for action, summaries in summaries_by_action.items(): board = summaries[0].board # any board. they are all the same action = summaries[0].action # any action. they are all the same score_sum = sum(s.score for s in summaries) score_avg = score_sum / len(summaries) manadrain_sum = sum(s.mana_drain_leaves for s in summaries) leaves_sum = sum(s.total_leaves for s in summaries) avg_summary = base.Summary(board, action, score_avg, manadrain_sum, leaves_sum) averaged_summaries.append(avg_summary) averaged_summaries.sort(key=lambda s: s.score, reverse=True) # option to provide the results asynchronouslys if not async_results_q is None: async_results_q.put(averaged_summaries) return averaged_summaries def capture_solution(): board = _state_investigator.get_capture() if board is None: return tuple() steps = capture.capture(board) return steps if __name__ == '__main__': pass
kobejohn/PQHelper
pqhelper/easy.py
Python
mit
2,931
"""Convenient imports""" from pswingw2.client import send_simple_message # noqa from pswingw2.client import send # noqa from pswingw2.client import send_single # noqa from pswingw2.client import send_batch # noqa from pswingw2.client import Client # noqa from pswingw2.config_defaults import get_simple_config as config # noqa
ZettaIO/pswingw2py
pswingw2/__init__.py
Python
mit
334
from django.conf.urls import url

from articles import views

urlpatterns = [
    url(r'^$', views.list_all, name='articles'),
    url(r'^(?P<id>[0-9]+)/$', views.article, name='article'),
    url(r'^admin/$', views.admin, name='admin_articles'),
    url(r'^add/$', views.add, name='add_article'),
    url(r'^delete/$', views.delete, name='del_article'),
    url(r'^file-upload/$', views.upload_file, name='upload_file'),
    url(r'^edit/(?P<id>[0-9]+)/$', views.edit, name='edit_article'),
    url(r'^edit/(?P<id>[0-9]+)/(?P<history_id>[0-9]+)/$', views.edit, name='edit_article'),
    url(r'^history/(?P<id>[0-9]+)/$', views.history, name='history_article'),
    url(r'^update_state/(?P<id>[0-9]+)/(?P<state>[A-Z]+)/$', views.update_state, name='update_state_article'),
]
Dubrzr/golb
articles/urls.py
Python
mit
774
# Copyright (C) 2011 by Stefano Palazzo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import zlib
import struct
import subprocess  # required by show(); this import was missing from the original


def make(image: '[[[r, g, b, a, ], ], ]') -> bytes:
    '''
        Create PNG image from RGBA data

        Expects a list of lines of pixels of R, G, B, and Alpha values.
        I.e:

            [
                [ [0, 0, 0, 0], [0, 0, 0, 0], ],
                [ [0, 0, 0, 0], [0, 0, 0, 0], ],
            ]

    '''
    def cr_png(buf, width, height):
        def png_pack(png_tag, data):
            chunk_head = png_tag + data
            return (struct.pack("!I", len(data)) + chunk_head +
                    struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
        raw_data = (b"".join(b'\x00' + buf[span:span + (width * 4)]
                    for span in range((height - 1) * (width * 4), -1,
                                      - (width * 4))))
        return b"".join([
            b'\x89PNG\r\n\x1a\n',
            png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
            png_pack(b'IDAT', zlib.compress(raw_data, 9)),
            png_pack(b'IEND', b'')])

    def make_buffer(image):
        def bufgen(nested):
            for i in nested[::-1]:
                for j in i:
                    for k in j if len(j) == 4 else list(j) + [255]:
                        yield k
        height, width = len(image), len(image[0])
        return bytes(bufgen(image)), width, height

    return cr_png(*make_buffer(image))


def show(png):
    open("/tmp/test.png", "wb").write(png)
    subprocess.getoutput("xdg-open /tmp/test.png")
    subprocess.getoutput("rm /tmp/test.png")
sfstpala/png.py
png.py
Python
mit
2,602
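make() above expects rows of RGBA pixels (RGB triples get an alpha of 255 appended). A small illustrative example writing a 2x2 image; the output file name is arbitrary.

# Illustrative use of make() from png.py above.
from png import make

image = [
    [[255, 0, 0, 255], [0, 255, 0, 255]],   # red, green
    [[0, 0, 255],      [0, 0, 0, 0]],       # blue (alpha 255 appended), transparent
]
png_bytes = make(image)
with open("tiny.png", "wb") as f:
    f.write(png_bytes)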
# coding: utf-8 """ ORCID Member No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: Latest Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class CreditName(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, value=None): """ CreditName - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'value': 'str' } self.attribute_map = { 'value': 'value' } self._value = value @property def value(self): """ Gets the value of this CreditName. :return: The value of this CreditName. :rtype: str """ return self._value @value.setter def value(self, value): """ Sets the value of this CreditName. :param value: The value of this CreditName. :type: str """ self._value = value def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, CreditName): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
orcid_api/models/credit_name.py
Python
mit
2,882
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.mgmt.core.exceptions import ARMErrorFormat from msrest import Serializer from .. import models as _models from .._vendor import _convert_request, _format_url_section T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False def build_list_request( subscription_id: str, resource_group_name: str, resource_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2020-11-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) class PrivateLinkResourcesOperations(object): """PrivateLinkResourcesOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.containerservice.v2020_11_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def list( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> "_models.PrivateLinkResourcesListResult": """Gets a list of private link resources in the specified managed cluster. 
Gets a list of private link resources in the specified managed cluster. The operation returns properties of each private link resource. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param resource_name: The name of the managed cluster resource. :type resource_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: PrivateLinkResourcesListResult, or the result of cls(response) :rtype: ~azure.mgmt.containerservice.v2020_11_01.models.PrivateLinkResourcesListResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResourcesListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.list.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateLinkResourcesListResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources'} # type: ignore
Azure/azure-sdk-for-python
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_11_01/operations/_private_link_resources_operations.py
Python
mit
5,923
import ply.yacc as yacc

from bsi_lexer import tokens
from bsi_object import BsiObject
from bsi_array import BsiArray


def p_object_pairs(p):
    'obj : pairs'
    p[0] = BsiObject()
    for pair in p[1]:
        p[0].set(pair[0], pair[1])


def p_pairs_pair(p):
    'pairs : pair'
    p[0] = [p[1]]


def p_pairs_pair_pairs(p):
    'pairs : pair pairs'
    p[0] = [p[1]] + p[2]


def p_pair_key_eq_value(p):
    'pair : KEY EQ val'
    p[0] = (p[1], p[3])


def p_val_num(p):
    'val : NUM'
    p[0] = p[1]


def p_val_string(p):
    'val : STRING'
    p[0] = p[1]


def p_val_array(p):
    'val : L_SQ_BR vals R_SQ_BR'
    p[0] = BsiArray(p[2])


def p_array_val(p):
    'vals : val'
    p[0] = [p[1]]


def p_array_vals(p):
    'vals : val vals'
    p[0] = [p[1]] + p[2]


def p_val_nested_obj(p):
    'val : L_BRACE obj R_BRACE'
    p[0] = p[2]


def p_error(p):
    print(p)
    print("Syntax error in input!")


bsi_parser = yacc.yacc()
jshou/bsi
bsi/bsi_parser.py
Python
mit
930
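Reading the grammar rules above, an input is a sequence of key = value pairs where a value can be a number, a string, an array in square brackets or a nested object in braces. A hedged sketch of driving the parser follows; the concrete lexical form of KEY, STRING and NUM tokens is defined in bsi_lexer (not shown), so the sample text and import path are assumptions.

# Hypothetical input for bsi_parser above; the exact token syntax is assumed.
from bsi.bsi_parser import bsi_parser   # assumes bsi/ is importable as a package

sample = 'name = "demo" sizes = [ 1 2 3 ] child = { depth = 2 }'
result = bsi_parser.parse(sample)       # a BsiObject built by p_object_pairs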
"""Build a sentiment analysis / polarity model Sentiment analysis can be casted as a binary text classification problem, that is fitting a linear classifier on features extracted from the text of the user messages so as to guess wether the opinion of the author is positive or negative. In this examples we will use a movie review dataset. """ # Author: Olivier Grisel <[email protected]> # Edited: Kevin Wong # License: Simplified BSD import sys from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.svm import LinearSVC from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.datasets import load_files from sklearn.model_selection import train_test_split from sklearn import metrics if __name__ == "__main__": # NOTE: we put the following in a 'if __name__ == "__main__"' protected # block to be able to use a multi-core grid search that also works under # Windows, see: http://docs.python.org/library/multiprocessing.html#windows # The multiprocessing module is used as the backend of joblib.Parallel # that is used when n_jobs != 1 in GridSearchCV # the training data folder must be passed as first argument movie_reviews_data_folder = sys.argv[1] dataset = load_files(movie_reviews_data_folder, shuffle=False) print("n_samples: %d" % len(dataset.data)) # split the dataset in training and test set: docs_train, docs_test, y_train, y_test = train_test_split( dataset.data, dataset.target, test_size=0.25, random_state=None) # TASK: Build a vectorizer / classifier pipeline that filters out tokens # that are too rare or too frequent pipeline = Pipeline([ ('vect', TfidfVectorizer(min_df=3, max_df=0.95)), ('clf', LinearSVC(C=1000)), ]) ##TfidfVectorizer params: #min_df: ignore terms that have doc frequency < threshold (cut-off). #float in [0,1] represents proportion of docs, integer represents absolute counts #max_df: similar # TASK: Build a grid search to find out whether unigrams or bigrams are # more useful. # Fit the pipeline on the training set using grid search for the parameters parameters = { 'vect__ngram_range': [(1, 1), (1, 2)], } grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1) grid_search.fit(docs_train, y_train) # TASK: print the cross-validated scores for the each parameters set # explored by the grid search n_candidates = len(grid_search.cv_results_['params']) print("n_candidates = {}".format(n_candidates)) for i in range(n_candidates): print(i, 'params - %s; mean - %0.2f; std - %0.2f' % (grid_search.cv_results_['params'][i], grid_search.cv_results_['mean_test_score'][i], grid_search.cv_results_['std_test_score'][i])) # TASK: Predict the outcome on the testing set and store it in a variable # named y_predicted y_predicted = grid_search.predict(docs_test) # Print the classification report print(metrics.classification_report(y_test, y_predicted, target_names=dataset.target_names)) # Print and plot the confusion matrix cm = metrics.confusion_matrix(y_test, y_predicted) print(cm) # import matplotlib.pyplot as plt # plt.matshow(cm) # plt.show() #new-- # Predict the result on some short new sentences: sentences = [ u'Wow! this movie was amazing. I was impressed by the in-depth action, humor, and special effects! I loved the scene where the main character saved the day! Awesome and would see again! I definitely recommend this to anyone!', u'No offense, but this was one of the worst movies I\'ve seen in many years. It had little emotion and no character development. Waste of time. Ugh! 
', ] predicted2 = grid_search.predict(sentences) for s, p in zip(sentences, predicted2): print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p])) from sklearn.externals import joblib joblib.dump(grid_search, 'moviesentiments.pkl') #load with using: #grid_search = joblib.load('moviesentiments.pkl')
gnublet/py_explorations
sklearn/exercise_02_sentiment.py
Python
mit
4,170
#-*- coding: UTF-8 -*- import os import web import sys sys.path.append("..") import News_Recommend import similar_doc import jieba import config import word2Vec.word2vec as Word2Vec path="../data/netease" query=News_Recommend.CosineScore(path+config.inverted_Dictionary_filename,path+config.inverted_index_filename,config.buff_size,config.crawled_web_numbers) recommand=News_Recommend.FastCosineScore(path+config.inverted_Dictionary_filename,path+config.inverted_index_filename,config.cache_size,path[:-7]+config.stopword_filename,config.crawled_web_numbers) #recommand.get_from_file(path+config.similar_filename) #query=News_Recommend.FastCosineScore(filename[:-4]+'_index_Dictionary.txt',filename[:-4]+'_inverted_index.txt',buff_size,100000) id_index=similar_doc.doc_id_index(path+config.index_filename,path+config.data_filename,config.cache_size) punct = set(u'''/+%#:!),.:;?]}¢'"、。〉》」』】〕〗〞︰︱︳﹐、﹒ ﹔﹕﹖﹗﹚﹜﹞!),.:;?|}︴︶︸︺︼︾﹀﹂﹄﹏、~¢ 々‖•·ˇˉ―--′’”([{£¥'"‵〈《「『【〔〖([{£¥〝︵︷︹︻ ︽︿﹁﹃﹙﹛﹝({“‘-—_…''') Letters_and_numbers = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') word2vec=Word2Vec.WORD2VEC("../",'../data/'+config.word2Vec_filename) render = web.template.render('templates/') urls=( "/","index", "/news","news" ) app = web.application(urls,globals()) class index: def __init__(self): pass def GET(self): data=web.input() if data: searchword=data.searchword else: searchword='' news_list=list() topic=list() if searchword: cut = jieba.cut_for_search(searchword) word_list = [] for word in cut: if word not in punct and word not in Letters_and_numbers: word_list.append(word.encode("utf-8")) topK=query.calculate(word_list,config.query_return_numbers) for k in topK: data = dict() title, content, url= id_index.get_data(k) data['id'] = k data['content'] = content.decode("utf-8")[:config.query_return_snipper_size] data['title']=title.decode("utf-8") data['url'] = url.decode("utf-8") news_list.append(data) del data,cut,word_list,word,topK,title,content,url #word2Vec推荐相似主题 word2vec.cal(searchword.encode('utf-8')) print word2vec.result.length if word2vec.result.length==0:#词不存在,长度为1 pass else: for i in range(config.recommand_topic_numbers): topic.append(word2vec.result.word[i].char) return render.index(searchword,news_list,topic) class news: def __init__(self): pass def GET(self): data=web.input() if data: ID=data.id news = dict() title, content, url=id_index.get_data(int(ID)) news['content'] = content.decode("utf-8") news['title'] = title.decode("utf-8") news['url'] = url.decode("utf-8") recomand=[] #在线方法 cut = jieba.cut_for_search(content) word_list = [] for word in cut: if word not in punct and word not in Letters_and_numbers: # 计算文档间相似度,必须去停用词,否则太慢 if recommand.stopword.has_key(word.encode("utf-8")): pass else: word_list.append(word.encode("utf-8")) topk= recommand.calculate(word_list, config.recommand_numbers, 10) for i in topk:#在线方法 #for i in recommand.dic[int(ID)]:#离线方法 if i !=int(ID): title, content, url=id_index.get_data(i) recomand.append([title.decode('utf-8'),content.decode('utf-8'),url.decode('utf-8')]) news['recommand']=recomand del title,content,url,recomand else: ID='' news = dict() news['title'] = "No Such News" news['content'] = "Oh No!" news['url'] = "#" news['recommand']=[['','',''] for m in range(config.recommand_numbers)] return render.news(news) if __name__ == "__main__": app = web.application(urls,globals()) app.run()
Google1234/Information_retrieva_Projectl-
web/main.py
Python
mit
4,577
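Both handlers above rely on the same preprocessing step: segment the query or article with jieba, drop punctuation and bare ASCII characters, and hand the remaining terms to the index. A small self-contained sketch of that step follows; the query string is hypothetical and the filter sets are abbreviated versions of the ones defined in the file.

# Sketch of the tokenize-and-filter step used before querying the index.
# Requires the third-party 'jieba' package; the filter sets below are
# abbreviated versions of the ones defined in the file above.
import jieba

punct = set(u'、。,.!?:;()《》「」')
letters_and_numbers = set('abcdefghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')

def query_terms(searchword):
    """Return the index terms for a raw query string."""
    return [word for word in jieba.cut_for_search(searchword)
            if word not in punct and word not in letters_and_numbers]

print(query_terms(u'网易新闻推荐'))  # hypothetical query
# The resulting list is what the handler passes to query.calculate(word_list, ...)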
"""Exceptions for APIs.""" import werkzeug.exceptions class ApiException(Exception): """An exception raised due to user error. Exceptions derived from ApiException will be logged automatically if raised. User will receive appropriate error response according to the content type. """ description = 'API error.' error_type = 'API_ERROR' data = None def __init__(self, message, description=None, status_code=None, **kwargs): """Constructor. :param message: the message returned to the user. :param description: the message sent to the log. """ self.message = message if description is not None or not hasattr(self, 'description'): self.description = description if status_code is not None: self.code = status_code if kwargs: self.data = kwargs class NotFound(ApiException, werkzeug.exceptions.NotFound): """Not found.""" description = 'Not found.' error_type = 'NOT_FOUND' class BadRequest(ApiException, werkzeug.exceptions.BadRequest): """Bad request.""" description = 'Bad request.' error_type = 'BAD_REQUEST' class Forbidden(ApiException, werkzeug.exceptions.Forbidden): """Forbidden.""" description = 'Forbidden.' error_type = 'FORBIDDEN' class Unauthorized(ApiException, werkzeug.exceptions.Unauthorized): """Unauthorized.""" description = 'Unauthorized.' error_type = 'UNAUTHORIZED'
paylogic/atilla
atilla/exceptions.py
Python
mit
1,492
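Because each concrete class above inherits from both ApiException and the matching werkzeug exception, a raised error carries the user-facing message, the extra keyword data, and the HTTP status code from werkzeug. A minimal usage sketch; the load_user function and the user id are hypothetical, only the exception classes come from the module.

# Sketch: raising and inspecting one of the API exceptions defined above.
# 'load_user' and the user id are hypothetical.
from atilla.exceptions import ApiException, NotFound

def load_user(user_id):
    raise NotFound('User %d does not exist.' % user_id, user_id=user_id)

try:
    load_user(42)
except ApiException as exc:
    # werkzeug.exceptions.NotFound supplies the class attribute .code == 404
    print(exc.code, exc.error_type, exc.message, exc.data)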
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField


class PhoneNumber(models.Model):
    provider = models.ForeignKey(
        to='providers.Provider'
    )
    phone_number = PhoneNumberField()
aniruddha-adhikary/bookit
bookit/providers/models/phone_number.py
Python
mit
235
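The model above is a one-to-many link from a provider to its phone numbers. With Django and django-phonenumber-field installed, a hypothetical shell session against it could look like this; the Provider import path and its 'name' field are assumptions, only PhoneNumber and its two fields come from the file.

# Sketch of using the model above from a Django shell; assumes the project
# is configured and that Provider exposes a 'name' field (assumption).
from providers.models import Provider                       # assumed path
from providers.models.phone_number import PhoneNumber

provider = Provider.objects.create(name='Example Clinic')
number = PhoneNumber.objects.create(provider=provider,
                                    phone_number='+8801700000000')
print(number.phone_number.as_e164)       # normalized by PhoneNumberField
print(provider.phonenumber_set.count())  # default reverse accessor of the ForeignKey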
""" Blackboard Analysis Tools Copyright 2013, Jeroen Doggen, [email protected] """ import sys from blackboard_analysis_tools.blackboard import BlackboardAnalysisTools def run(): """Run the main program""" assignment_analyser = BlackboardAnalysisTools() #assignment_analyser.init() assignment_analyser.run() return(assignment_analyser.exit_value()) if __name__ == "__main__": sys.exit(run())
jeroendoggen/blackboard-analysis-tools
blackboard_analysis_tools/main.py
Python
mit
434
from __future__ import unicode_literals import base64 import binascii import hashlib import importlib from collections import OrderedDict from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.signals import setting_changed from django.dispatch import receiver from django.utils import lru_cache from django.utils.crypto import ( constant_time_compare, get_random_string, pbkdf2, ) from django.utils.encoding import force_bytes, force_str, force_text from django.utils.module_loading import import_string from django.utils.translation import ugettext_noop as _ UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX def is_password_usable(encoded): if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX): return False try: identify_hasher(encoded) except ValueError: return False return True def check_password(password, encoded, setter=None, preferred='default'): """ Returns a boolean of whether the raw password matches the three part encoded digest. If setter is specified, it'll be called when you need to regenerate the password. """ if password is None or not is_password_usable(encoded): return False preferred = get_hasher(preferred) hasher = identify_hasher(encoded) must_update = hasher.algorithm != preferred.algorithm if not must_update: must_update = preferred.must_update(encoded) is_correct = hasher.verify(password, encoded) if setter and is_correct and must_update: setter(password) return is_correct def make_password(password, salt=None, hasher='default'): """ Turn a plain-text password into a hash for database storage Same as encode() but generates a new random salt. If password is None then a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string will be returned which disallows logins. Additional random string reduces chances of gaining access to staff or superuser accounts. See ticket #20079 for more info. """ if password is None: return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH) hasher = get_hasher(hasher) if not salt: salt = hasher.salt() return hasher.encode(password, salt) @lru_cache.lru_cache() def get_hashers(): hashers = [] for hasher_path in settings.PASSWORD_HASHERS: hasher_cls = import_string(hasher_path) hasher = hasher_cls() if not getattr(hasher, 'algorithm'): raise ImproperlyConfigured("hasher doesn't specify an " "algorithm name: %s" % hasher_path) hashers.append(hasher) return hashers @lru_cache.lru_cache() def get_hashers_by_algorithm(): return {hasher.algorithm: hasher for hasher in get_hashers()} @receiver(setting_changed) def reset_hashers(**kwargs): if kwargs['setting'] == 'PASSWORD_HASHERS': get_hashers.cache_clear() get_hashers_by_algorithm.cache_clear() def get_hasher(algorithm='default'): """ Returns an instance of a loaded password hasher. If algorithm is 'default', the default hasher will be returned. This function will also lazy import hashers specified in your settings file if needed. """ if hasattr(algorithm, 'algorithm'): return algorithm elif algorithm == 'default': return get_hashers()[0] else: hashers = get_hashers_by_algorithm() try: return hashers[algorithm] except KeyError: raise ValueError("Unknown password hashing algorithm '%s'. " "Did you specify it in the PASSWORD_HASHERS " "setting?" % algorithm) def identify_hasher(encoded): """ Returns an instance of a loaded password hasher. 
Identifies hasher algorithm by examining encoded hash, and calls get_hasher() to return hasher. Raises ValueError if algorithm cannot be identified, or if hasher is not loaded. """ # Ancient versions of Django created plain MD5 passwords and accepted # MD5 passwords with an empty salt. if ((len(encoded) == 32 and '$' not in encoded) or (len(encoded) == 37 and encoded.startswith('md5$$'))): algorithm = 'unsalted_md5' # Ancient versions of Django accepted SHA1 passwords with an empty salt. elif len(encoded) == 46 and encoded.startswith('sha1$$'): algorithm = 'unsalted_sha1' else: algorithm = encoded.split('$', 1)[0] return get_hasher(algorithm) def mask_hash(hash, show=6, char="*"): """ Returns the given hash, with only the first ``show`` number shown. The rest are masked with ``char`` for security reasons. """ masked = hash[:show] masked += char * len(hash[show:]) return masked class BasePasswordHasher(object): """ Abstract base class for password hashers When creating your own hasher, you need to override algorithm, verify(), encode() and safe_summary(). PasswordHasher objects are immutable. """ algorithm = None library = None def _load_library(self): if self.library is not None: if isinstance(self.library, (tuple, list)): name, mod_path = self.library else: mod_path = self.library try: module = importlib.import_module(mod_path) except ImportError as e: raise ValueError("Couldn't load %r algorithm library: %s" % (self.__class__.__name__, e)) return module raise ValueError("Hasher %r doesn't specify a library attribute" % self.__class__.__name__) def salt(self): """ Generates a cryptographically secure nonce salt in ASCII """ return get_random_string() def verify(self, password, encoded): """ Checks if the given password is correct """ raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method') def encode(self, password, salt): """ Creates an encoded database value The result is normally formatted as "algorithm$salt$hash" and must be fewer than 128 characters. """ raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method') def safe_summary(self, encoded): """ Returns a summary of safe values The result is a dictionary and will be used where the password field must be displayed to construct a safe representation of the password. """ raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method') def must_update(self, encoded): return False class PBKDF2PasswordHasher(BasePasswordHasher): """ Secure password hashing using the PBKDF2 algorithm (recommended) Configured to use PBKDF2 + HMAC + SHA256 with 20000 iterations. The result is a 64 byte binary string. Iterations may be changed safely but you must rename the algorithm if you change SHA256. 
""" algorithm = "pbkdf2_sha256" iterations = 20000 digest = hashlib.sha256 def encode(self, password, salt, iterations=None): assert password is not None assert salt and '$' not in salt if not iterations: iterations = self.iterations hash = pbkdf2(password, salt, iterations, digest=self.digest) hash = base64.b64encode(hash).decode('ascii').strip() return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash) def verify(self, password, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt, int(iterations)) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('iterations'), iterations), (_('salt'), mask_hash(salt)), (_('hash'), mask_hash(hash)), ]) def must_update(self, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) return int(iterations) != self.iterations class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher): """ Alternate PBKDF2 hasher which uses SHA1, the default PRF recommended by PKCS #5. This is compatible with other implementations of PBKDF2, such as openssl's PKCS5_PBKDF2_HMAC_SHA1(). """ algorithm = "pbkdf2_sha1" digest = hashlib.sha1 class BCryptSHA256PasswordHasher(BasePasswordHasher): """ Secure password hashing using the bcrypt algorithm (recommended) This is considered by many to be the most secure algorithm but you must first install the bcrypt library. Please be warned that this library depends on native C code and might cause portability issues. """ algorithm = "bcrypt_sha256" digest = hashlib.sha256 library = ("bcrypt", "bcrypt") rounds = 12 def salt(self): bcrypt = self._load_library() return bcrypt.gensalt(self.rounds) def encode(self, password, salt): bcrypt = self._load_library() # Need to reevaluate the force_bytes call once bcrypt is supported on # Python 3 # Hash the password prior to using bcrypt to prevent password truncation # See: https://code.djangoproject.com/ticket/20138 if self.digest is not None: # We use binascii.hexlify here because Python3 decided that a hex encoded # bytestring is somehow a unicode. password = binascii.hexlify(self.digest(force_bytes(password)).digest()) else: password = force_bytes(password) data = bcrypt.hashpw(password, salt) return "%s$%s" % (self.algorithm, force_text(data)) def verify(self, password, encoded): algorithm, data = encoded.split('$', 1) assert algorithm == self.algorithm bcrypt = self._load_library() # Hash the password prior to using bcrypt to prevent password truncation # See: https://code.djangoproject.com/ticket/20138 if self.digest is not None: # We use binascii.hexlify here because Python3 decided that a hex encoded # bytestring is somehow a unicode. 
password = binascii.hexlify(self.digest(force_bytes(password)).digest()) else: password = force_bytes(password) # Ensure that our data is a bytestring data = force_bytes(data) # force_bytes() necessary for py-bcrypt compatibility hashpw = force_bytes(bcrypt.hashpw(password, data)) return constant_time_compare(data, hashpw) def safe_summary(self, encoded): algorithm, empty, algostr, work_factor, data = encoded.split('$', 4) assert algorithm == self.algorithm salt, checksum = data[:22], data[22:] return OrderedDict([ (_('algorithm'), algorithm), (_('work factor'), work_factor), (_('salt'), mask_hash(salt)), (_('checksum'), mask_hash(checksum)), ]) class BCryptPasswordHasher(BCryptSHA256PasswordHasher): """ Secure password hashing using the bcrypt algorithm This is considered by many to be the most secure algorithm but you must first install the bcrypt library. Please be warned that this library depends on native C code and might cause portability issues. This hasher does not first hash the password which means it is subject to the 72 character bcrypt password truncation, most use cases should prefer the BCryptSha512PasswordHasher. See: https://code.djangoproject.com/ticket/20138 """ algorithm = "bcrypt" digest = None class SHA1PasswordHasher(BasePasswordHasher): """ The SHA1 password hashing algorithm (not recommended) """ algorithm = "sha1" def encode(self, password, salt): assert password is not None assert salt and '$' not in salt hash = hashlib.sha1(force_bytes(salt + password)).hexdigest() return "%s$%s$%s" % (self.algorithm, salt, hash) def verify(self, password, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), mask_hash(salt, show=2)), (_('hash'), mask_hash(hash)), ]) class MD5PasswordHasher(BasePasswordHasher): """ The Salted MD5 password hashing algorithm (not recommended) """ algorithm = "md5" def encode(self, password, salt): assert password is not None assert salt and '$' not in salt hash = hashlib.md5(force_bytes(salt + password)).hexdigest() return "%s$%s$%s" % (self.algorithm, salt, hash) def verify(self, password, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), mask_hash(salt, show=2)), (_('hash'), mask_hash(hash)), ]) class UnsaltedSHA1PasswordHasher(BasePasswordHasher): """ Very insecure algorithm that you should *never* use; stores SHA1 hashes with an empty salt. This class is implemented because Django used to accept such password hashes. Some older Django installs still have these values lingering around so we need to handle and upgrade them properly. 
""" algorithm = "unsalted_sha1" def salt(self): return '' def encode(self, password, salt): assert salt == '' hash = hashlib.sha1(force_bytes(password)).hexdigest() return 'sha1$$%s' % hash def verify(self, password, encoded): encoded_2 = self.encode(password, '') return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): assert encoded.startswith('sha1$$') hash = encoded[6:] return OrderedDict([ (_('algorithm'), self.algorithm), (_('hash'), mask_hash(hash)), ]) class UnsaltedMD5PasswordHasher(BasePasswordHasher): """ Incredibly insecure algorithm that you should *never* use; stores unsalted MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an empty salt. This class is implemented because Django used to store passwords this way and to accept such password hashes. Some older Django installs still have these values lingering around so we need to handle and upgrade them properly. """ algorithm = "unsalted_md5" def salt(self): return '' def encode(self, password, salt): assert salt == '' return hashlib.md5(force_bytes(password)).hexdigest() def verify(self, password, encoded): if len(encoded) == 37 and encoded.startswith('md5$$'): encoded = encoded[5:] encoded_2 = self.encode(password, '') return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): return OrderedDict([ (_('algorithm'), self.algorithm), (_('hash'), mask_hash(encoded, show=3)), ]) class CryptPasswordHasher(BasePasswordHasher): """ Password hashing using UNIX crypt (not recommended) The crypt module is not supported on all platforms. """ algorithm = "crypt" library = "crypt" def salt(self): return get_random_string(2) def encode(self, password, salt): crypt = self._load_library() assert len(salt) == 2 data = crypt.crypt(force_str(password), salt) # we don't need to store the salt, but Django used to do this return "%s$%s$%s" % (self.algorithm, '', data) def verify(self, password, encoded): crypt = self._load_library() algorithm, salt, data = encoded.split('$', 2) assert algorithm == self.algorithm return constant_time_compare(data, crypt.crypt(force_str(password), data)) def safe_summary(self, encoded): algorithm, salt, data = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), salt), (_('hash'), mask_hash(data, show=3)), ])
diego-d5000/MisValesMd
env/lib/python2.7/site-packages/django/contrib/auth/hashers.py
Python
mit
17,840
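The PBKDF2 hasher above produces strings of the form algorithm$iterations$salt$hash and verifies them with a constant-time comparison. A standalone sketch of the same scheme using only the standard library follows; it is not Django's own API (which needs a configured settings module), just an illustration of the encode/verify round trip.

# Standalone sketch of the "pbkdf2_sha256$<iterations>$<salt>$<b64 hash>"
# scheme implemented by PBKDF2PasswordHasher above; standard library only,
# not Django's actual API.
import base64
import hashlib
import hmac
import secrets

def encode(password, salt=None, iterations=20000):
    salt = salt or secrets.token_hex(6)
    digest = hashlib.pbkdf2_hmac('sha256', password.encode(), salt.encode(),
                                 iterations)
    b64_hash = base64.b64encode(digest).decode('ascii').strip()
    return "pbkdf2_sha256$%d$%s$%s" % (iterations, salt, b64_hash)

def verify(password, encoded):
    algorithm, iterations, salt, _ = encoded.split('$', 3)
    assert algorithm == 'pbkdf2_sha256'
    # re-encode with the stored salt/iterations and compare in constant time
    return hmac.compare_digest(encoded, encode(password, salt, int(iterations)))

stored = encode('s3cret')
print(verify('s3cret', stored), verify('wrong', stored))  # True False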
from pytest import fixture
from itertools import combinations
import msgpack as pymsgpack

values = [
    42, 7,
    3.14, 2.71,
    'lorem', 'ipsum',
    True, False, None,
    b'lorem', b'ipsum',
    [],
    ['lorem', 42, 3.14, True, None, ['ipsum']],
    dict(),
    {'lorem': 'ipsum',
     'dolor': 42,
     'sit': 3.14,
     'amet': [True, None],
     'consectetur': {'adipisicing': 'elit'}}]

pairs = tuple(combinations(values, 2))


@fixture
def cxxjson():
    from cxx import json
    return json


@fixture
def cxxmsgpack():
    from cxx import msgpack
    return msgpack
attugit/cxxjson
test/conftest.py
Python
mit
587
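The conftest above only defines fixtures and shared test data; a hypothetical test module sitting next to it might look like the sketch below. It exercises only the reference msgpack package, with a few sample values drawn from the shared list; the cxxjson/cxxmsgpack fixtures would be injected by pytest in the same way, but their API is not shown in the file, so they are left out.

# Hypothetical test module next to the conftest above; reference msgpack only.
import pytest
import msgpack as pymsgpack

# a few sample values drawn from the 'values' list in the conftest
samples = [42, 3.14, 'lorem', True, None, b'ipsum',
           ['lorem', 42, 3.14, True, None, ['ipsum']],
           {'lorem': 'ipsum', 'dolor': 42}]

@pytest.mark.parametrize('value', samples)
def test_msgpack_roundtrip(value):
    packed = pymsgpack.packb(value)
    assert pymsgpack.unpackb(packed) == value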
from heapq import heapify, heappush, heappop class priority_dict(dict): """Dictionary that can be used as a priority queue. Keys of the dictionary are items to be put into the queue, and values are their respective priorities. All dictionary methods work as expected. The advantage over a standard heapq-based priority queue is that priorities of items can be efficiently updated (amortized O(1)) using code as 'thedict[item] = new_priority.' The 'smallest' method can be used to return the object with lowest priority, and 'pop_smallest' also removes it. The 'sorted_iter' method provides a destructive sorted iterator. """ def __init__(self, *args, **kwargs): super(priority_dict, self).__init__(*args, **kwargs) self._rebuild_heap() def _rebuild_heap(self): self._heap = [(v, k) for k, v in self.iteritems()] heapify(self._heap) def smallest(self): """Return the item with the lowest priority. Raises IndexError if the object is empty. """ heap = self._heap v, k = heap[0] while k not in self or self[k] != v: heappop(heap) v, k = heap[0] return k def pop_smallest(self): """Return the item with the lowest priority and remove it. Raises IndexError if the object is empty. """ heap = self._heap v, k = heappop(heap) while k not in self or self[k] != v: v, k = heappop(heap) del self[k] return k def __setitem__(self, key, val): # We are not going to remove the previous value from the heap, # since this would have a cost O(n). super(priority_dict, self).__setitem__(key, val) if len(self._heap) < 2 * len(self): heappush(self._heap, (val, key)) else: # When the heap grows larger than 2 * len(self), we rebuild it # from scratch to avoid wasting too much memory. self._rebuild_heap() def setdefault(self, key, val): if key not in self: self[key] = val return val return self[key] def update(self, *args, **kwargs): # Reimplementing dict.update is tricky -- see e.g. # http://mail.python.org/pipermail/python-ideas/2007-May/000744.html # We just rebuild the heap from scratch after passing to super. super(priority_dict, self).update(*args, **kwargs) self._rebuild_heap() def sorted_iter(self): """Sorted iterator of the priority dictionary items. Beware: this will destroy elements as they are returned. """ while self: yield self.pop_smallest()
ActiveState/code
recipes/Python/522995_Priority_dict_priority_queue_updatable/recipe-522995.py
Python
mit
2,795
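The recipe above targets Python 2 (note the iteritems() call in _rebuild_heap); under Python 3 that call has to become items(). Assuming the class is available as written, a short usage sketch:

# Usage sketch for the priority_dict recipe above (assumes the class is
# defined or importable; under Python 3, change _rebuild_heap's
# self.iteritems() to self.items() first).
tasks = priority_dict()
tasks['write report'] = 3
tasks['fix bug'] = 1
tasks['make coffee'] = 2

tasks['write report'] = 0         # amortized O(1) priority update, no deletion

print(tasks.smallest())           # 'write report'
print(list(tasks.sorted_iter()))  # drains the dict in priority order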
""" vue2svg : spike/prototype for scenetool. generates an svg scene from VUE files specified on command line. usage: python3.2 vue2svg.py ../test/vue/*.vue https://github.com/tangentstorm/scenetool copyright (c) 2013 michal j wallace. available to the public under the MIT/x11 license. (see ../LICENSE) """ import os, sys, io, itertools as it from collections import namedtuple import sqlite3 from lxml import etree DB_PATH = "vuedata.sdb" # note: will be wiped out on each run! nsmap = { 'xsi':"http://www.w3.org/2001/XMLSchema-instance" } def xp(tree, path): match = tree.xpath(path, namespaces=nsmap) return match[0] if match else '' VueData = namedtuple('VueData', ('parent ntype shape id ts x y w h text layer autosized' ' fill strokewidth strokecolor strokestyle textcolor' ' font id1 id2 p0x p0y p1x p1y ctrlcount arrowstate' ' c0x c0y c1x c1y').split( )) def walk(tree, parent=0): """ walk the tree recursively, extracting node data """ children = tree.xpath('child') for child in children: row = VueData(*([parent] + [xp(child, path) for path in [ '@xsi:type', 'shape/@xsi:type', '@ID', '@created', '@x', '@y', '@width', '@height', '@label', '@layerID', '@autoSized', 'fillColor/text()', 'strokeWidth/text()', 'strokeColor/text()', 'strokeStyle/text()', 'textColor/text()', 'font/text()', 'ID1/text()', 'ID2/text()', 'point1/@x', 'point1/@y', 'point2/@x', 'point2/@y', '@controlCount', '@arrowState', 'ctrlPoint0/@x', 'ctrlPoint0/@y', 'ctrlPoint1/@x', 'ctrlPoint1/@y' ]])) yield row for item in walk(child, row.id): yield item def load(dbc, filename): """ load data from the vue file into the database """ # vue files are not valid xml because the doctype is misplaced. # so to fix, just strip off the opening comments: data = open(filename, 'r').read() data = data[data.find('<?xml'):] vue = etree.parse(io.BytesIO(bytes(data, 'ascii'))) cur = dbc.cursor() cur.execute('insert into file values (?)', [filename]) fid = cur.lastrowid for row in walk(vue, 0): sql = 'insert into vuedata values (? %s)' \ % (', ? 
' * len(VueData._fields)) cur.execute(sql, [fid] + list(row)) def connect(): return sqlite3.connect(DB_PATH, isolation_level=None) # autocommit def main(filenames): if os.path.exists(DB_PATH): os.unlink(DB_PATH) dbc = connect() cur = dbc.cursor() cur.execute('create table if not exists file (filename string)') sql = 'create table if not exists vuedata (fid integer, %s)' \ % ', '.join('%s data' % col for col in VueData._fields) cur.execute(sql) for filename in filenames: load(dbc,filename) dbc.close() # run the scripts and check for error (non-zero exit code) if ( os.system("sqlite3 %s < schema.sql" % DB_PATH) + os.system("sqlite3 %s < vue2elem.sql" % DB_PATH) + os.system("sqlite3 %s < views.sql" % DB_PATH) ) > 0: sys.exit() dbc = connect() cur = dbc.cursor() def fetch_ntups(cur): cols = [tup[0] for tup in cur.description] ntup = namedtuple('row', cols) for row in cur.fetchall(): yield ntup(*row) def fetch_dicts(cur): cols = [tup[0] for tup in cur.description] for row in cur.fetchall(): yield dict(zip(cols, row)) print('<!doctype html>') print('<html><head><title>vue2svg</title>') print('<style type="text/css">') cur.execute( """ SELECT s.rowid AS id, fg, bg, sc, sw, f.font FROM style s LEFT JOIN font f ON s.font = f.rowid """) for row in fetch_dicts(cur): print(' '.join( """ svg .style-{id} text {{ fill: {fg}; }} svg .style-{id} {{ stroke: {sc}; stroke-width: {sw}; fill: {bg}; }} """.format(**row).split())) print('</style>') print('</head>') cur.execute("select * from scenes") cols = [tup[0] for tup in cur.description] ntup = namedtuple('rec', cols) templates = { 'node': '<rect x="{x}" y="{y}" class="style-{style}" ' ' width="{w}" height="{h}" />', 'edge': '<line x1="{x0}" y1="{y0}" class="style-{style}"' ' x2="{x1}" y2="{y1}" />', } print('<body>') for filename, rows in it.groupby(cur.fetchall(), lambda r: r[0]): print(' <h1>%s</h1>' % filename) print(' <svg>') for row in rows: rec = ntup(*row) print(' ',templates.get(rec.tag, rec.tag or '') .format(**rec.__dict__)) print(' </svg>') print('</body>') print('</html>') if __name__=="__main__": if len(sys.argv) > 1: main(sys.argv[1:]) else: print(__doc__)
tangentstorm/scenetool
spike/vue2svg.py
Python
mit
5,403
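Most of the extraction work in the script above happens in xp(), which runs namespace-aware XPath queries against each <child> node of the VUE document. A tiny self-contained illustration of that pattern with lxml; the XML snippet is made up and far smaller than a real VUE file.

# Minimal illustration of the namespace-aware XPath pattern used by xp();
# the XML below is invented for the example.
from lxml import etree

nsmap = {'xsi': "http://www.w3.org/2001/XMLSchema-instance"}
doc = etree.fromstring(
    '<node xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
    '<child xsi:type="node" x="10" y="20"><shape xsi:type="rectangle"/></child>'
    '</node>')

def xp(tree, path):
    match = tree.xpath(path, namespaces=nsmap)
    return match[0] if match else ''

child = doc.xpath('child')[0]
print(xp(child, '@xsi:type'), xp(child, 'shape/@xsi:type'), xp(child, '@x'))
# -> node rectangle 10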
from staffjoy.resource import Resource
from staffjoy.resources.location import Location
from staffjoy.resources.admin import Admin
from staffjoy.resources.organization_worker import OrganizationWorker


class Organization(Resource):
    PATH = "organizations/{organization_id}"
    ID_NAME = "organization_id"

    def get_locations(self, **kwargs):
        return Location.get_all(parent=self, **kwargs)

    def get_location(self, id):
        return Location.get(parent=self, id=id)

    def create_location(self, **kwargs):
        return Location.create(parent=self, **kwargs)

    def get_admins(self):
        return Admin.get_all(parent=self)

    def get_admin(self, id):
        return Admin.get(parent=self, id=id)

    def create_admin(self, **kwargs):
        """Typically just pass email"""
        return Admin.create(parent=self, **kwargs)

    def get_workers(self, **kwargs):
        return OrganizationWorker.get_all(parent=self, **kwargs)
Staffjoy/client_python
staffjoy/resources/organization.py
Python
mit
959
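Each helper above simply delegates to the generic Resource class with parent=self, so fetching an organization and walking down to its locations and admins is a short chain of calls. A rough sketch follows; the top-level Client entry point and its argument names are assumptions, only the Organization methods come from this file.

# Rough usage sketch; Client and its arguments are assumptions, the
# Organization methods come from the file above.
from staffjoy import Client                        # assumed entry point

client = Client(key="YOUR_API_KEY")                # hypothetical credentials
org = client.get_organization(id=7)                # assumed to return an Organization
for location in org.get_locations():
    print(location)
org.create_admin(email="admin@example.com")        # "typically just pass email"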
# coding=utf-8 import logging import time from adapter import Adapter DROIDBOT_APP_PACKAGE = "io.github.ylimit.droidbotapp" IME_SERVICE = DROIDBOT_APP_PACKAGE + "/.DroidBotIME" class DroidBotImeException(Exception): """ Exception in telnet connection """ pass class DroidBotIme(Adapter): """ a connection with droidbot ime app. """ def __init__(self, device=None): """ initiate a emulator console via telnet :param device: instance of Device :return: """ self.logger = logging.getLogger(self.__class__.__name__) if device is None: from droidbot.device import Device device = Device() self.device = device self.connected = False def set_up(self): device = self.device if DROIDBOT_APP_PACKAGE in device.adb.get_installed_apps(): self.logger.debug("DroidBot app was already installed.") else: # install droidbot app try: import pkg_resources droidbot_app_path = pkg_resources.resource_filename("droidbot", "resources/droidbotApp.apk") install_cmd = "install %s" % droidbot_app_path self.device.adb.run_cmd(install_cmd) self.logger.debug("DroidBot app installed.") except Exception as e: self.logger.warning(e.message) self.logger.warning("Failed to install DroidBotApp.") def tear_down(self): self.device.uninstall_app(DROIDBOT_APP_PACKAGE) def connect(self): r_enable = self.device.adb.shell("ime enable %s" % IME_SERVICE) if r_enable.endswith("now enabled"): r_set = self.device.adb.shell("ime set %s" % IME_SERVICE) if r_set.endswith("selected"): self.connected = True return self.logger.warning("Failed to connect DroidBotIME!") def check_connectivity(self): """ check if droidbot app is connected :return: True for connected """ return self.connected def disconnect(self): """ disconnect telnet """ self.connected = False r_disable = self.device.adb.shell("ime disable %s" % IME_SERVICE) if r_disable.endswith("now disabled"): self.connected = False print "[CONNECTION] %s is disconnected" % self.__class__.__name__ return self.logger.warning("Failed to disconnect DroidBotIME!") def input_text(self, text, mode=0): """ Input text to target device :param text: text to input, can be unicode format :param mode: 0 - set text; 1 - append text. """ input_cmd = "am broadcast -a DROIDBOT_INPUT_TEXT --es text \"%s\" --ei mode %d" % (text, mode) self.device.adb.shell(input_cmd) if __name__ == "__main__": droidbot_ime_conn = DroidBotIme() droidbot_ime_conn.set_up() droidbot_ime_conn.connect() droidbot_ime_conn.input_text("hello world!", 0) droidbot_ime_conn.input_text(u"世界你好!", 1) time.sleep(2) droidbot_ime_conn.input_text(u"再见。Bye bye.", 0) droidbot_ime_conn.disconnect() droidbot_ime_conn.tear_down()
nastya/droidbot
droidbot/adapter/droidbot_ime.py
Python
mit
3,282
from __future__ import annotations import operator from collections import defaultdict from contextlib import ExitStack from datetime import datetime, timedelta, timezone from logging import Logger, getLogger from typing import Any, Callable, ClassVar, Iterable from uuid import UUID import attrs import pymongo import tenacity from attrs.validators import instance_of from bson import CodecOptions, UuidRepresentation from bson.codec_options import TypeEncoder, TypeRegistry from pymongo import ASCENDING, DeleteOne, MongoClient, UpdateOne from pymongo.collection import Collection from pymongo.errors import ConnectionFailure, DuplicateKeyError from tenacity import Retrying from ..abc import DataStore, EventBroker, EventSource, Job, Schedule, Serializer from ..enums import CoalescePolicy, ConflictPolicy, JobOutcome from ..eventbrokers.local import LocalEventBroker from ..events import ( DataStoreEvent, JobAcquired, JobAdded, JobReleased, ScheduleAdded, ScheduleRemoved, ScheduleUpdated, TaskAdded, TaskRemoved, TaskUpdated) from ..exceptions import ( ConflictingIdError, DeserializationError, SerializationError, TaskLookupError) from ..serializers.pickle import PickleSerializer from ..structures import JobResult, RetrySettings, Task from ..util import reentrant class CustomEncoder(TypeEncoder): def __init__(self, python_type: type, encoder: Callable): self._python_type = python_type self._encoder = encoder @property def python_type(self) -> type: return self._python_type def transform_python(self, value: Any) -> Any: return self._encoder(value) def ensure_uuid_presentation(client: MongoClient) -> None: # if client. pass @reentrant @attrs.define(eq=False) class MongoDBDataStore(DataStore): client: MongoClient = attrs.field(validator=instance_of(MongoClient)) serializer: Serializer = attrs.field(factory=PickleSerializer, kw_only=True) database: str = attrs.field(default='apscheduler', kw_only=True) lock_expiration_delay: float = attrs.field(default=30, kw_only=True) retry_settings: RetrySettings = attrs.field(default=RetrySettings()) start_from_scratch: bool = attrs.field(default=False, kw_only=True) _task_attrs: ClassVar[list[str]] = [field.name for field in attrs.fields(Task)] _schedule_attrs: ClassVar[list[str]] = [field.name for field in attrs.fields(Schedule)] _job_attrs: ClassVar[list[str]] = [field.name for field in attrs.fields(Job)] _logger: Logger = attrs.field(init=False, factory=lambda: getLogger(__name__)) _retrying: Retrying = attrs.field(init=False) _exit_stack: ExitStack = attrs.field(init=False, factory=ExitStack) _events: EventBroker = attrs.field(init=False, factory=LocalEventBroker) _local_tasks: dict[str, Task] = attrs.field(init=False, factory=dict) def __attrs_post_init__(self) -> None: # Construct the Tenacity retry controller self._retrying = Retrying(stop=self.retry_settings.stop, wait=self.retry_settings.wait, retry=tenacity.retry_if_exception_type(ConnectionFailure), after=self._after_attempt, reraise=True) type_registry = TypeRegistry([ CustomEncoder(timedelta, timedelta.total_seconds), CustomEncoder(ConflictPolicy, operator.attrgetter('name')), CustomEncoder(CoalescePolicy, operator.attrgetter('name')), CustomEncoder(JobOutcome, operator.attrgetter('name')) ]) codec_options = CodecOptions(tz_aware=True, type_registry=type_registry, uuid_representation=UuidRepresentation.STANDARD) database = self.client.get_database(self.database, codec_options=codec_options) self._tasks: Collection = database['tasks'] self._schedules: Collection = database['schedules'] self._jobs: 
Collection = database['jobs'] self._jobs_results: Collection = database['job_results'] @classmethod def from_url(cls, uri: str, **options) -> MongoDBDataStore: client = MongoClient(uri) return cls(client, **options) @property def events(self) -> EventSource: return self._events def _after_attempt(self, retry_state: tenacity.RetryCallState) -> None: self._logger.warning('Temporary data store error (attempt %d): %s', retry_state.attempt_number, retry_state.outcome.exception()) def __enter__(self): server_info = self.client.server_info() if server_info['versionArray'] < [4, 0]: raise RuntimeError(f"MongoDB server must be at least v4.0; current version = " f"{server_info['version']}") self._exit_stack.__enter__() self._exit_stack.enter_context(self._events) for attempt in self._retrying: with attempt, self.client.start_session() as session: if self.start_from_scratch: self._tasks.delete_many({}, session=session) self._schedules.delete_many({}, session=session) self._jobs.delete_many({}, session=session) self._jobs_results.delete_many({}, session=session) self._schedules.create_index('next_fire_time', session=session) self._jobs.create_index('task_id', session=session) self._jobs.create_index('created_at', session=session) self._jobs.create_index('tags', session=session) self._jobs_results.create_index('finished_at', session=session) return self def __exit__(self, exc_type, exc_val, exc_tb): self._exit_stack.__exit__(exc_type, exc_val, exc_tb) def add_task(self, task: Task) -> None: for attempt in self._retrying: with attempt: previous = self._tasks.find_one_and_update( {'_id': task.id}, {'$set': task.marshal(self.serializer), '$setOnInsert': {'running_jobs': 0}}, upsert=True ) self._local_tasks[task.id] = task if previous: self._events.publish(TaskUpdated(task_id=task.id)) else: self._events.publish(TaskAdded(task_id=task.id)) def remove_task(self, task_id: str) -> None: for attempt in self._retrying: with attempt: if not self._tasks.find_one_and_delete({'_id': task_id}): raise TaskLookupError(task_id) del self._local_tasks[task_id] self._events.publish(TaskRemoved(task_id=task_id)) def get_task(self, task_id: str) -> Task: try: return self._local_tasks[task_id] except KeyError: for attempt in self._retrying: with attempt: document = self._tasks.find_one({'_id': task_id}, projection=self._task_attrs) if not document: raise TaskLookupError(task_id) document['id'] = document.pop('id') task = self._local_tasks[task_id] = Task.unmarshal(self.serializer, document) return task def get_tasks(self) -> list[Task]: for attempt in self._retrying: with attempt: tasks: list[Task] = [] for document in self._tasks.find(projection=self._task_attrs, sort=[('_id', pymongo.ASCENDING)]): document['id'] = document.pop('_id') tasks.append(Task.unmarshal(self.serializer, document)) return tasks def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]: filters = {'_id': {'$in': list(ids)}} if ids is not None else {} for attempt in self._retrying: with attempt: schedules: list[Schedule] = [] cursor = self._schedules.find(filters).sort('_id') for document in cursor: document['id'] = document.pop('_id') try: schedule = Schedule.unmarshal(self.serializer, document) except DeserializationError: self._logger.warning('Failed to deserialize schedule %r', document['_id']) continue schedules.append(schedule) return schedules def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None: event: DataStoreEvent document = schedule.marshal(self.serializer) document['_id'] = document.pop('id') 
try: for attempt in self._retrying: with attempt: self._schedules.insert_one(document) except DuplicateKeyError: if conflict_policy is ConflictPolicy.exception: raise ConflictingIdError(schedule.id) from None elif conflict_policy is ConflictPolicy.replace: for attempt in self._retrying: with attempt: self._schedules.replace_one({'_id': schedule.id}, document, True) event = ScheduleUpdated( schedule_id=schedule.id, next_fire_time=schedule.next_fire_time) self._events.publish(event) else: event = ScheduleAdded(schedule_id=schedule.id, next_fire_time=schedule.next_fire_time) self._events.publish(event) def remove_schedules(self, ids: Iterable[str]) -> None: filters = {'_id': {'$in': list(ids)}} if ids is not None else {} for attempt in self._retrying: with attempt, self.client.start_session() as session: cursor = self._schedules.find(filters, projection=['_id'], session=session) ids = [doc['_id'] for doc in cursor] if ids: self._schedules.delete_many(filters, session=session) for schedule_id in ids: self._events.publish(ScheduleRemoved(schedule_id=schedule_id)) def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]: for attempt in self._retrying: with attempt, self.client.start_session() as session: schedules: list[Schedule] = [] cursor = self._schedules.find( {'next_fire_time': {'$ne': None}, '$or': [{'acquired_until': {'$exists': False}}, {'acquired_until': {'$lt': datetime.now(timezone.utc)}}] }, session=session ).sort('next_fire_time').limit(limit) for document in cursor: document['id'] = document.pop('_id') schedule = Schedule.unmarshal(self.serializer, document) schedules.append(schedule) if schedules: now = datetime.now(timezone.utc) acquired_until = datetime.fromtimestamp( now.timestamp() + self.lock_expiration_delay, now.tzinfo) filters = {'_id': {'$in': [schedule.id for schedule in schedules]}} update = {'$set': {'acquired_by': scheduler_id, 'acquired_until': acquired_until}} self._schedules.update_many(filters, update, session=session) return schedules def release_schedules(self, scheduler_id: str, schedules: list[Schedule]) -> None: updated_schedules: list[tuple[str, datetime]] = [] finished_schedule_ids: list[str] = [] # Update schedules that have a next fire time requests = [] for schedule in schedules: filters = {'_id': schedule.id, 'acquired_by': scheduler_id} if schedule.next_fire_time is not None: try: serialized_trigger = self.serializer.serialize(schedule.trigger) except SerializationError: self._logger.exception('Error serializing schedule %r – ' 'removing from data store', schedule.id) requests.append(DeleteOne(filters)) finished_schedule_ids.append(schedule.id) continue update = { '$unset': { 'acquired_by': True, 'acquired_until': True, }, '$set': { 'trigger': serialized_trigger, 'next_fire_time': schedule.next_fire_time } } requests.append(UpdateOne(filters, update)) updated_schedules.append((schedule.id, schedule.next_fire_time)) else: requests.append(DeleteOne(filters)) finished_schedule_ids.append(schedule.id) if requests: for attempt in self._retrying: with attempt, self.client.start_session() as session: self._schedules.bulk_write(requests, ordered=False, session=session) for schedule_id, next_fire_time in updated_schedules: event = ScheduleUpdated(schedule_id=schedule_id, next_fire_time=next_fire_time) self._events.publish(event) for schedule_id in finished_schedule_ids: self._events.publish(ScheduleRemoved(schedule_id=schedule_id)) def get_next_schedule_run_time(self) -> datetime | None: for attempt in self._retrying: with attempt: document 
= self._schedules.find_one({'next_run_time': {'$ne': None}}, projection=['next_run_time'], sort=[('next_run_time', ASCENDING)]) if document: return document['next_run_time'] else: return None def add_job(self, job: Job) -> None: document = job.marshal(self.serializer) document['_id'] = document.pop('id') for attempt in self._retrying: with attempt: self._jobs.insert_one(document) event = JobAdded(job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id, tags=job.tags) self._events.publish(event) def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]: filters = {'_id': {'$in': list(ids)}} if ids is not None else {} for attempt in self._retrying: with attempt: jobs: list[Job] = [] cursor = self._jobs.find(filters).sort('_id') for document in cursor: document['id'] = document.pop('_id') try: job = Job.unmarshal(self.serializer, document) except DeserializationError: self._logger.warning('Failed to deserialize job %r', document['id']) continue jobs.append(job) return jobs def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]: for attempt in self._retrying: with attempt, self.client.start_session() as session: cursor = self._jobs.find( {'$or': [{'acquired_until': {'$exists': False}}, {'acquired_until': {'$lt': datetime.now(timezone.utc)}}] }, sort=[('created_at', ASCENDING)], limit=limit, session=session ) documents = list(cursor) # Retrieve the limits task_ids: set[str] = {document['task_id'] for document in documents} task_limits = self._tasks.find( {'_id': {'$in': list(task_ids)}, 'max_running_jobs': {'$ne': None}}, projection=['max_running_jobs', 'running_jobs'], session=session ) job_slots_left = {doc['_id']: doc['max_running_jobs'] - doc['running_jobs'] for doc in task_limits} # Filter out jobs that don't have free slots acquired_jobs: list[Job] = [] increments: dict[str, int] = defaultdict(lambda: 0) for document in documents: document['id'] = document.pop('_id') job = Job.unmarshal(self.serializer, document) # Don't acquire the job if there are no free slots left slots_left = job_slots_left.get(job.task_id) if slots_left == 0: continue elif slots_left is not None: job_slots_left[job.task_id] -= 1 acquired_jobs.append(job) increments[job.task_id] += 1 if acquired_jobs: now = datetime.now(timezone.utc) acquired_until = datetime.fromtimestamp( now.timestamp() + self.lock_expiration_delay, timezone.utc) filters = {'_id': {'$in': [job.id for job in acquired_jobs]}} update = {'$set': {'acquired_by': worker_id, 'acquired_until': acquired_until}} self._jobs.update_many(filters, update, session=session) # Increment the running job counters on each task for task_id, increment in increments.items(): self._tasks.find_one_and_update( {'_id': task_id}, {'$inc': {'running_jobs': increment}}, session=session ) # Publish the appropriate events for job in acquired_jobs: self._events.publish(JobAcquired(job_id=job.id, worker_id=worker_id)) return acquired_jobs def release_job(self, worker_id: str, task_id: str, result: JobResult) -> None: for attempt in self._retrying: with attempt, self.client.start_session() as session: # Insert the job result document = result.marshal(self.serializer) document['_id'] = document.pop('job_id') self._jobs_results.insert_one(document, session=session) # Decrement the running jobs counter self._tasks.find_one_and_update( {'_id': task_id}, {'$inc': {'running_jobs': -1}}, session=session ) # Delete the job self._jobs.delete_one({'_id': result.job_id}, session=session) # Publish the event self._events.publish( 
JobReleased(job_id=result.job_id, worker_id=worker_id, outcome=result.outcome) ) def get_job_result(self, job_id: UUID) -> JobResult | None: for attempt in self._retrying: with attempt: document = self._jobs_results.find_one_and_delete({'_id': job_id}) if document: document['job_id'] = document.pop('_id') return JobResult.unmarshal(self.serializer, document) else: return None
agronholm/apscheduler
src/apscheduler/datastores/mongodb.py
Python
mit
19,667
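Everything the store needs is injected through its attrs fields, so standing one up for experiments only takes a connection URI; the sketch below is based on the constructor arguments visible above (from_url, the database field, start_from_scratch), with the server address and database name as placeholders.

# Minimal sketch of constructing the data store defined above; the MongoDB
# URI and database name are placeholders. Entering the context manager runs
# the version check, optional wipe, and index creation shown in __enter__.
from apscheduler.datastores.mongodb import MongoDBDataStore

data_store = MongoDBDataStore.from_url(
    'mongodb://localhost:27017',     # placeholder URI
    database='apscheduler_demo',     # stored in the 'database' field
    start_from_scratch=True,         # wipe the collections on startup
)

with data_store:
    print(data_store.get_tasks())    # [] on a fresh database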
# coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import pytest import functools from devtools_testutils.aio import recorded_by_proxy_async from azure.ai.formrecognizer._generated.models import AnalyzeResultOperation from azure.ai.formrecognizer.aio import DocumentAnalysisClient from azure.ai.formrecognizer import AnalyzeResult from preparers import FormRecognizerPreparer from asynctestcase import AsyncFormRecognizerTest from preparers import GlobalClientPreparer as _GlobalClientPreparer DocumentAnalysisClientPreparer = functools.partial(_GlobalClientPreparer, DocumentAnalysisClient) class TestDACAnalyzeDocumentAsync(AsyncFormRecognizerTest): def teardown(self): self.sleep(4) @FormRecognizerPreparer() @DocumentAnalysisClientPreparer() @recorded_by_proxy_async async def test_document_stream_transform_pdf(self, client): with open(self.invoice_pdf, "rb") as fd: document = fd.read() responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeResultOperation, raw_response) extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result) responses.append(analyze_result) responses.append(extracted_document) async with client: poller = await client.begin_analyze_document("prebuilt-document", document, cls=callback) result = await poller.result() raw_analyze_result = responses[0].analyze_result returned_model = responses[1] # Check AnalyzeResult assert returned_model.model_id == raw_analyze_result.model_id assert returned_model.api_version == raw_analyze_result.api_version assert returned_model.content == raw_analyze_result.content self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages) self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents) self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables) self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs) self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities) self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles) # check page range assert len(raw_analyze_result.pages) == len(returned_model.pages) @FormRecognizerPreparer() @DocumentAnalysisClientPreparer() @recorded_by_proxy_async async def test_document_stream_transform_jpg(self, client): with open(self.form_jpg, "rb") as fd: document = fd.read() responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeResultOperation, raw_response) extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result) responses.append(analyze_result) responses.append(extracted_document) async with client: poller = await client.begin_analyze_document("prebuilt-document", document, cls=callback) result = await poller.result() raw_analyze_result = responses[0].analyze_result returned_model = responses[1] # Check AnalyzeResult assert returned_model.model_id == raw_analyze_result.model_id assert returned_model.api_version == raw_analyze_result.api_version assert returned_model.content == raw_analyze_result.content self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages) self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents) self.assertDocumentTablesTransformCorrect(returned_model.tables, 
raw_analyze_result.tables) self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs) self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities) self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles) # check page range assert len(raw_analyze_result.pages) == len(returned_model.pages) @FormRecognizerPreparer() @DocumentAnalysisClientPreparer() @recorded_by_proxy_async async def test_document_multipage_transform(self, client): with open(self.multipage_invoice_pdf, "rb") as fd: document = fd.read() responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeResultOperation, raw_response) extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result) responses.append(analyze_result) responses.append(extracted_document) async with client: poller = await client.begin_analyze_document("prebuilt-document", document, cls=callback) result = await poller.result() raw_analyze_result = responses[0].analyze_result returned_model = responses[1] # Check AnalyzeResult assert returned_model.model_id == raw_analyze_result.model_id assert returned_model.api_version == raw_analyze_result.api_version assert returned_model.content == raw_analyze_result.content self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages) self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents) self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables) self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs) self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities) self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles) # check page range assert len(raw_analyze_result.pages) == len(returned_model.pages) @pytest.mark.live_test_only @FormRecognizerPreparer() @DocumentAnalysisClientPreparer() @recorded_by_proxy_async async def test_document_multipage_table_span_pdf(self, client, **kwargs): with open(self.multipage_table_pdf, "rb") as fd: my_file = fd.read() async with client: poller = await client.begin_analyze_document("prebuilt-document", my_file) document = await poller.result() assert len(document.tables) == 3 assert document.tables[0].row_count == 30 assert document.tables[0].column_count == 5 assert document.tables[1].row_count == 6 assert document.tables[1].column_count == 5 assert document.tables[2].row_count == 23 assert document.tables[2].column_count == 5 @FormRecognizerPreparer() @DocumentAnalysisClientPreparer() @recorded_by_proxy_async async def test_document_specify_pages(self, client): with open(self.multipage_invoice_pdf, "rb") as fd: document = fd.read() async with client: poller = await client.begin_analyze_document("prebuilt-document", document, pages="1") result = await poller.result() assert len(result.pages) == 1 poller = await client.begin_analyze_document("prebuilt-document", document, pages="1, 3") result = await poller.result() assert len(result.pages) == 2 poller = await client.begin_analyze_document("prebuilt-document", document, pages="1-2") result = await poller.result() assert len(result.pages) == 2 poller = await client.begin_analyze_document("prebuilt-document", document, pages="1-2, 3") result = await poller.result() assert len(result.pages) == 3
Azure/azure-sdk-for-python
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_general_document_async.py
Python
mit
8,184
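Outside the test harness, the begin_analyze_document / poller.result() flow the tests exercise reduces to a few lines; a hedged sketch follows, with the endpoint, key, and file path as placeholders.

# Hedged sketch of the async analysis flow exercised by the tests above;
# endpoint, key and file path are placeholders.
import asyncio
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentAnalysisClient

async def analyze(path):
    client = DocumentAnalysisClient("https://<resource>.cognitiveservices.azure.com/",
                                    AzureKeyCredential("<api-key>"))
    async with client:
        with open(path, "rb") as fd:
            poller = await client.begin_analyze_document("prebuilt-document", fd.read())
        result = await poller.result()
    print(len(result.pages), "pages,", len(result.tables or []), "tables")

asyncio.run(analyze("invoice.pdf"))  # placeholder file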
import os
import unittest

import xcube.core.store as xcube_store

from cate.core.ds import DATA_STORE_POOL


def _create_test_data_store_config(name: str):
    local_test_store_path = \
        os.path.join(os.path.dirname(__file__),
                     'ds', 'resources', 'datasources', name)
    local_test_store_dict = {
        "store_id": "file",
        "store_params": {
            "root": local_test_store_path
        },
        "title": f"Local Test Store '{name}'"
    }
    local_test_store = xcube_store.DataStoreConfig.from_dict(local_test_store_dict)
    return local_test_store


class StoreTest(unittest.TestCase):
    _orig_store_configs = None

    @classmethod
    def setUpClass(cls):
        cls._orig_store_configs = {instance_id: DATA_STORE_POOL.get_store_config(instance_id)
                                   for instance_id in DATA_STORE_POOL.store_instance_ids}
        for instance_id in DATA_STORE_POOL.store_instance_ids:
            DATA_STORE_POOL.remove_store_config(instance_id)
        DATA_STORE_POOL.add_store_config('local_test_store_1',
                                         _create_test_data_store_config('local'))
        DATA_STORE_POOL.add_store_config('local_test_store_2',
                                         _create_test_data_store_config('local2'))

    @classmethod
    def tearDownClass(cls):
        for instance_id in DATA_STORE_POOL.store_instance_ids:
            DATA_STORE_POOL.remove_store_config(instance_id)
        for instance_id, config in cls._orig_store_configs.items():
            DATA_STORE_POOL.add_store_config(instance_id, config)
CCI-Tools/cate-core
tests/storetest.py
Python
mit
1,590
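With the pool rewired as above, individual tests reach the two file stores through the pool rather than through hard-coded paths. A hedged sketch of what a test body might do; the instance id comes from setUpClass, while get_store() and get_data_ids() are assumed from xcube's data-store API rather than taken from this file.

# Hedged sketch of a test body running against the rewired pool above;
# get_store()/get_data_ids() are assumed from xcube's data-store API.
from cate.core.ds import DATA_STORE_POOL

store = DATA_STORE_POOL.get_store('local_test_store_1')
for data_id in store.get_data_ids():
    print(data_id)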
from gusto import * from gusto import thermodynamics from firedrake import (PeriodicIntervalMesh, ExtrudedMesh, SpatialCoordinate, conditional, cos, pi, sqrt, NonlinearVariationalProblem, NonlinearVariationalSolver, TestFunction, dx, TrialFunction, Constant, Function, LinearVariationalProblem, LinearVariationalSolver, DirichletBC, FunctionSpace, BrokenElement, VectorFunctionSpace) import sys if '--recovered' in sys.argv: recovered = True else: recovered = False if '--limit' in sys.argv: limit = True else: limit = False if '--diffusion' in sys.argv: diffusion = True else: diffusion = False dt = 1.0 if '--running-tests' in sys.argv: tmax = 10. deltax = 1000. else: deltax = 100. if recovered else 200 tmax = 1000. L = 10000. H = 10000. nlayers = int(H/deltax) ncolumns = int(L/deltax) m = PeriodicIntervalMesh(ncolumns, L) mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers) degree = 0 if recovered else 1 fieldlist = ['u', 'rho', 'theta'] timestepping = TimesteppingParameters(dt=dt, maxk=4, maxi=1) dirname = 'moist_bf' if recovered: dirname += '_recovered' if limit: dirname += '_limit' if diffusion: dirname += '_diffusion' output = OutputParameters(dirname=dirname, dumpfreq=20, dumplist=['u'], perturbation_fields=[], log_level='INFO') params = CompressibleParameters() diagnostics = Diagnostics(*fieldlist) diagnostic_fields = [Theta_e(), InternalEnergy(), Perturbation('InternalEnergy'), PotentialEnergy()] state = State(mesh, vertical_degree=degree, horizontal_degree=degree, family="CG", timestepping=timestepping, output=output, parameters=params, diagnostics=diagnostics, fieldlist=fieldlist, diagnostic_fields=diagnostic_fields) # Initial conditions u0 = state.fields("u") rho0 = state.fields("rho") theta0 = state.fields("theta") water_v0 = state.fields("water_v", theta0.function_space()) water_c0 = state.fields("water_c", theta0.function_space()) moisture = ["water_v", "water_c"] # spaces Vu = state.spaces("HDiv") Vt = state.spaces("HDiv_v") Vr = state.spaces("DG") x, z = SpatialCoordinate(mesh) quadrature_degree = (4, 4) dxp = dx(degree=(quadrature_degree)) # Define constant theta_e and water_t Tsurf = 320.0 total_water = 0.02 theta_e = Function(Vt).assign(Tsurf) water_t = Function(Vt).assign(total_water) # Calculate hydrostatic fields saturated_hydrostatic_balance(state, theta_e, water_t) # make mean fields theta_b = Function(Vt).assign(theta0) rho_b = Function(Vr).assign(rho0) water_vb = Function(Vt).assign(water_v0) water_cb = Function(Vt).assign(water_t - water_vb) pibar = thermodynamics.pi(state.parameters, rho_b, theta_b) Tb = thermodynamics.T(state.parameters, theta_b, pibar, r_v=water_vb) Ibar = state.fields("InternalEnergybar", Vt, dump=False) Ibar.interpolate(thermodynamics.internal_energy(state.parameters, rho_b, Tb, r_v=water_vb, r_l=water_cb)) # define perturbation xc = L / 2 zc = 2000. rc = 2000. 
Tdash = 2.0 r = sqrt((x - xc) ** 2 + (z - zc) ** 2) theta_pert = Function(Vt).interpolate(conditional(r > rc, 0.0, Tdash * (cos(pi * r / (2.0 * rc))) ** 2)) # define initial theta theta0.assign(theta_b * (theta_pert / 300.0 + 1.0)) # find perturbed rho gamma = TestFunction(Vr) rho_trial = TrialFunction(Vr) a = gamma * rho_trial * dxp L = gamma * (rho_b * theta_b / theta0) * dxp rho_problem = LinearVariationalProblem(a, L, rho0) rho_solver = LinearVariationalSolver(rho_problem) rho_solver.solve() physics_boundary_method = Boundary_Method.physics if recovered else None # find perturbed water_v w_v = Function(Vt) phi = TestFunction(Vt) rho_averaged = Function(Vt) rho_recoverer = Recoverer(rho0, rho_averaged, VDG=FunctionSpace(mesh, BrokenElement(Vt.ufl_element())), boundary_method=physics_boundary_method) rho_recoverer.project() pi = thermodynamics.pi(state.parameters, rho_averaged, theta0) p = thermodynamics.p(state.parameters, pi) T = thermodynamics.T(state.parameters, theta0, pi, r_v=w_v) w_sat = thermodynamics.r_sat(state.parameters, T, p) w_functional = (phi * w_v * dxp - phi * w_sat * dxp) w_problem = NonlinearVariationalProblem(w_functional, w_v) w_solver = NonlinearVariationalSolver(w_problem) w_solver.solve() water_v0.assign(w_v) water_c0.assign(water_t - water_v0) # initialise fields state.initialise([('u', u0), ('rho', rho0), ('theta', theta0), ('water_v', water_v0), ('water_c', water_c0)]) state.set_reference_profiles([('rho', rho_b), ('theta', theta_b), ('water_v', water_vb)]) # set up limiter if limit: if recovered: limiter = VertexBasedLimiter(VDG1) else: limiter = ThetaLimiter(Vt) else: limiter = None # Set up advection schemes if recovered: VDG1 = state.spaces("DG1") VCG1 = FunctionSpace(mesh, "CG", 1) Vt_brok = FunctionSpace(mesh, BrokenElement(Vt.ufl_element())) Vu_DG1 = VectorFunctionSpace(mesh, VDG1.ufl_element()) Vu_CG1 = VectorFunctionSpace(mesh, "CG", 1) u_opts = RecoveredOptions(embedding_space=Vu_DG1, recovered_space=Vu_CG1, broken_space=Vu, boundary_method=Boundary_Method.dynamics) rho_opts = RecoveredOptions(embedding_space=VDG1, recovered_space=VCG1, broken_space=Vr, boundary_method=Boundary_Method.dynamics) theta_opts = RecoveredOptions(embedding_space=VDG1, recovered_space=VCG1, broken_space=Vt_brok) ueqn = EmbeddedDGAdvection(state, Vu, equation_form="advective", options=u_opts) rhoeqn = EmbeddedDGAdvection(state, Vr, equation_form="continuity", options=rho_opts) thetaeqn = EmbeddedDGAdvection(state, Vt, equation_form="advective", options=theta_opts) else: ueqn = VectorInvariant(state, Vu) rhoeqn = AdvectionEquation(state, Vr, equation_form="continuity") thetaeqn = EmbeddedDGAdvection(state, Vt, equation_form="advective", options=EmbeddedDGOptions()) u_advection = ('u', SSPRK3(state, u0, ueqn)) if recovered else ('u', ThetaMethod(state, u0, ueqn)) advected_fields = [u_advection, ('rho', SSPRK3(state, rho0, rhoeqn)), ('theta', SSPRK3(state, theta0, thetaeqn, limiter=limiter)), ('water_v', SSPRK3(state, water_v0, thetaeqn, limiter=limiter)), ('water_c', SSPRK3(state, water_c0, thetaeqn, limiter=limiter))] # Set up linear solver linear_solver = CompressibleSolver(state, moisture=moisture) # Set up forcing compressible_forcing = CompressibleForcing(state, moisture=moisture) # diffusion bcs = [DirichletBC(Vu, 0.0, "bottom"), DirichletBC(Vu, 0.0, "top")] diffused_fields = [] if diffusion: diffused_fields.append(('u', InteriorPenalty(state, Vu, kappa=Constant(60.), mu=Constant(10./deltax), bcs=bcs))) # define condensation physics_list = [Condensation(state)] # build 
time stepper stepper = CrankNicolson(state, advected_fields, linear_solver, compressible_forcing, physics_list=physics_list, diffused_fields=diffused_fields) stepper.run(t=0, tmax=tmax)
firedrakeproject/gusto
examples/moist_bf_bubble.py
Python
mit
7,797