')\n html = html[:hloc] + b''.join(html_bodies) + b'\\n' + html[hloc:]\n- return self.finish(html)\n+ self.finish(html)\n \n def render_linked_js(self, js_files):\n \"\"\"Default method used to render the final js links for the\n@@ -1004,20 +993,7 @@ class RequestHandler(object):\n return future\n \n def finish(self, chunk=None):\n- \"\"\"Finishes this response, ending the HTTP request.\n-\n- Passing a ``chunk`` to ``finish()`` is equivalent to passing that\n- chunk to ``write()`` and then calling ``finish()`` with no arguments.\n-\n- Returns a `.Future` which may optionally be awaited to track the sending\n- of the response to the client. This `.Future` resolves when all the response\n- data has been sent, and raises an error if the connection is closed before all\n- data can be sent.\n-\n- .. versionchanged:: 5.1\n-\n- Now returns a `.Future` instead of ``None``.\n- \"\"\"\n+ \"\"\"Finishes this response, ending the HTTP request.\"\"\"\n if self._finished:\n raise RuntimeError(\"finish() called twice\")\n \n@@ -1049,13 +1025,12 @@ class RequestHandler(object):\n # are keepalive connections)\n self.request.connection.set_close_callback(None)\n \n- future = self.flush(include_footers=True)\n+ self.flush(include_footers=True)\n self.request.connection.finish()\n self._log()\n self._finished = True\n self.on_finish()\n self._break_cycles()\n- return future\n \n def detach(self):\n \"\"\"Take control of the underlying stream.\n"},"problem_statement":{"kind":"string","value":"autoreload: Fix argv preservation\n`autoreload` currently has a wrapper mode (e.g. `python -m tornado.autoreload -m tornado.test`) for scripts, and an in-process mode (enabled by `Application(..., debug=True)`). It's useful to combine these, since the wrapper can catch syntax errors that cause the process to abort before entering its IOLoop. However, this doesn't work as well as it should, because the `main` wrapper only restores `sys.argv` if the process exits, meaning the `-m tornado.autoreload` flags are lost if the inner autoreload fires. 
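The fix, stated formally in the next sentence, is to keep a copy of the wrapper's command line around for the re-exec. A minimal sketch of the idea (hypothetical `_original_argv` name; not the actual patch):

```python
# Sketch of preserving the wrapper argv in tornado/autoreload.py.
# `_original_argv` is a hypothetical module-level global for illustration.
import os
import sys

_original_argv = None  # set only when run as `python -m tornado.autoreload`


def main():
    global _original_argv
    _original_argv = list(sys.argv)  # full wrapper command line, flags included
    # ... rewrite sys.argv for the wrapped script and run it ...


def _reload():
    # Prefer the preserved wrapper argv so an in-process reload re-execs
    # `python -m tornado.autoreload ...` instead of the rewritten argv.
    argv = _original_argv if _original_argv is not None else sys.argv
    os.execv(sys.executable, [sys.executable] + argv)
```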
The original argv needs to be stored in a global when `autoreload` is `__main__`, so that it can be used in `_reload()`."},"repo":{"kind":"string","value":"tornadoweb/tornado"},"test_patch":{"kind":"string","value":"diff --git a/tornado/test/autoreload_test.py b/tornado/test/autoreload_test.py\nindex 6a9729db..1ea53167 100644\n--- a/tornado/test/autoreload_test.py\n+++ b/tornado/test/autoreload_test.py\n@@ -1,14 +1,19 @@\n from __future__ import absolute_import, division, print_function\n import os\n+import shutil\n import subprocess\n from subprocess import Popen\n import sys\n from tempfile import mkdtemp\n+import time\n \n from tornado.test.util import unittest\n \n \n-MAIN = \"\"\"\\\n+class AutoreloadTest(unittest.TestCase):\n+\n+ def test_reload_module(self):\n+ main = \"\"\"\\\n import os\n import sys\n \n@@ -24,15 +29,13 @@ if 'TESTAPP_STARTED' not in os.environ:\n autoreload._reload()\n \"\"\"\n \n-\n-class AutoreloadTest(unittest.TestCase):\n- def test_reload_module(self):\n # Create temporary test application\n path = mkdtemp()\n+ self.addCleanup(shutil.rmtree, path)\n os.mkdir(os.path.join(path, 'testapp'))\n open(os.path.join(path, 'testapp/__init__.py'), 'w').close()\n with open(os.path.join(path, 'testapp/__main__.py'), 'w') as f:\n- f.write(MAIN)\n+ f.write(main)\n \n # Make sure the tornado module under test is available to the test\n # application\n@@ -46,3 +49,64 @@ class AutoreloadTest(unittest.TestCase):\n universal_newlines=True)\n out = p.communicate()[0]\n self.assertEqual(out, 'Starting\\nStarting\\n')\n+\n+ def test_reload_wrapper_preservation(self):\n+ # This test verifies that when `python -m tornado.autoreload`\n+ # is used on an application that also has an internal\n+ # autoreload, the reload wrapper is preserved on restart.\n+ main = \"\"\"\\\n+import os\n+import sys\n+\n+# This import will fail if path is not set up correctly\n+import testapp\n+\n+if 'tornado.autoreload' not in sys.modules:\n+ raise Exception('started without autoreload wrapper')\n+\n+import tornado.autoreload\n+\n+print('Starting')\n+sys.stdout.flush()\n+if 'TESTAPP_STARTED' not in os.environ:\n+ os.environ['TESTAPP_STARTED'] = '1'\n+ # Simulate an internal autoreload (one not caused\n+ # by the wrapper).\n+ tornado.autoreload._reload()\n+else:\n+ # Exit directly so autoreload doesn't catch it.\n+ os._exit(0)\n+\"\"\"\n+\n+ # Create temporary test application\n+ path = mkdtemp()\n+ os.mkdir(os.path.join(path, 'testapp'))\n+ self.addCleanup(shutil.rmtree, path)\n+ init_file = os.path.join(path, 'testapp', '__init__.py')\n+ open(init_file, 'w').close()\n+ main_file = os.path.join(path, 'testapp', '__main__.py')\n+ with open(main_file, 'w') as f:\n+ f.write(main)\n+\n+ # Make sure the tornado module under test is available to the test\n+ # application\n+ pythonpath = os.getcwd()\n+ if 'PYTHONPATH' in os.environ:\n+ pythonpath += os.pathsep + os.environ['PYTHONPATH']\n+\n+ autoreload_proc = Popen(\n+ [sys.executable, '-m', 'tornado.autoreload', '-m', 'testapp'],\n+ stdout=subprocess.PIPE, cwd=path,\n+ env=dict(os.environ, PYTHONPATH=pythonpath),\n+ universal_newlines=True)\n+\n+ for i in range(20):\n+ if autoreload_proc.poll() is not None:\n+ break\n+ time.sleep(0.1)\n+ else:\n+ autoreload_proc.kill()\n+ raise Exception(\"subprocess failed to terminate\")\n+\n+ out = autoreload_proc.communicate()[0]\n+ self.assertEqual(out, 'Starting\\n' * 2)\ndiff --git a/tornado/test/web_test.py b/tornado/test/web_test.py\nindex b77311df..45072aac 100644\n--- a/tornado/test/web_test.py\n+++ 
b/tornado/test/web_test.py\n@@ -191,40 +191,6 @@ class SecureCookieV2Test(unittest.TestCase):\n self.assertEqual(new_handler.get_secure_cookie('foo'), None)\n \n \n-class FinalReturnTest(WebTestCase):\n- def get_handlers(self):\n- test = self\n-\n- class FinishHandler(RequestHandler):\n- @gen.coroutine\n- def get(self):\n- test.final_return = self.finish()\n-\n- class RenderHandler(RequestHandler):\n- def create_template_loader(self, path):\n- return DictLoader({'foo.html': 'hi'})\n-\n- @gen.coroutine\n- def get(self):\n- test.final_return = self.render('foo.html')\n-\n- return [(\"/finish\", FinishHandler),\n- (\"/render\", RenderHandler)]\n-\n- def get_app_kwargs(self):\n- return dict(template_path='FinalReturnTest')\n-\n- def test_finish_method_return_future(self):\n- response = self.fetch(self.get_url('/finish'))\n- self.assertEqual(response.code, 200)\n- self.assertIsInstance(self.final_return, Future)\n-\n- def test_render_method_return_future(self):\n- response = self.fetch(self.get_url('/render'))\n- self.assertEqual(response.code, 200)\n- self.assertIsInstance(self.final_return, Future)\n-\n-\n class CookieTest(WebTestCase):\n def get_handlers(self):\n class SetCookieHandler(RequestHandler):\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 0,\n \"test_score\": 2\n },\n \"num_modified_files\": 4\n}"},"version":{"kind":"string","value":"5.0"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"pytest\",\n \"pip_packages\": [\n \"pytest\",\n \"flake8\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.6\",\n \"reqs_path\": null,\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work\ncertifi==2021.5.30\nflake8==5.0.4\nimportlib-metadata==4.2.0\niniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work\nmccabe==0.7.0\nmore-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work\npackaging @ file:///tmp/build/80754af9/packaging_1637314298585/work\npluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work\npy @ file:///opt/conda/conda-bld/py_1644396412707/work\npycodestyle==2.9.1\npyflakes==2.5.0\npyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work\npytest==6.2.4\ntoml @ file:///tmp/build/80754af9/toml_1616166611790/work\n-e git+https://github.com/tornadoweb/tornado.git@eb487cac3d829292ecca6e5124b1da5ae6bba407#egg=tornado\ntyping_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work\nzipp @ file:///tmp/build/80754af9/zipp_1633618647012/work\n"},"environment":{"kind":"string","value":"name: tornado\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - attrs=21.4.0=pyhd3eb1b0_0\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - importlib_metadata=4.8.1=hd3eb1b0_0\n - iniconfig=1.1.1=pyhd3eb1b0_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - 
libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - more-itertools=8.12.0=pyhd3eb1b0_0\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - packaging=21.3=pyhd3eb1b0_0\n - pip=21.2.2=py36h06a4308_0\n - pluggy=0.13.1=py36h06a4308_0\n - py=1.11.0=pyhd3eb1b0_0\n - pyparsing=3.0.4=pyhd3eb1b0_0\n - pytest=6.2.4=py36h06a4308_2\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - toml=0.10.2=pyhd3eb1b0_0\n - typing_extensions=4.1.1=pyh06a4308_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zipp=3.6.0=pyhd3eb1b0_0\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - flake8==5.0.4\n - importlib-metadata==4.2.0\n - mccabe==0.7.0\n - pycodestyle==2.9.1\n - pyflakes==2.5.0\nprefix: /opt/conda/envs/tornado\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tornado/test/autoreload_test.py::AutoreloadTest::test_reload_wrapper_preservation"],"string":"[\n \"tornado/test/autoreload_test.py::AutoreloadTest::test_reload_wrapper_preservation\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tornado/test/autoreload_test.py::AutoreloadTest::test_reload_module","tornado/test/web_test.py::SecureCookieV1Test::test_arbitrary_bytes","tornado/test/web_test.py::SecureCookieV1Test::test_cookie_tampering_future_timestamp","tornado/test/web_test.py::SecureCookieV1Test::test_round_trip","tornado/test/web_test.py::SecureCookieV2Test::test_key_version_increment_version","tornado/test/web_test.py::SecureCookieV2Test::test_key_version_invalidate_version","tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip","tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip_differing_version","tornado/test/web_test.py::SecureCookieV2Test::test_round_trip","tornado/test/web_test.py::CookieTest::test_cookie_special_char","tornado/test/web_test.py::CookieTest::test_get_cookie","tornado/test/web_test.py::CookieTest::test_set_cookie","tornado/test/web_test.py::CookieTest::test_set_cookie_domain","tornado/test/web_test.py::CookieTest::test_set_cookie_expires_days","tornado/test/web_test.py::CookieTest::test_set_cookie_false_flags","tornado/test/web_test.py::CookieTest::test_set_cookie_max_age","tornado/test/web_test.py::CookieTest::test_set_cookie_overwrite","tornado/test/web_test.py::AuthRedirectTest::test_absolute_auth_redirect","tornado/test/web_test.py::AuthRedirectTest::test_relative_auth_redirect","tornado/test/web_test.py::ConnectionCloseTest::test_connection_close","tornado/test/web_test.py::RequestEncodingTest::test_error","tornado/test/web_test.py::RequestEncodingTest::test_group_encoding","tornado/test/web_test.py::RequestEncodingTest::test_group_question_mark","tornado/test/web_test.py::RequestEncodingTest::test_slashes","tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument","tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_invalid_unicode","tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_plus","tornado/test/web_test.py::WSGISafeWebTest::test_get_argument","tornado/test/web_test.py::WSGISafeWebTest::test_get_body_arguments","tornado/test/web_test.py::WSGISafeWebTest::test_get_query_arguments","tornado/test/web_test.py::WSGISafeWebTest::test_header_injection","tornado/test/web_test.py::WSGISafeWebTest::test_multi_header","tornado/test/web_test.py::WSGISafeWebTest::test_no_gzip","tornado/test/web_test.py::WSGISafeWebTest:
:test_optional_path","tornado/test/web_test.py::WSGISafeWebTest::test_redirect","tornado/test/web_test.py::WSGISafeWebTest::test_reverse_url","tornado/test/web_test.py::WSGISafeWebTest::test_types","tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_resources","tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_unescaped","tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect","tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect_double_slash","tornado/test/web_test.py::NonWSGIWebTests::test_empty_flush","tornado/test/web_test.py::NonWSGIWebTests::test_flow_control","tornado/test/web_test.py::ErrorResponseTest::test_default","tornado/test/web_test.py::ErrorResponseTest::test_failed_write_error","tornado/test/web_test.py::ErrorResponseTest::test_write_error","tornado/test/web_test.py::StaticFileTest::test_absolute_static_url","tornado/test/web_test.py::StaticFileTest::test_absolute_version_exclusion","tornado/test/web_test.py::StaticFileTest::test_include_host_override","tornado/test/web_test.py::StaticFileTest::test_path_traversal_protection","tornado/test/web_test.py::StaticFileTest::test_relative_version_exclusion","tornado/test/web_test.py::StaticFileTest::test_root_static_path","tornado/test/web_test.py::StaticFileTest::test_static_304_etag_modified_bug","tornado/test/web_test.py::StaticFileTest::test_static_304_if_modified_since","tornado/test/web_test.py::StaticFileTest::test_static_304_if_none_match","tornado/test/web_test.py::StaticFileTest::test_static_404","tornado/test/web_test.py::StaticFileTest::test_static_compressed_files","tornado/test/web_test.py::StaticFileTest::test_static_etag","tornado/test/web_test.py::StaticFileTest::test_static_files","tornado/test/web_test.py::StaticFileTest::test_static_head","tornado/test/web_test.py::StaticFileTest::test_static_head_range","tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_pre_epoch","tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_time_zone","tornado/test/web_test.py::StaticFileTest::test_static_invalid_range","tornado/test/web_test.py::StaticFileTest::test_static_range_if_none_match","tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_invalid_start","tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_zero_suffix","tornado/test/web_test.py::StaticFileTest::test_static_url","tornado/test/web_test.py::StaticFileTest::test_static_with_range","tornado/test/web_test.py::StaticFileTest::test_static_with_range_end_edge","tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_file","tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_past_end","tornado/test/web_test.py::StaticFileTest::test_static_with_range_neg_end","tornado/test/web_test.py::StaticFileTest::test_static_with_range_partial_past_end","tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_filename","tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_redirect","tornado/test/web_test.py::StaticFileWithPathTest::test_serve","tornado/test/web_test.py::CustomStaticFileTest::test_serve","tornado/test/web_test.py::CustomStaticFileTest::test_static_url","tornado/test/web_test.py::HostMatchingTest::test_host_matching","tornado/test/web_test.py::DefaultHostMatchingTest::test_default_host_matching","tornado/test/web_test.py::NamedURLSpecGroupsTest::test_named_urlspec_groups","tornado/test/web_test.py::ClearHeaderTest::test_clear_header","tornado/test/web_test.py::Header204Test::test_204_header
s","tornado/test/web_test.py::Header304Test::test_304_headers","tornado/test/web_test.py::StatusReasonTest::test_status","tornado/test/web_test.py::DateHeaderTest::test_date_header","tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str","tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str_from_httputil","tornado/test/web_test.py::RaiseWithReasonTest::test_raise_with_reason","tornado/test/web_test.py::ErrorHandlerXSRFTest::test_404_xsrf","tornado/test/web_test.py::ErrorHandlerXSRFTest::test_error_xsrf","tornado/test/web_test.py::GzipTestCase::test_gzip","tornado/test/web_test.py::GzipTestCase::test_gzip_not_requested","tornado/test/web_test.py::GzipTestCase::test_gzip_static","tornado/test/web_test.py::GzipTestCase::test_vary_already_present","tornado/test/web_test.py::GzipTestCase::test_vary_already_present_multiple","tornado/test/web_test.py::PathArgsInPrepareTest::test_kw","tornado/test/web_test.py::PathArgsInPrepareTest::test_pos","tornado/test/web_test.py::ClearAllCookiesTest::test_clear_all_cookies","tornado/test/web_test.py::ExceptionHandlerTest::test_http_error","tornado/test/web_test.py::ExceptionHandlerTest::test_known_error","tornado/test/web_test.py::ExceptionHandlerTest::test_unknown_error","tornado/test/web_test.py::BuggyLoggingTest::test_buggy_log_exception","tornado/test/web_test.py::UIMethodUIModuleTest::test_ui_method","tornado/test/web_test.py::GetArgumentErrorTest::test_catch_error","tornado/test/web_test.py::MultipleExceptionTest::test_multi_exception","tornado/test/web_test.py::SetLazyPropertiesTest::test_set_properties","tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_is_lazy","tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_works","tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_works","tornado/test/web_test.py::UnimplementedHTTPMethodsTest::test_unimplemented_standard_methods","tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_other","tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_patch","tornado/test/web_test.py::AllHTTPMethodsTest::test_standard_methods","tornado/test/web_test.py::PatchMethodTest::test_other","tornado/test/web_test.py::PatchMethodTest::test_patch","tornado/test/web_test.py::FinishInPrepareTest::test_finish_in_prepare","tornado/test/web_test.py::Default404Test::test_404","tornado/test/web_test.py::Custom404Test::test_404","tornado/test/web_test.py::DefaultHandlerArgumentsTest::test_403","tornado/test/web_test.py::HandlerByNameTest::test_handler_by_name","tornado/test/web_test.py::StreamingRequestBodyTest::test_close_during_upload","tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return","tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return_with_data","tornado/test/web_test.py::StreamingRequestBodyTest::test_streaming_body","tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_chunked_body","tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_compressed_body","tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_fixed_body","tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_chunked_body","tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_compressed_body","tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_fixed_body","tornado/test/web_test.py::
IncorrectContentLengthTest::test_content_length_too_high","tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_low","tornado/test/web_test.py::ClientCloseTest::test_client_close","tornado/test/web_test.py::SignedValueTest::test_expired","tornado/test/web_test.py::SignedValueTest::test_key_version_retrieval","tornado/test/web_test.py::SignedValueTest::test_key_versioning_invalid_key","tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_default_key","tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_non_default_key","tornado/test/web_test.py::SignedValueTest::test_known_values","tornado/test/web_test.py::SignedValueTest::test_name_swap","tornado/test/web_test.py::SignedValueTest::test_non_ascii","tornado/test/web_test.py::SignedValueTest::test_payload_tampering","tornado/test/web_test.py::SignedValueTest::test_signature_tampering","tornado/test/web_test.py::XSRFTest::test_cross_user","tornado/test/web_test.py::XSRFTest::test_distinct_tokens","tornado/test/web_test.py::XSRFTest::test_refresh_token","tornado/test/web_test.py::XSRFTest::test_versioning","tornado/test/web_test.py::XSRFTest::test_xsrf_fail_argument_invalid_format","tornado/test/web_test.py::XSRFTest::test_xsrf_fail_body_no_cookie","tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_invalid_format","tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_no_body","tornado/test/web_test.py::XSRFTest::test_xsrf_fail_no_token","tornado/test/web_test.py::XSRFTest::test_xsrf_success_header","tornado/test/web_test.py::XSRFTest::test_xsrf_success_non_hex_token","tornado/test/web_test.py::XSRFTest::test_xsrf_success_post_body","tornado/test/web_test.py::XSRFTest::test_xsrf_success_query_string","tornado/test/web_test.py::XSRFTest::test_xsrf_success_short_token","tornado/test/web_test.py::XSRFCookieKwargsTest::test_xsrf_httponly","tornado/test/web_test.py::FinishExceptionTest::test_finish_exception","tornado/test/web_test.py::DecoratorTest::test_addslash","tornado/test/web_test.py::DecoratorTest::test_removeslash","tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_match","tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_not_match","tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_match","tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_not_match","tornado/test/web_test.py::CacheTest::test_strong_etag_match","tornado/test/web_test.py::CacheTest::test_strong_etag_not_match","tornado/test/web_test.py::CacheTest::test_weak_etag_match","tornado/test/web_test.py::CacheTest::test_weak_etag_not_match","tornado/test/web_test.py::CacheTest::test_wildcard_etag","tornado/test/web_test.py::RequestSummaryTest::test_missing_remote_ip","tornado/test/web_test.py::HTTPErrorTest::test_copy","tornado/test/web_test.py::ApplicationTest::test_listen","tornado/test/web_test.py::URLSpecReverseTest::test_non_reversible","tornado/test/web_test.py::URLSpecReverseTest::test_reverse","tornado/test/web_test.py::URLSpecReverseTest::test_reverse_arguments","tornado/test/web_test.py::RedirectHandlerTest::test_basic_redirect","tornado/test/web_test.py::RedirectHandlerTest::test_redirect_pattern","tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_appending_argument","tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_argument"],"string":"[\n \"tornado/test/autoreload_test.py::AutoreloadTest::test_reload_module\",\n \"tornado/test/web_test.py::SecureCookieV1Test::test_arbitrary_bytes\",\n 
\"tornado/test/web_test.py::SecureCookieV1Test::test_cookie_tampering_future_timestamp\",\n \"tornado/test/web_test.py::SecureCookieV1Test::test_round_trip\",\n \"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_increment_version\",\n \"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_invalidate_version\",\n \"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip\",\n \"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip_differing_version\",\n \"tornado/test/web_test.py::SecureCookieV2Test::test_round_trip\",\n \"tornado/test/web_test.py::CookieTest::test_cookie_special_char\",\n \"tornado/test/web_test.py::CookieTest::test_get_cookie\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie_domain\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie_expires_days\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie_false_flags\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie_max_age\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie_overwrite\",\n \"tornado/test/web_test.py::AuthRedirectTest::test_absolute_auth_redirect\",\n \"tornado/test/web_test.py::AuthRedirectTest::test_relative_auth_redirect\",\n \"tornado/test/web_test.py::ConnectionCloseTest::test_connection_close\",\n \"tornado/test/web_test.py::RequestEncodingTest::test_error\",\n \"tornado/test/web_test.py::RequestEncodingTest::test_group_encoding\",\n \"tornado/test/web_test.py::RequestEncodingTest::test_group_question_mark\",\n \"tornado/test/web_test.py::RequestEncodingTest::test_slashes\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_invalid_unicode\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_plus\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_get_argument\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_get_body_arguments\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_get_query_arguments\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_header_injection\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_multi_header\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_no_gzip\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_optional_path\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_redirect\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_reverse_url\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_types\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_resources\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_unescaped\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect_double_slash\",\n \"tornado/test/web_test.py::NonWSGIWebTests::test_empty_flush\",\n \"tornado/test/web_test.py::NonWSGIWebTests::test_flow_control\",\n \"tornado/test/web_test.py::ErrorResponseTest::test_default\",\n \"tornado/test/web_test.py::ErrorResponseTest::test_failed_write_error\",\n \"tornado/test/web_test.py::ErrorResponseTest::test_write_error\",\n \"tornado/test/web_test.py::StaticFileTest::test_absolute_static_url\",\n \"tornado/test/web_test.py::StaticFileTest::test_absolute_version_exclusion\",\n \"tornado/test/web_test.py::StaticFileTest::test_include_host_override\",\n \"tornado/test/web_test.py::StaticFileTest::test_path_traversal_protection\",\n 
\"tornado/test/web_test.py::StaticFileTest::test_relative_version_exclusion\",\n \"tornado/test/web_test.py::StaticFileTest::test_root_static_path\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_304_etag_modified_bug\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_304_if_modified_since\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_304_if_none_match\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_404\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_compressed_files\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_etag\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_files\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_head\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_head_range\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_pre_epoch\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_time_zone\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_invalid_range\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_range_if_none_match\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_invalid_start\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_zero_suffix\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_url\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range_end_edge\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_file\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_past_end\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range_neg_end\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range_partial_past_end\",\n \"tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_filename\",\n \"tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_redirect\",\n \"tornado/test/web_test.py::StaticFileWithPathTest::test_serve\",\n \"tornado/test/web_test.py::CustomStaticFileTest::test_serve\",\n \"tornado/test/web_test.py::CustomStaticFileTest::test_static_url\",\n \"tornado/test/web_test.py::HostMatchingTest::test_host_matching\",\n \"tornado/test/web_test.py::DefaultHostMatchingTest::test_default_host_matching\",\n \"tornado/test/web_test.py::NamedURLSpecGroupsTest::test_named_urlspec_groups\",\n \"tornado/test/web_test.py::ClearHeaderTest::test_clear_header\",\n \"tornado/test/web_test.py::Header204Test::test_204_headers\",\n \"tornado/test/web_test.py::Header304Test::test_304_headers\",\n \"tornado/test/web_test.py::StatusReasonTest::test_status\",\n \"tornado/test/web_test.py::DateHeaderTest::test_date_header\",\n \"tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str\",\n \"tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str_from_httputil\",\n \"tornado/test/web_test.py::RaiseWithReasonTest::test_raise_with_reason\",\n \"tornado/test/web_test.py::ErrorHandlerXSRFTest::test_404_xsrf\",\n \"tornado/test/web_test.py::ErrorHandlerXSRFTest::test_error_xsrf\",\n \"tornado/test/web_test.py::GzipTestCase::test_gzip\",\n \"tornado/test/web_test.py::GzipTestCase::test_gzip_not_requested\",\n \"tornado/test/web_test.py::GzipTestCase::test_gzip_static\",\n \"tornado/test/web_test.py::GzipTestCase::test_vary_already_present\",\n 
\"tornado/test/web_test.py::GzipTestCase::test_vary_already_present_multiple\",\n \"tornado/test/web_test.py::PathArgsInPrepareTest::test_kw\",\n \"tornado/test/web_test.py::PathArgsInPrepareTest::test_pos\",\n \"tornado/test/web_test.py::ClearAllCookiesTest::test_clear_all_cookies\",\n \"tornado/test/web_test.py::ExceptionHandlerTest::test_http_error\",\n \"tornado/test/web_test.py::ExceptionHandlerTest::test_known_error\",\n \"tornado/test/web_test.py::ExceptionHandlerTest::test_unknown_error\",\n \"tornado/test/web_test.py::BuggyLoggingTest::test_buggy_log_exception\",\n \"tornado/test/web_test.py::UIMethodUIModuleTest::test_ui_method\",\n \"tornado/test/web_test.py::GetArgumentErrorTest::test_catch_error\",\n \"tornado/test/web_test.py::MultipleExceptionTest::test_multi_exception\",\n \"tornado/test/web_test.py::SetLazyPropertiesTest::test_set_properties\",\n \"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_is_lazy\",\n \"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_works\",\n \"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_works\",\n \"tornado/test/web_test.py::UnimplementedHTTPMethodsTest::test_unimplemented_standard_methods\",\n \"tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_other\",\n \"tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_patch\",\n \"tornado/test/web_test.py::AllHTTPMethodsTest::test_standard_methods\",\n \"tornado/test/web_test.py::PatchMethodTest::test_other\",\n \"tornado/test/web_test.py::PatchMethodTest::test_patch\",\n \"tornado/test/web_test.py::FinishInPrepareTest::test_finish_in_prepare\",\n \"tornado/test/web_test.py::Default404Test::test_404\",\n \"tornado/test/web_test.py::Custom404Test::test_404\",\n \"tornado/test/web_test.py::DefaultHandlerArgumentsTest::test_403\",\n \"tornado/test/web_test.py::HandlerByNameTest::test_handler_by_name\",\n \"tornado/test/web_test.py::StreamingRequestBodyTest::test_close_during_upload\",\n \"tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return\",\n \"tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return_with_data\",\n \"tornado/test/web_test.py::StreamingRequestBodyTest::test_streaming_body\",\n \"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_chunked_body\",\n \"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_compressed_body\",\n \"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_fixed_body\",\n \"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_chunked_body\",\n \"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_compressed_body\",\n \"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_fixed_body\",\n \"tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_high\",\n \"tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_low\",\n \"tornado/test/web_test.py::ClientCloseTest::test_client_close\",\n \"tornado/test/web_test.py::SignedValueTest::test_expired\",\n \"tornado/test/web_test.py::SignedValueTest::test_key_version_retrieval\",\n \"tornado/test/web_test.py::SignedValueTest::test_key_versioning_invalid_key\",\n \"tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_default_key\",\n 
\"tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_non_default_key\",\n \"tornado/test/web_test.py::SignedValueTest::test_known_values\",\n \"tornado/test/web_test.py::SignedValueTest::test_name_swap\",\n \"tornado/test/web_test.py::SignedValueTest::test_non_ascii\",\n \"tornado/test/web_test.py::SignedValueTest::test_payload_tampering\",\n \"tornado/test/web_test.py::SignedValueTest::test_signature_tampering\",\n \"tornado/test/web_test.py::XSRFTest::test_cross_user\",\n \"tornado/test/web_test.py::XSRFTest::test_distinct_tokens\",\n \"tornado/test/web_test.py::XSRFTest::test_refresh_token\",\n \"tornado/test/web_test.py::XSRFTest::test_versioning\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_argument_invalid_format\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_body_no_cookie\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_invalid_format\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_no_body\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_no_token\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_success_header\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_success_non_hex_token\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_success_post_body\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_success_query_string\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_success_short_token\",\n \"tornado/test/web_test.py::XSRFCookieKwargsTest::test_xsrf_httponly\",\n \"tornado/test/web_test.py::FinishExceptionTest::test_finish_exception\",\n \"tornado/test/web_test.py::DecoratorTest::test_addslash\",\n \"tornado/test/web_test.py::DecoratorTest::test_removeslash\",\n \"tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_match\",\n \"tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_not_match\",\n \"tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_match\",\n \"tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_not_match\",\n \"tornado/test/web_test.py::CacheTest::test_strong_etag_match\",\n \"tornado/test/web_test.py::CacheTest::test_strong_etag_not_match\",\n \"tornado/test/web_test.py::CacheTest::test_weak_etag_match\",\n \"tornado/test/web_test.py::CacheTest::test_weak_etag_not_match\",\n \"tornado/test/web_test.py::CacheTest::test_wildcard_etag\",\n \"tornado/test/web_test.py::RequestSummaryTest::test_missing_remote_ip\",\n \"tornado/test/web_test.py::HTTPErrorTest::test_copy\",\n \"tornado/test/web_test.py::ApplicationTest::test_listen\",\n \"tornado/test/web_test.py::URLSpecReverseTest::test_non_reversible\",\n \"tornado/test/web_test.py::URLSpecReverseTest::test_reverse\",\n \"tornado/test/web_test.py::URLSpecReverseTest::test_reverse_arguments\",\n \"tornado/test/web_test.py::RedirectHandlerTest::test_basic_redirect\",\n \"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_pattern\",\n \"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_appending_argument\",\n \"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_argument\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"Apache License 2.0"},"__index_level_0__":{"kind":"number","value":2553,"string":"2,553"},"num_tokens_patch":{"kind":"number","value":1916,"string":"1,916"},"before_filepaths":{"kind":"list like","value":["tornado/autoreload.py","tornado/iostream.py","tornado/netutil.py","tornado/web.py"],"string":"[\n \"tornado/autoreload.py\",\n \"tornado/iostream.py\",\n 
\"tornado/netutil.py\",\n \"tornado/web.py\"\n]"}}},{"rowIdx":576,"cells":{"instance_id":{"kind":"string","value":"tornadoweb__tornado-2394"},"base_commit":{"kind":"string","value":"50800f37b72c7a401cd49c948cb5be85cabbafea"},"created_at":{"kind":"string","value":"2018-05-20 00:48:24"},"environment_setup_commit":{"kind":"string","value":"6410cd98c1a5e938246a17cac0769f689ed471c5"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/tornado/iostream.py b/tornado/iostream.py\nindex 89e1e234..63110a1a 100644\n--- a/tornado/iostream.py\n+++ b/tornado/iostream.py\n@@ -1410,13 +1410,7 @@ class IOStream(BaseIOStream):\n return future\n \n def _handle_connect(self):\n- try:\n- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)\n- except socket.error as e:\n- # Hurd doesn't allow SO_ERROR for loopback sockets because all\n- # errors for such sockets are reported synchronously.\n- if errno_from_exception(e) == errno.ENOPROTOOPT:\n- err = 0\n+ err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)\n if err != 0:\n self.error = socket.error(err, os.strerror(err))\n # IOLoop implementations may vary: some of them return\ndiff --git a/tornado/netutil.py b/tornado/netutil.py\nindex e63683ad..08c9d886 100644\n--- a/tornado/netutil.py\n+++ b/tornado/netutil.py\n@@ -138,12 +138,7 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC,\n raise\n set_close_exec(sock.fileno())\n if os.name != 'nt':\n- try:\n- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n- except socket.error as e:\n- if errno_from_exception(e) != errno.ENOPROTOOPT:\n- # Hurd doesn't support SO_REUSEADDR.\n- raise\n+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if reuse_port:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n if af == socket.AF_INET6:\n@@ -185,12 +180,7 @@ if hasattr(socket, 'AF_UNIX'):\n \"\"\"\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n set_close_exec(sock.fileno())\n- try:\n- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n- except socket.error as e:\n- if errno_from_exception(e) != errno.ENOPROTOOPT:\n- # Hurd doesn't support SO_REUSEADDR\n- raise\n+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setblocking(0)\n try:\n st = os.stat(file)\ndiff --git a/tornado/web.py b/tornado/web.py\nindex f970bd13..6760b0b9 100644\n--- a/tornado/web.py\n+++ b/tornado/web.py\n@@ -749,7 +749,18 @@ class RequestHandler(object):\n self._write_buffer.append(chunk)\n \n def render(self, template_name, **kwargs):\n- \"\"\"Renders the template with the given arguments as the response.\"\"\"\n+ \"\"\"Renders the template with the given arguments as the response.\n+\n+ ``render()`` calls ``finish()``, so no other output methods can be called\n+ after it.\n+\n+ Returns a `.Future` with the same semantics as the one returned by `finish`.\n+ Awaiting this `.Future` is optional.\n+\n+ .. 
versionchanged:: 5.1\n+\n+ Now returns a `.Future` instead of ``None``.\n+ \"\"\"\n if self._finished:\n raise RuntimeError(\"Cannot render() after finish()\")\n html = self.render_string(template_name, **kwargs)\n@@ -810,7 +821,7 @@ class RequestHandler(object):\n if html_bodies:\n hloc = html.index(b'')\n html = html[:hloc] + b''.join(html_bodies) + b'\\n' + html[hloc:]\n- self.finish(html)\n+ return self.finish(html)\n \n def render_linked_js(self, js_files):\n \"\"\"Default method used to render the final js links for the\n@@ -993,7 +1004,20 @@ class RequestHandler(object):\n return future\n \n def finish(self, chunk=None):\n- \"\"\"Finishes this response, ending the HTTP request.\"\"\"\n+ \"\"\"Finishes this response, ending the HTTP request.\n+\n+ Passing a ``chunk`` to ``finish()`` is equivalent to passing that\n+ chunk to ``write()`` and then calling ``finish()`` with no arguments.\n+\n+ Returns a `.Future` which may optionally be awaited to track the sending\n+ of the response to the client. This `.Future` resolves when all the response\n+ data has been sent, and raises an error if the connection is closed before all\n+ data can be sent.\n+\n+ .. versionchanged:: 5.1\n+\n+ Now returns a `.Future` instead of ``None``.\n+ \"\"\"\n if self._finished:\n raise RuntimeError(\"finish() called twice\")\n \n@@ -1025,12 +1049,13 @@ class RequestHandler(object):\n # are keepalive connections)\n self.request.connection.set_close_callback(None)\n \n- self.flush(include_footers=True)\n+ future = self.flush(include_footers=True)\n self.request.connection.finish()\n self._log()\n self._finished = True\n self.on_finish()\n self._break_cycles()\n+ return future\n \n def detach(self):\n \"\"\"Take control of the underlying stream.\n"},"problem_statement":{"kind":"string","value":"RequestHandler.finish should return a Future\n`RequestHandler.finish` may call `flush()`, which returns a Future, but this Future is simply discarded. The main reason for that Future is flow control in streaming responses, which is no longer relevant by the time we are closing the connection, but it also contains errors if the stream is closed while the response is streamed. This error will be logged as a stack trace if left uncaught, so some applications may wish to await their calls to `finish()` to be able to catch it. \r\n\r\nThis logic also extends to `render()`, which calls `finish()`. 
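For illustration, a coroutine handler could await the returned `Future` to handle a mid-response disconnect itself. A sketch against the proposed 5.1 behavior (it assumes the `Future` surfaces the connection error as `iostream.StreamClosedError`):

```python
from tornado import iostream, web


class BigResponseHandler(web.RequestHandler):
    async def get(self):
        self.write(b"x" * (50 * 1024 * 1024))  # large response body
        try:
            # Resolves once the whole response has been sent to the client.
            await self.finish()
        except iostream.StreamClosedError:
            # The client closed the connection mid-response; handle it here
            # instead of letting the uncaught error be logged as a stack trace.
            pass
```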
\r\n\r\nFrom https://github.com/tornadoweb/tornado/issues/2055#issuecomment-304456147"},"repo":{"kind":"string","value":"tornadoweb/tornado"},"test_patch":{"kind":"string","value":"diff --git a/tornado/test/web_test.py b/tornado/test/web_test.py\nindex 45072aac..b77311df 100644\n--- a/tornado/test/web_test.py\n+++ b/tornado/test/web_test.py\n@@ -191,6 +191,40 @@ class SecureCookieV2Test(unittest.TestCase):\n self.assertEqual(new_handler.get_secure_cookie('foo'), None)\n \n \n+class FinalReturnTest(WebTestCase):\n+ def get_handlers(self):\n+ test = self\n+\n+ class FinishHandler(RequestHandler):\n+ @gen.coroutine\n+ def get(self):\n+ test.final_return = self.finish()\n+\n+ class RenderHandler(RequestHandler):\n+ def create_template_loader(self, path):\n+ return DictLoader({'foo.html': 'hi'})\n+\n+ @gen.coroutine\n+ def get(self):\n+ test.final_return = self.render('foo.html')\n+\n+ return [(\"/finish\", FinishHandler),\n+ (\"/render\", RenderHandler)]\n+\n+ def get_app_kwargs(self):\n+ return dict(template_path='FinalReturnTest')\n+\n+ def test_finish_method_return_future(self):\n+ response = self.fetch(self.get_url('/finish'))\n+ self.assertEqual(response.code, 200)\n+ self.assertIsInstance(self.final_return, Future)\n+\n+ def test_render_method_return_future(self):\n+ response = self.fetch(self.get_url('/render'))\n+ self.assertEqual(response.code, 200)\n+ self.assertIsInstance(self.final_return, Future)\n+\n+\n class CookieTest(WebTestCase):\n def get_handlers(self):\n class SetCookieHandler(RequestHandler):\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 1,\n \"test_score\": 2\n },\n \"num_modified_files\": 3\n}"},"version":{"kind":"string","value":"5.0"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"pytest\",\n \"pip_packages\": [\n \"pytest\",\n \"flake8\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.6\",\n \"reqs_path\": null,\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work\ncertifi==2021.5.30\nflake8==5.0.4\nimportlib-metadata==4.2.0\niniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work\nmccabe==0.7.0\nmore-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work\npackaging @ file:///tmp/build/80754af9/packaging_1637314298585/work\npluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work\npy @ file:///opt/conda/conda-bld/py_1644396412707/work\npycodestyle==2.9.1\npyflakes==2.5.0\npyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work\npytest==6.2.4\ntoml @ file:///tmp/build/80754af9/toml_1616166611790/work\n-e git+https://github.com/tornadoweb/tornado.git@50800f37b72c7a401cd49c948cb5be85cabbafea#egg=tornado\ntyping_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work\nzipp @ file:///tmp/build/80754af9/zipp_1633618647012/work\n"},"environment":{"kind":"string","value":"name: tornado\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n 
- conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - attrs=21.4.0=pyhd3eb1b0_0\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - importlib_metadata=4.8.1=hd3eb1b0_0\n - iniconfig=1.1.1=pyhd3eb1b0_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - more-itertools=8.12.0=pyhd3eb1b0_0\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - packaging=21.3=pyhd3eb1b0_0\n - pip=21.2.2=py36h06a4308_0\n - pluggy=0.13.1=py36h06a4308_0\n - py=1.11.0=pyhd3eb1b0_0\n - pyparsing=3.0.4=pyhd3eb1b0_0\n - pytest=6.2.4=py36h06a4308_2\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - toml=0.10.2=pyhd3eb1b0_0\n - typing_extensions=4.1.1=pyh06a4308_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zipp=3.6.0=pyhd3eb1b0_0\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - flake8==5.0.4\n - importlib-metadata==4.2.0\n - mccabe==0.7.0\n - pycodestyle==2.9.1\n - pyflakes==2.5.0\nprefix: /opt/conda/envs/tornado\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tornado/test/web_test.py::FinalReturnTest::test_finish_method_return_future","tornado/test/web_test.py::FinalReturnTest::test_render_method_return_future"],"string":"[\n \"tornado/test/web_test.py::FinalReturnTest::test_finish_method_return_future\",\n \"tornado/test/web_test.py::FinalReturnTest::test_render_method_return_future\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tornado/test/web_test.py::SecureCookieV1Test::test_arbitrary_bytes","tornado/test/web_test.py::SecureCookieV1Test::test_cookie_tampering_future_timestamp","tornado/test/web_test.py::SecureCookieV1Test::test_round_trip","tornado/test/web_test.py::SecureCookieV2Test::test_key_version_increment_version","tornado/test/web_test.py::SecureCookieV2Test::test_key_version_invalidate_version","tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip","tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip_differing_version","tornado/test/web_test.py::SecureCookieV2Test::test_round_trip","tornado/test/web_test.py::CookieTest::test_cookie_special_char","tornado/test/web_test.py::CookieTest::test_get_cookie","tornado/test/web_test.py::CookieTest::test_set_cookie","tornado/test/web_test.py::CookieTest::test_set_cookie_domain","tornado/test/web_test.py::CookieTest::test_set_cookie_expires_days","tornado/test/web_test.py::CookieTest::test_set_cookie_false_flags","tornado/test/web_test.py::CookieTest::test_set_cookie_max_age","tornado/test/web_test.py::CookieTest::test_set_cookie_overwrite","tornado/test/web_test.py::AuthRedirectTest::test_absolute_auth_redirect","tornado/test/web_test.py::AuthRedirectTest::test_relative_auth_redirect","tornado/test/web_test.py::ConnectionCloseTest::test_connection_close","tornado/test/web_test.py::RequestEncodingTest::test_error","tornado/test/web_test.py::RequestEncodingTest::test_group_encoding","tornado/test/web_test.py::RequestEncodingTest::test_group_question_mark","tornado/test/web_test.py::RequestEncodingTest::test_slashes","tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument","tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_invalid_unicode","tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_plus","tornado/test/web_test.py::WSGISafeWebTest::test
_get_argument","tornado/test/web_test.py::WSGISafeWebTest::test_get_body_arguments","tornado/test/web_test.py::WSGISafeWebTest::test_get_query_arguments","tornado/test/web_test.py::WSGISafeWebTest::test_header_injection","tornado/test/web_test.py::WSGISafeWebTest::test_multi_header","tornado/test/web_test.py::WSGISafeWebTest::test_no_gzip","tornado/test/web_test.py::WSGISafeWebTest::test_optional_path","tornado/test/web_test.py::WSGISafeWebTest::test_redirect","tornado/test/web_test.py::WSGISafeWebTest::test_reverse_url","tornado/test/web_test.py::WSGISafeWebTest::test_types","tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_resources","tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_unescaped","tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect","tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect_double_slash","tornado/test/web_test.py::NonWSGIWebTests::test_empty_flush","tornado/test/web_test.py::NonWSGIWebTests::test_flow_control","tornado/test/web_test.py::ErrorResponseTest::test_default","tornado/test/web_test.py::ErrorResponseTest::test_failed_write_error","tornado/test/web_test.py::ErrorResponseTest::test_write_error","tornado/test/web_test.py::StaticFileTest::test_absolute_static_url","tornado/test/web_test.py::StaticFileTest::test_absolute_version_exclusion","tornado/test/web_test.py::StaticFileTest::test_include_host_override","tornado/test/web_test.py::StaticFileTest::test_path_traversal_protection","tornado/test/web_test.py::StaticFileTest::test_relative_version_exclusion","tornado/test/web_test.py::StaticFileTest::test_root_static_path","tornado/test/web_test.py::StaticFileTest::test_static_304_etag_modified_bug","tornado/test/web_test.py::StaticFileTest::test_static_304_if_modified_since","tornado/test/web_test.py::StaticFileTest::test_static_304_if_none_match","tornado/test/web_test.py::StaticFileTest::test_static_404","tornado/test/web_test.py::StaticFileTest::test_static_compressed_files","tornado/test/web_test.py::StaticFileTest::test_static_etag","tornado/test/web_test.py::StaticFileTest::test_static_files","tornado/test/web_test.py::StaticFileTest::test_static_head","tornado/test/web_test.py::StaticFileTest::test_static_head_range","tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_pre_epoch","tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_time_zone","tornado/test/web_test.py::StaticFileTest::test_static_invalid_range","tornado/test/web_test.py::StaticFileTest::test_static_range_if_none_match","tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_invalid_start","tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_zero_suffix","tornado/test/web_test.py::StaticFileTest::test_static_url","tornado/test/web_test.py::StaticFileTest::test_static_with_range","tornado/test/web_test.py::StaticFileTest::test_static_with_range_end_edge","tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_file","tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_past_end","tornado/test/web_test.py::StaticFileTest::test_static_with_range_neg_end","tornado/test/web_test.py::StaticFileTest::test_static_with_range_partial_past_end","tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_filename","tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_redirect","tornado/test/web_test.py::StaticFileWithPathTest::test_serve","tornado/test/web_test.py::CustomStaticFileTest::test_serve","tornado/test/web_test.p
y::CustomStaticFileTest::test_static_url","tornado/test/web_test.py::HostMatchingTest::test_host_matching","tornado/test/web_test.py::DefaultHostMatchingTest::test_default_host_matching","tornado/test/web_test.py::NamedURLSpecGroupsTest::test_named_urlspec_groups","tornado/test/web_test.py::ClearHeaderTest::test_clear_header","tornado/test/web_test.py::Header204Test::test_204_headers","tornado/test/web_test.py::Header304Test::test_304_headers","tornado/test/web_test.py::StatusReasonTest::test_status","tornado/test/web_test.py::DateHeaderTest::test_date_header","tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str","tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str_from_httputil","tornado/test/web_test.py::RaiseWithReasonTest::test_raise_with_reason","tornado/test/web_test.py::ErrorHandlerXSRFTest::test_404_xsrf","tornado/test/web_test.py::ErrorHandlerXSRFTest::test_error_xsrf","tornado/test/web_test.py::GzipTestCase::test_gzip","tornado/test/web_test.py::GzipTestCase::test_gzip_not_requested","tornado/test/web_test.py::GzipTestCase::test_gzip_static","tornado/test/web_test.py::GzipTestCase::test_vary_already_present","tornado/test/web_test.py::GzipTestCase::test_vary_already_present_multiple","tornado/test/web_test.py::PathArgsInPrepareTest::test_kw","tornado/test/web_test.py::PathArgsInPrepareTest::test_pos","tornado/test/web_test.py::ClearAllCookiesTest::test_clear_all_cookies","tornado/test/web_test.py::ExceptionHandlerTest::test_http_error","tornado/test/web_test.py::ExceptionHandlerTest::test_known_error","tornado/test/web_test.py::ExceptionHandlerTest::test_unknown_error","tornado/test/web_test.py::BuggyLoggingTest::test_buggy_log_exception","tornado/test/web_test.py::UIMethodUIModuleTest::test_ui_method","tornado/test/web_test.py::GetArgumentErrorTest::test_catch_error","tornado/test/web_test.py::MultipleExceptionTest::test_multi_exception","tornado/test/web_test.py::SetLazyPropertiesTest::test_set_properties","tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_is_lazy","tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_works","tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_works","tornado/test/web_test.py::UnimplementedHTTPMethodsTest::test_unimplemented_standard_methods","tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_other","tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_patch","tornado/test/web_test.py::AllHTTPMethodsTest::test_standard_methods","tornado/test/web_test.py::PatchMethodTest::test_other","tornado/test/web_test.py::PatchMethodTest::test_patch","tornado/test/web_test.py::FinishInPrepareTest::test_finish_in_prepare","tornado/test/web_test.py::Default404Test::test_404","tornado/test/web_test.py::Custom404Test::test_404","tornado/test/web_test.py::DefaultHandlerArgumentsTest::test_403","tornado/test/web_test.py::HandlerByNameTest::test_handler_by_name","tornado/test/web_test.py::StreamingRequestBodyTest::test_close_during_upload","tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return","tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return_with_data","tornado/test/web_test.py::StreamingRequestBodyTest::test_streaming_body","tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_chunked_body","tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_compressed_body","tornado/test/web_test.py::Decorated
StreamingRequestFlowControlTest::test_flow_control_fixed_body","tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_chunked_body","tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_compressed_body","tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_fixed_body","tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_high","tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_low","tornado/test/web_test.py::ClientCloseTest::test_client_close","tornado/test/web_test.py::SignedValueTest::test_expired","tornado/test/web_test.py::SignedValueTest::test_key_version_retrieval","tornado/test/web_test.py::SignedValueTest::test_key_versioning_invalid_key","tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_default_key","tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_non_default_key","tornado/test/web_test.py::SignedValueTest::test_known_values","tornado/test/web_test.py::SignedValueTest::test_name_swap","tornado/test/web_test.py::SignedValueTest::test_non_ascii","tornado/test/web_test.py::SignedValueTest::test_payload_tampering","tornado/test/web_test.py::SignedValueTest::test_signature_tampering","tornado/test/web_test.py::XSRFTest::test_cross_user","tornado/test/web_test.py::XSRFTest::test_distinct_tokens","tornado/test/web_test.py::XSRFTest::test_refresh_token","tornado/test/web_test.py::XSRFTest::test_versioning","tornado/test/web_test.py::XSRFTest::test_xsrf_fail_argument_invalid_format","tornado/test/web_test.py::XSRFTest::test_xsrf_fail_body_no_cookie","tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_invalid_format","tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_no_body","tornado/test/web_test.py::XSRFTest::test_xsrf_fail_no_token","tornado/test/web_test.py::XSRFTest::test_xsrf_success_header","tornado/test/web_test.py::XSRFTest::test_xsrf_success_non_hex_token","tornado/test/web_test.py::XSRFTest::test_xsrf_success_post_body","tornado/test/web_test.py::XSRFTest::test_xsrf_success_query_string","tornado/test/web_test.py::XSRFTest::test_xsrf_success_short_token","tornado/test/web_test.py::XSRFCookieKwargsTest::test_xsrf_httponly","tornado/test/web_test.py::FinishExceptionTest::test_finish_exception","tornado/test/web_test.py::DecoratorTest::test_addslash","tornado/test/web_test.py::DecoratorTest::test_removeslash","tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_match","tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_not_match","tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_match","tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_not_match","tornado/test/web_test.py::CacheTest::test_strong_etag_match","tornado/test/web_test.py::CacheTest::test_strong_etag_not_match","tornado/test/web_test.py::CacheTest::test_weak_etag_match","tornado/test/web_test.py::CacheTest::test_weak_etag_not_match","tornado/test/web_test.py::CacheTest::test_wildcard_etag","tornado/test/web_test.py::RequestSummaryTest::test_missing_remote_ip","tornado/test/web_test.py::HTTPErrorTest::test_copy","tornado/test/web_test.py::ApplicationTest::test_listen","tornado/test/web_test.py::URLSpecReverseTest::test_non_reversible","tornado/test/web_test.py::URLSpecReverseTest::test_reverse","tornado/test/web_test.py::URLSpecReverseTest::test_reverse_arguments","tornado/test/web_test.py::RedirectHandlerTest::test_basic_redirect","tornado/test/web_test.py::RedirectHandler
Test::test_redirect_pattern","tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_appending_argument","tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_argument"],"string":"[\n \"tornado/test/web_test.py::SecureCookieV1Test::test_arbitrary_bytes\",\n \"tornado/test/web_test.py::SecureCookieV1Test::test_cookie_tampering_future_timestamp\",\n \"tornado/test/web_test.py::SecureCookieV1Test::test_round_trip\",\n \"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_increment_version\",\n \"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_invalidate_version\",\n \"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip\",\n \"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip_differing_version\",\n \"tornado/test/web_test.py::SecureCookieV2Test::test_round_trip\",\n \"tornado/test/web_test.py::CookieTest::test_cookie_special_char\",\n \"tornado/test/web_test.py::CookieTest::test_get_cookie\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie_domain\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie_expires_days\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie_false_flags\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie_max_age\",\n \"tornado/test/web_test.py::CookieTest::test_set_cookie_overwrite\",\n \"tornado/test/web_test.py::AuthRedirectTest::test_absolute_auth_redirect\",\n \"tornado/test/web_test.py::AuthRedirectTest::test_relative_auth_redirect\",\n \"tornado/test/web_test.py::ConnectionCloseTest::test_connection_close\",\n \"tornado/test/web_test.py::RequestEncodingTest::test_error\",\n \"tornado/test/web_test.py::RequestEncodingTest::test_group_encoding\",\n \"tornado/test/web_test.py::RequestEncodingTest::test_group_question_mark\",\n \"tornado/test/web_test.py::RequestEncodingTest::test_slashes\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_invalid_unicode\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_plus\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_get_argument\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_get_body_arguments\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_get_query_arguments\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_header_injection\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_multi_header\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_no_gzip\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_optional_path\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_redirect\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_reverse_url\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_types\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_resources\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_unescaped\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect\",\n \"tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect_double_slash\",\n \"tornado/test/web_test.py::NonWSGIWebTests::test_empty_flush\",\n \"tornado/test/web_test.py::NonWSGIWebTests::test_flow_control\",\n \"tornado/test/web_test.py::ErrorResponseTest::test_default\",\n \"tornado/test/web_test.py::ErrorResponseTest::test_failed_write_error\",\n \"tornado/test/web_test.py::ErrorResponseTest::test_write_error\",\n 
\"tornado/test/web_test.py::StaticFileTest::test_absolute_static_url\",\n \"tornado/test/web_test.py::StaticFileTest::test_absolute_version_exclusion\",\n \"tornado/test/web_test.py::StaticFileTest::test_include_host_override\",\n \"tornado/test/web_test.py::StaticFileTest::test_path_traversal_protection\",\n \"tornado/test/web_test.py::StaticFileTest::test_relative_version_exclusion\",\n \"tornado/test/web_test.py::StaticFileTest::test_root_static_path\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_304_etag_modified_bug\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_304_if_modified_since\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_304_if_none_match\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_404\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_compressed_files\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_etag\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_files\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_head\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_head_range\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_pre_epoch\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_time_zone\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_invalid_range\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_range_if_none_match\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_invalid_start\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_zero_suffix\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_url\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range_end_edge\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_file\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_past_end\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range_neg_end\",\n \"tornado/test/web_test.py::StaticFileTest::test_static_with_range_partial_past_end\",\n \"tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_filename\",\n \"tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_redirect\",\n \"tornado/test/web_test.py::StaticFileWithPathTest::test_serve\",\n \"tornado/test/web_test.py::CustomStaticFileTest::test_serve\",\n \"tornado/test/web_test.py::CustomStaticFileTest::test_static_url\",\n \"tornado/test/web_test.py::HostMatchingTest::test_host_matching\",\n \"tornado/test/web_test.py::DefaultHostMatchingTest::test_default_host_matching\",\n \"tornado/test/web_test.py::NamedURLSpecGroupsTest::test_named_urlspec_groups\",\n \"tornado/test/web_test.py::ClearHeaderTest::test_clear_header\",\n \"tornado/test/web_test.py::Header204Test::test_204_headers\",\n \"tornado/test/web_test.py::Header304Test::test_304_headers\",\n \"tornado/test/web_test.py::StatusReasonTest::test_status\",\n \"tornado/test/web_test.py::DateHeaderTest::test_date_header\",\n \"tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str\",\n \"tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str_from_httputil\",\n \"tornado/test/web_test.py::RaiseWithReasonTest::test_raise_with_reason\",\n \"tornado/test/web_test.py::ErrorHandlerXSRFTest::test_404_xsrf\",\n \"tornado/test/web_test.py::ErrorHandlerXSRFTest::test_error_xsrf\",\n 
\"tornado/test/web_test.py::GzipTestCase::test_gzip\",\n \"tornado/test/web_test.py::GzipTestCase::test_gzip_not_requested\",\n \"tornado/test/web_test.py::GzipTestCase::test_gzip_static\",\n \"tornado/test/web_test.py::GzipTestCase::test_vary_already_present\",\n \"tornado/test/web_test.py::GzipTestCase::test_vary_already_present_multiple\",\n \"tornado/test/web_test.py::PathArgsInPrepareTest::test_kw\",\n \"tornado/test/web_test.py::PathArgsInPrepareTest::test_pos\",\n \"tornado/test/web_test.py::ClearAllCookiesTest::test_clear_all_cookies\",\n \"tornado/test/web_test.py::ExceptionHandlerTest::test_http_error\",\n \"tornado/test/web_test.py::ExceptionHandlerTest::test_known_error\",\n \"tornado/test/web_test.py::ExceptionHandlerTest::test_unknown_error\",\n \"tornado/test/web_test.py::BuggyLoggingTest::test_buggy_log_exception\",\n \"tornado/test/web_test.py::UIMethodUIModuleTest::test_ui_method\",\n \"tornado/test/web_test.py::GetArgumentErrorTest::test_catch_error\",\n \"tornado/test/web_test.py::MultipleExceptionTest::test_multi_exception\",\n \"tornado/test/web_test.py::SetLazyPropertiesTest::test_set_properties\",\n \"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_is_lazy\",\n \"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_works\",\n \"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_works\",\n \"tornado/test/web_test.py::UnimplementedHTTPMethodsTest::test_unimplemented_standard_methods\",\n \"tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_other\",\n \"tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_patch\",\n \"tornado/test/web_test.py::AllHTTPMethodsTest::test_standard_methods\",\n \"tornado/test/web_test.py::PatchMethodTest::test_other\",\n \"tornado/test/web_test.py::PatchMethodTest::test_patch\",\n \"tornado/test/web_test.py::FinishInPrepareTest::test_finish_in_prepare\",\n \"tornado/test/web_test.py::Default404Test::test_404\",\n \"tornado/test/web_test.py::Custom404Test::test_404\",\n \"tornado/test/web_test.py::DefaultHandlerArgumentsTest::test_403\",\n \"tornado/test/web_test.py::HandlerByNameTest::test_handler_by_name\",\n \"tornado/test/web_test.py::StreamingRequestBodyTest::test_close_during_upload\",\n \"tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return\",\n \"tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return_with_data\",\n \"tornado/test/web_test.py::StreamingRequestBodyTest::test_streaming_body\",\n \"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_chunked_body\",\n \"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_compressed_body\",\n \"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_fixed_body\",\n \"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_chunked_body\",\n \"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_compressed_body\",\n \"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_fixed_body\",\n \"tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_high\",\n \"tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_low\",\n \"tornado/test/web_test.py::ClientCloseTest::test_client_close\",\n \"tornado/test/web_test.py::SignedValueTest::test_expired\",\n 
\"tornado/test/web_test.py::SignedValueTest::test_key_version_retrieval\",\n \"tornado/test/web_test.py::SignedValueTest::test_key_versioning_invalid_key\",\n \"tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_default_key\",\n \"tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_non_default_key\",\n \"tornado/test/web_test.py::SignedValueTest::test_known_values\",\n \"tornado/test/web_test.py::SignedValueTest::test_name_swap\",\n \"tornado/test/web_test.py::SignedValueTest::test_non_ascii\",\n \"tornado/test/web_test.py::SignedValueTest::test_payload_tampering\",\n \"tornado/test/web_test.py::SignedValueTest::test_signature_tampering\",\n \"tornado/test/web_test.py::XSRFTest::test_cross_user\",\n \"tornado/test/web_test.py::XSRFTest::test_distinct_tokens\",\n \"tornado/test/web_test.py::XSRFTest::test_refresh_token\",\n \"tornado/test/web_test.py::XSRFTest::test_versioning\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_argument_invalid_format\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_body_no_cookie\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_invalid_format\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_no_body\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_no_token\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_success_header\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_success_non_hex_token\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_success_post_body\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_success_query_string\",\n \"tornado/test/web_test.py::XSRFTest::test_xsrf_success_short_token\",\n \"tornado/test/web_test.py::XSRFCookieKwargsTest::test_xsrf_httponly\",\n \"tornado/test/web_test.py::FinishExceptionTest::test_finish_exception\",\n \"tornado/test/web_test.py::DecoratorTest::test_addslash\",\n \"tornado/test/web_test.py::DecoratorTest::test_removeslash\",\n \"tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_match\",\n \"tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_not_match\",\n \"tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_match\",\n \"tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_not_match\",\n \"tornado/test/web_test.py::CacheTest::test_strong_etag_match\",\n \"tornado/test/web_test.py::CacheTest::test_strong_etag_not_match\",\n \"tornado/test/web_test.py::CacheTest::test_weak_etag_match\",\n \"tornado/test/web_test.py::CacheTest::test_weak_etag_not_match\",\n \"tornado/test/web_test.py::CacheTest::test_wildcard_etag\",\n \"tornado/test/web_test.py::RequestSummaryTest::test_missing_remote_ip\",\n \"tornado/test/web_test.py::HTTPErrorTest::test_copy\",\n \"tornado/test/web_test.py::ApplicationTest::test_listen\",\n \"tornado/test/web_test.py::URLSpecReverseTest::test_non_reversible\",\n \"tornado/test/web_test.py::URLSpecReverseTest::test_reverse\",\n \"tornado/test/web_test.py::URLSpecReverseTest::test_reverse_arguments\",\n \"tornado/test/web_test.py::RedirectHandlerTest::test_basic_redirect\",\n \"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_pattern\",\n \"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_appending_argument\",\n \"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_argument\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"Apache License 
2.0"},"__index_level_0__":{"kind":"number","value":2554,"string":"2,554"},"num_tokens_patch":{"kind":"number","value":1235,"string":"1,235"},"before_filepaths":{"kind":"list like","value":["tornado/iostream.py","tornado/netutil.py","tornado/web.py"],"string":"[\n \"tornado/iostream.py\",\n \"tornado/netutil.py\",\n \"tornado/web.py\"\n]"}}},{"rowIdx":577,"cells":{"instance_id":{"kind":"string","value":"marshmallow-code__marshmallow-821"},"base_commit":{"kind":"string","value":"bfc6bedf291bb54f8623acc9380139c06bc8acb2"},"created_at":{"kind":"string","value":"2018-05-20 02:24:22"},"environment_setup_commit":{"kind":"string","value":"8e217c8d6fefb7049ab3389f31a8d35824fa2d96"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/marshmallow/fields.py b/marshmallow/fields.py\nindex ecfd28d4..737fbfb0 100755\n--- a/marshmallow/fields.py\n+++ b/marshmallow/fields.py\n@@ -1134,6 +1134,15 @@ class Dict(Field):\n 'marshmallow.base.FieldABC')\n self.key_container = keys\n \n+ def _add_to_schema(self, field_name, schema):\n+ super(Dict, self)._add_to_schema(field_name, schema)\n+ if self.value_container:\n+ self.value_container.parent = self\n+ self.value_container.name = field_name\n+ if self.key_container:\n+ self.key_container.parent = self\n+ self.key_container.name = field_name\n+\n def _serialize(self, value, attr, obj):\n if value is None:\n return None\n"},"problem_statement":{"kind":"string","value":"Question: How can I pass the context in a nested field of a structured dict?\nI noticed that if you use a nested field for values in a structured Dict, the context is not automatically given to the nested schema. Is there a way to pass it the context?\r\n\r\nExample:\r\n```python\r\nclass Inner(Schema):\r\n foo = fields.String()\r\n\r\n @validates('foo')\r\n def validate_foo(self, value):\r\n if 'foo_context' not in self.context:\r\n raise ValidationError('no context!')\r\n\r\n\r\nclass Outer(Schema):\r\n bar = fields.Dict(values=fields.Nested(Inner))\r\n\r\n# gives no error:\r\nInner(context={'foo_context': 'foo'}).load({'foo': 'some foo'})\r\n# gives 'no context!' 
error:\r\nOuter(context={'foo_context': 'foo'}).load({'bar': { 'key': {'foo': 'some foo'}}})\r\n```"},"repo":{"kind":"string","value":"marshmallow-code/marshmallow"},"test_patch":{"kind":"string","value":"diff --git a/tests/test_schema.py b/tests/test_schema.py\nindex 17c04300..9fee0d63 100755\n--- a/tests/test_schema.py\n+++ b/tests/test_schema.py\n@@ -2134,6 +2134,27 @@ class TestContext:\n outer.context['foo_context'] = 'foo'\n assert outer.load({'bars': [{'foo': 42}]})\n \n+ # Regression test for https://github.com/marshmallow-code/marshmallow/issues/820\n+ def test_nested_dict_fields_inherit_context(self):\n+ class InnerSchema(Schema):\n+ foo = fields.Field()\n+\n+ @validates('foo')\n+ def validate_foo(self, value):\n+ if 'foo_context' not in self.context:\n+ raise ValidationError('Missing context')\n+\n+ class OuterSchema(Schema):\n+ bars = fields.Dict(values=fields.Nested(InnerSchema()))\n+\n+ inner = InnerSchema()\n+ inner.context['foo_context'] = 'foo'\n+ assert inner.load({'foo': 42})\n+\n+ outer = OuterSchema()\n+ outer.context['foo_context'] = 'foo'\n+ assert outer.load({'bars': {'test': {'foo': 42}}})\n+\n \n def test_serializer_can_specify_nested_object_as_attribute(blog):\n class BlogUsernameSchema(Schema):\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [],\n \"has_test_patch\": true,\n \"is_lite\": true,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 1,\n \"test_score\": 0\n },\n \"num_modified_files\": 1\n}"},"version":{"kind":"string","value":"3.0"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[reco]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-mock\",\n \"pytest-asyncio\"\n ],\n \"pre_install\": null,\n \"python\": \"3.9\",\n \"reqs_path\": [\n \"dev-requirements.txt\",\n \"requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"attrs==25.3.0\ncoverage==7.8.0\ndistlib==0.3.9\nexceptiongroup==1.2.2\nexecnet==2.1.1\nfilelock==3.18.0\nflake8==3.5.0\niniconfig==2.1.0\ninvoke==1.0.0\n-e git+https://github.com/marshmallow-code/marshmallow.git@bfc6bedf291bb54f8623acc9380139c06bc8acb2#egg=marshmallow\nmccabe==0.6.1\nmore-itertools==10.6.0\npackaging==24.2\nplatformdirs==4.3.7\npluggy==1.5.0\npy==1.11.0\npycodestyle==2.3.1\npyflakes==1.6.0\npytest==8.3.5\npytest-asyncio==0.26.0\npytest-cov==6.0.0\npytest-mock==3.14.0\npytest-xdist==3.6.1\npython-dateutil==2.7.3\npytz==2018.4\nsimplejson==3.15.0\nsix==1.17.0\ntoml==0.10.2\ntomli==2.2.1\ntox==3.12.1\ntyping_extensions==4.13.0\nvirtualenv==20.29.3\n"},"environment":{"kind":"string","value":"name: marshmallow\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - pip=25.0=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - 
tk=8.6.14=h39e8969_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - attrs==25.3.0\n - coverage==7.8.0\n - distlib==0.3.9\n - exceptiongroup==1.2.2\n - execnet==2.1.1\n - filelock==3.18.0\n - flake8==3.5.0\n - iniconfig==2.1.0\n - invoke==1.0.0\n - mccabe==0.6.1\n - more-itertools==10.6.0\n - packaging==24.2\n - platformdirs==4.3.7\n - pluggy==1.5.0\n - py==1.11.0\n - pycodestyle==2.3.1\n - pyflakes==1.6.0\n - pytest==8.3.5\n - pytest-asyncio==0.26.0\n - pytest-cov==6.0.0\n - pytest-mock==3.14.0\n - pytest-xdist==3.6.1\n - python-dateutil==2.7.3\n - pytz==2018.4\n - simplejson==3.15.0\n - six==1.17.0\n - toml==0.10.2\n - tomli==2.2.1\n - tox==3.12.1\n - typing-extensions==4.13.0\n - virtualenv==20.29.3\nprefix: /opt/conda/envs/marshmallow\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/test_schema.py::TestContext::test_nested_dict_fields_inherit_context"],"string":"[\n \"tests/test_schema.py::TestContext::test_nested_dict_fields_inherit_context\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/test_schema.py::test_serializing_basic_object[UserSchema]","tests/test_schema.py::test_serializing_basic_object[UserMetaSchema]","tests/test_schema.py::test_serializer_dump","tests/test_schema.py::test_dump_raises_with_dict_of_errors","tests/test_schema.py::test_dump_mode_raises_error[UserSchema]","tests/test_schema.py::test_dump_mode_raises_error[UserMetaSchema]","tests/test_schema.py::test_dump_resets_errors","tests/test_schema.py::test_load_resets_errors","tests/test_schema.py::test_load_validation_error_stores_input_data_and_valid_data","tests/test_schema.py::test_dump_validation_error_stores_partially_valid_data","tests/test_schema.py::test_dump_resets_error_fields","tests/test_schema.py::test_load_resets_error_fields","tests/test_schema.py::test_load_resets_error_kwargs","tests/test_schema.py::test_errored_fields_do_not_appear_in_output","tests/test_schema.py::test_load_many_stores_error_indices","tests/test_schema.py::test_dump_many","tests/test_schema.py::test_multiple_errors_can_be_stored_for_a_given_index","tests/test_schema.py::test_dump_many_stores_error_indices","tests/test_schema.py::test_dump_many_doesnt_stores_error_indices_when_index_errors_is_false","tests/test_schema.py::test_dump_returns_a_dict","tests/test_schema.py::test_dumps_returns_a_string","tests/test_schema.py::test_dumping_single_object_with_collection_schema","tests/test_schema.py::test_loading_single_object_with_collection_schema","tests/test_schema.py::test_dumps_many","tests/test_schema.py::test_load_returns_an_object","tests/test_schema.py::test_load_many","tests/test_schema.py::test_loads_returns_a_user","tests/test_schema.py::test_loads_many","tests/test_schema.py::test_loads_deserializes_from_json","tests/test_schema.py::test_serializing_none","tests/test_schema.py::test_default_many_symmetry","tests/test_schema.py::test_on_bind_field_hook","tests/test_schema.py::test_nested_on_bind_field_hook","tests/test_schema.py::TestValidate::test_validate_raises_with_errors_dict","tests/test_schema.py::TestValidate::test_validate_many","tests/test_schema.py::TestValidate::test_validate_many_doesnt_store_index_if_index_errors_option_is_false","tests/test_schema.py::TestValidate::test_validate","tests/test_schema.py::TestValidate::test_validate_required","tests/test_schema.py::test_fields_are_not_copies[UserSchema]","tests/test_schema.py::test_fields_are_not_copies[UserMetaSchem
a]","tests/test_schema.py::test_dumps_returns_json","tests/test_schema.py::test_naive_datetime_field","tests/test_schema.py::test_datetime_formatted_field","tests/test_schema.py::test_datetime_iso_field","tests/test_schema.py::test_tz_datetime_field","tests/test_schema.py::test_local_datetime_field","tests/test_schema.py::test_class_variable","tests/test_schema.py::test_serialize_many[UserSchema]","tests/test_schema.py::test_serialize_many[UserMetaSchema]","tests/test_schema.py::test_inheriting_schema","tests/test_schema.py::test_custom_field","tests/test_schema.py::test_url_field","tests/test_schema.py::test_relative_url_field","tests/test_schema.py::test_stores_invalid_url_error[UserSchema]","tests/test_schema.py::test_stores_invalid_url_error[UserMetaSchema]","tests/test_schema.py::test_email_field[UserSchema]","tests/test_schema.py::test_email_field[UserMetaSchema]","tests/test_schema.py::test_stored_invalid_email","tests/test_schema.py::test_integer_field","tests/test_schema.py::test_as_string","tests/test_schema.py::test_method_field[UserSchema]","tests/test_schema.py::test_method_field[UserMetaSchema]","tests/test_schema.py::test_function_field","tests/test_schema.py::test_prefix[UserSchema]","tests/test_schema.py::test_prefix[UserMetaSchema]","tests/test_schema.py::test_fields_must_be_declared_as_instances","tests/test_schema.py::test_serializing_generator[UserSchema]","tests/test_schema.py::test_serializing_generator[UserMetaSchema]","tests/test_schema.py::test_serializing_empty_list_returns_empty_list","tests/test_schema.py::test_serializing_dict","tests/test_schema.py::test_serializing_dict_with_meta_fields","tests/test_schema.py::test_exclude_in_init[UserSchema]","tests/test_schema.py::test_exclude_in_init[UserMetaSchema]","tests/test_schema.py::test_only_in_init[UserSchema]","tests/test_schema.py::test_only_in_init[UserMetaSchema]","tests/test_schema.py::test_invalid_only_param","tests/test_schema.py::test_can_serialize_uuid","tests/test_schema.py::test_can_serialize_time","tests/test_schema.py::test_invalid_time","tests/test_schema.py::test_invalid_date","tests/test_schema.py::test_invalid_dict_but_okay","tests/test_schema.py::test_json_module_is_deprecated","tests/test_schema.py::test_render_module","tests/test_schema.py::test_custom_error_message","tests/test_schema.py::test_load_errors_with_many","tests/test_schema.py::test_error_raised_if_fields_option_is_not_list","tests/test_schema.py::test_error_raised_if_additional_option_is_not_list","tests/test_schema.py::test_nested_custom_set_in_exclude_reusing_schema","tests/test_schema.py::test_nested_only","tests/test_schema.py::test_nested_only_inheritance","tests/test_schema.py::test_nested_only_empty_inheritance","tests/test_schema.py::test_nested_exclude","tests/test_schema.py::test_nested_exclude_inheritance","tests/test_schema.py::test_nested_only_and_exclude","tests/test_schema.py::test_nested_only_then_exclude_inheritance","tests/test_schema.py::test_nested_exclude_then_only_inheritance","tests/test_schema.py::test_nested_exclude_and_only_inheritance","tests/test_schema.py::test_meta_nested_exclude","tests/test_schema.py::test_nested_custom_set_not_implementing_getitem","tests/test_schema.py::test_deeply_nested_only_and_exclude","tests/test_schema.py::TestDeeplyNestedLoadOnly::test_load_only","tests/test_schema.py::TestDeeplyNestedLoadOnly::test_dump_only","tests/test_schema.py::TestDeeplyNestedListLoadOnly::test_load_only","tests/test_schema.py::TestDeeplyNestedListLoadOnly::test_dump_only","tests/test_schema.py::test_n
ested_constructor_only_and_exclude","tests/test_schema.py::test_only_and_exclude","tests/test_schema.py::test_exclude_invalid_attribute","tests/test_schema.py::test_only_with_invalid_attribute","tests/test_schema.py::test_only_bounded_by_fields","tests/test_schema.py::test_only_empty","tests/test_schema.py::test_nested_with_sets","tests/test_schema.py::test_meta_serializer_fields","tests/test_schema.py::test_meta_fields_mapping","tests/test_schema.py::test_meta_field_not_on_obj_raises_attribute_error","tests/test_schema.py::test_exclude_fields","tests/test_schema.py::test_fields_option_must_be_list_or_tuple","tests/test_schema.py::test_exclude_option_must_be_list_or_tuple","tests/test_schema.py::test_dateformat_option","tests/test_schema.py::test_default_dateformat","tests/test_schema.py::test_inherit_meta","tests/test_schema.py::test_inherit_meta_override","tests/test_schema.py::test_additional","tests/test_schema.py::test_cant_set_both_additional_and_fields","tests/test_schema.py::test_serializing_none_meta","tests/test_schema.py::TestHandleError::test_dump_with_custom_error_handler","tests/test_schema.py::TestHandleError::test_load_with_custom_error_handler","tests/test_schema.py::TestHandleError::test_load_with_custom_error_handler_and_partially_valid_data","tests/test_schema.py::TestHandleError::test_custom_error_handler_with_validates_decorator","tests/test_schema.py::TestHandleError::test_custom_error_handler_with_validates_schema_decorator","tests/test_schema.py::TestHandleError::test_validate_with_custom_error_handler","tests/test_schema.py::TestFieldValidation::test_errors_are_cleared_after_loading_collection","tests/test_schema.py::TestFieldValidation::test_raises_error_with_list","tests/test_schema.py::TestFieldValidation::test_raises_error_with_dict","tests/test_schema.py::TestFieldValidation::test_ignored_if_not_in_only","tests/test_schema.py::test_schema_repr","tests/test_schema.py::TestNestedSchema::test_flat_nested","tests/test_schema.py::TestNestedSchema::test_nested_many_with_missing_attribute","tests/test_schema.py::TestNestedSchema::test_nested_with_attribute_none","tests/test_schema.py::TestNestedSchema::test_flat_nested2","tests/test_schema.py::TestNestedSchema::test_nested_field_does_not_validate_required","tests/test_schema.py::TestNestedSchema::test_nested_none","tests/test_schema.py::TestNestedSchema::test_nested","tests/test_schema.py::TestNestedSchema::test_nested_many_fields","tests/test_schema.py::TestNestedSchema::test_nested_meta_many","tests/test_schema.py::TestNestedSchema::test_nested_only","tests/test_schema.py::TestNestedSchema::test_exclude","tests/test_schema.py::TestNestedSchema::test_list_field","tests/test_schema.py::TestNestedSchema::test_nested_load_many","tests/test_schema.py::TestNestedSchema::test_nested_errors","tests/test_schema.py::TestNestedSchema::test_nested_dump_errors","tests/test_schema.py::TestNestedSchema::test_nested_dump","tests/test_schema.py::TestNestedSchema::test_nested_method_field","tests/test_schema.py::TestNestedSchema::test_nested_function_field","tests/test_schema.py::TestNestedSchema::test_nested_prefixed_field","tests/test_schema.py::TestNestedSchema::test_nested_prefixed_many_field","tests/test_schema.py::TestNestedSchema::test_invalid_float_field","tests/test_schema.py::TestNestedSchema::test_serializer_meta_with_nested_fields","tests/test_schema.py::TestNestedSchema::test_serializer_with_nested_meta_fields","tests/test_schema.py::TestNestedSchema::test_nested_fields_must_be_passed_a_serializer","tests/test_schema.py
::TestNestedSchema::test_invalid_type_passed_to_nested_field","tests/test_schema.py::TestNestedSchema::test_all_errors_on_many_nested_field_with_validates_decorator","tests/test_schema.py::TestNestedSchema::test_dump_validation_error","tests/test_schema.py::TestSelfReference::test_nesting_schema_within_itself","tests/test_schema.py::TestSelfReference::test_nesting_schema_by_passing_class_name","tests/test_schema.py::TestSelfReference::test_nesting_within_itself_meta","tests/test_schema.py::TestSelfReference::test_nested_self_with_only_param","tests/test_schema.py::TestSelfReference::test_multiple_nested_self_fields","tests/test_schema.py::TestSelfReference::test_nested_many","tests/test_schema.py::test_serialization_with_required_field","tests/test_schema.py::test_deserialization_with_required_field","tests/test_schema.py::test_deserialization_with_required_field_and_custom_validator","tests/test_schema.py::TestContext::test_context_method","tests/test_schema.py::TestContext::test_context_method_function","tests/test_schema.py::TestContext::test_function_field_raises_error_when_context_not_available","tests/test_schema.py::TestContext::test_function_field_handles_bound_serializer","tests/test_schema.py::TestContext::test_fields_context","tests/test_schema.py::TestContext::test_nested_fields_inherit_context","tests/test_schema.py::TestContext::test_nested_list_fields_inherit_context","tests/test_schema.py::test_serializer_can_specify_nested_object_as_attribute","tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_schema_subclass","tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_non_schema_subclass","tests/test_schema.py::TestFieldInheritance::test_inheritance_follows_mro","tests/test_schema.py::TestGetAttribute::test_get_attribute_is_used","tests/test_schema.py::TestGetAttribute::test_get_attribute_with_many","tests/test_schema.py::TestRequiredFields::test_required_string_field_missing","tests/test_schema.py::TestRequiredFields::test_required_string_field_failure","tests/test_schema.py::TestRequiredFields::test_allow_none_param","tests/test_schema.py::TestRequiredFields::test_allow_none_custom_message","tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_dump_output","tests/test_schema.py::TestDefaults::test_none_is_serialized_to_none","tests/test_schema.py::TestDefaults::test_default_and_value_missing","tests/test_schema.py::TestDefaults::test_loading_none","tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_load_output","tests/test_schema.py::TestLoadOnly::test_load_only","tests/test_schema.py::TestLoadOnly::test_dump_only","tests/test_schema.py::TestLoadOnly::test_url_field_requre_tld_false"],"string":"[\n \"tests/test_schema.py::test_serializing_basic_object[UserSchema]\",\n \"tests/test_schema.py::test_serializing_basic_object[UserMetaSchema]\",\n \"tests/test_schema.py::test_serializer_dump\",\n \"tests/test_schema.py::test_dump_raises_with_dict_of_errors\",\n \"tests/test_schema.py::test_dump_mode_raises_error[UserSchema]\",\n \"tests/test_schema.py::test_dump_mode_raises_error[UserMetaSchema]\",\n \"tests/test_schema.py::test_dump_resets_errors\",\n \"tests/test_schema.py::test_load_resets_errors\",\n \"tests/test_schema.py::test_load_validation_error_stores_input_data_and_valid_data\",\n \"tests/test_schema.py::test_dump_validation_error_stores_partially_valid_data\",\n \"tests/test_schema.py::test_dump_resets_error_fields\",\n \"tests/test_schema.py::test_load_resets_error_fields\",\n 
\"tests/test_schema.py::test_load_resets_error_kwargs\",\n \"tests/test_schema.py::test_errored_fields_do_not_appear_in_output\",\n \"tests/test_schema.py::test_load_many_stores_error_indices\",\n \"tests/test_schema.py::test_dump_many\",\n \"tests/test_schema.py::test_multiple_errors_can_be_stored_for_a_given_index\",\n \"tests/test_schema.py::test_dump_many_stores_error_indices\",\n \"tests/test_schema.py::test_dump_many_doesnt_stores_error_indices_when_index_errors_is_false\",\n \"tests/test_schema.py::test_dump_returns_a_dict\",\n \"tests/test_schema.py::test_dumps_returns_a_string\",\n \"tests/test_schema.py::test_dumping_single_object_with_collection_schema\",\n \"tests/test_schema.py::test_loading_single_object_with_collection_schema\",\n \"tests/test_schema.py::test_dumps_many\",\n \"tests/test_schema.py::test_load_returns_an_object\",\n \"tests/test_schema.py::test_load_many\",\n \"tests/test_schema.py::test_loads_returns_a_user\",\n \"tests/test_schema.py::test_loads_many\",\n \"tests/test_schema.py::test_loads_deserializes_from_json\",\n \"tests/test_schema.py::test_serializing_none\",\n \"tests/test_schema.py::test_default_many_symmetry\",\n \"tests/test_schema.py::test_on_bind_field_hook\",\n \"tests/test_schema.py::test_nested_on_bind_field_hook\",\n \"tests/test_schema.py::TestValidate::test_validate_raises_with_errors_dict\",\n \"tests/test_schema.py::TestValidate::test_validate_many\",\n \"tests/test_schema.py::TestValidate::test_validate_many_doesnt_store_index_if_index_errors_option_is_false\",\n \"tests/test_schema.py::TestValidate::test_validate\",\n \"tests/test_schema.py::TestValidate::test_validate_required\",\n \"tests/test_schema.py::test_fields_are_not_copies[UserSchema]\",\n \"tests/test_schema.py::test_fields_are_not_copies[UserMetaSchema]\",\n \"tests/test_schema.py::test_dumps_returns_json\",\n \"tests/test_schema.py::test_naive_datetime_field\",\n \"tests/test_schema.py::test_datetime_formatted_field\",\n \"tests/test_schema.py::test_datetime_iso_field\",\n \"tests/test_schema.py::test_tz_datetime_field\",\n \"tests/test_schema.py::test_local_datetime_field\",\n \"tests/test_schema.py::test_class_variable\",\n \"tests/test_schema.py::test_serialize_many[UserSchema]\",\n \"tests/test_schema.py::test_serialize_many[UserMetaSchema]\",\n \"tests/test_schema.py::test_inheriting_schema\",\n \"tests/test_schema.py::test_custom_field\",\n \"tests/test_schema.py::test_url_field\",\n \"tests/test_schema.py::test_relative_url_field\",\n \"tests/test_schema.py::test_stores_invalid_url_error[UserSchema]\",\n \"tests/test_schema.py::test_stores_invalid_url_error[UserMetaSchema]\",\n \"tests/test_schema.py::test_email_field[UserSchema]\",\n \"tests/test_schema.py::test_email_field[UserMetaSchema]\",\n \"tests/test_schema.py::test_stored_invalid_email\",\n \"tests/test_schema.py::test_integer_field\",\n \"tests/test_schema.py::test_as_string\",\n \"tests/test_schema.py::test_method_field[UserSchema]\",\n \"tests/test_schema.py::test_method_field[UserMetaSchema]\",\n \"tests/test_schema.py::test_function_field\",\n \"tests/test_schema.py::test_prefix[UserSchema]\",\n \"tests/test_schema.py::test_prefix[UserMetaSchema]\",\n \"tests/test_schema.py::test_fields_must_be_declared_as_instances\",\n \"tests/test_schema.py::test_serializing_generator[UserSchema]\",\n \"tests/test_schema.py::test_serializing_generator[UserMetaSchema]\",\n \"tests/test_schema.py::test_serializing_empty_list_returns_empty_list\",\n \"tests/test_schema.py::test_serializing_dict\",\n 
\"tests/test_schema.py::test_serializing_dict_with_meta_fields\",\n \"tests/test_schema.py::test_exclude_in_init[UserSchema]\",\n \"tests/test_schema.py::test_exclude_in_init[UserMetaSchema]\",\n \"tests/test_schema.py::test_only_in_init[UserSchema]\",\n \"tests/test_schema.py::test_only_in_init[UserMetaSchema]\",\n \"tests/test_schema.py::test_invalid_only_param\",\n \"tests/test_schema.py::test_can_serialize_uuid\",\n \"tests/test_schema.py::test_can_serialize_time\",\n \"tests/test_schema.py::test_invalid_time\",\n \"tests/test_schema.py::test_invalid_date\",\n \"tests/test_schema.py::test_invalid_dict_but_okay\",\n \"tests/test_schema.py::test_json_module_is_deprecated\",\n \"tests/test_schema.py::test_render_module\",\n \"tests/test_schema.py::test_custom_error_message\",\n \"tests/test_schema.py::test_load_errors_with_many\",\n \"tests/test_schema.py::test_error_raised_if_fields_option_is_not_list\",\n \"tests/test_schema.py::test_error_raised_if_additional_option_is_not_list\",\n \"tests/test_schema.py::test_nested_custom_set_in_exclude_reusing_schema\",\n \"tests/test_schema.py::test_nested_only\",\n \"tests/test_schema.py::test_nested_only_inheritance\",\n \"tests/test_schema.py::test_nested_only_empty_inheritance\",\n \"tests/test_schema.py::test_nested_exclude\",\n \"tests/test_schema.py::test_nested_exclude_inheritance\",\n \"tests/test_schema.py::test_nested_only_and_exclude\",\n \"tests/test_schema.py::test_nested_only_then_exclude_inheritance\",\n \"tests/test_schema.py::test_nested_exclude_then_only_inheritance\",\n \"tests/test_schema.py::test_nested_exclude_and_only_inheritance\",\n \"tests/test_schema.py::test_meta_nested_exclude\",\n \"tests/test_schema.py::test_nested_custom_set_not_implementing_getitem\",\n \"tests/test_schema.py::test_deeply_nested_only_and_exclude\",\n \"tests/test_schema.py::TestDeeplyNestedLoadOnly::test_load_only\",\n \"tests/test_schema.py::TestDeeplyNestedLoadOnly::test_dump_only\",\n \"tests/test_schema.py::TestDeeplyNestedListLoadOnly::test_load_only\",\n \"tests/test_schema.py::TestDeeplyNestedListLoadOnly::test_dump_only\",\n \"tests/test_schema.py::test_nested_constructor_only_and_exclude\",\n \"tests/test_schema.py::test_only_and_exclude\",\n \"tests/test_schema.py::test_exclude_invalid_attribute\",\n \"tests/test_schema.py::test_only_with_invalid_attribute\",\n \"tests/test_schema.py::test_only_bounded_by_fields\",\n \"tests/test_schema.py::test_only_empty\",\n \"tests/test_schema.py::test_nested_with_sets\",\n \"tests/test_schema.py::test_meta_serializer_fields\",\n \"tests/test_schema.py::test_meta_fields_mapping\",\n \"tests/test_schema.py::test_meta_field_not_on_obj_raises_attribute_error\",\n \"tests/test_schema.py::test_exclude_fields\",\n \"tests/test_schema.py::test_fields_option_must_be_list_or_tuple\",\n \"tests/test_schema.py::test_exclude_option_must_be_list_or_tuple\",\n \"tests/test_schema.py::test_dateformat_option\",\n \"tests/test_schema.py::test_default_dateformat\",\n \"tests/test_schema.py::test_inherit_meta\",\n \"tests/test_schema.py::test_inherit_meta_override\",\n \"tests/test_schema.py::test_additional\",\n \"tests/test_schema.py::test_cant_set_both_additional_and_fields\",\n \"tests/test_schema.py::test_serializing_none_meta\",\n \"tests/test_schema.py::TestHandleError::test_dump_with_custom_error_handler\",\n \"tests/test_schema.py::TestHandleError::test_load_with_custom_error_handler\",\n \"tests/test_schema.py::TestHandleError::test_load_with_custom_error_handler_and_partially_valid_data\",\n 
\"tests/test_schema.py::TestHandleError::test_custom_error_handler_with_validates_decorator\",\n \"tests/test_schema.py::TestHandleError::test_custom_error_handler_with_validates_schema_decorator\",\n \"tests/test_schema.py::TestHandleError::test_validate_with_custom_error_handler\",\n \"tests/test_schema.py::TestFieldValidation::test_errors_are_cleared_after_loading_collection\",\n \"tests/test_schema.py::TestFieldValidation::test_raises_error_with_list\",\n \"tests/test_schema.py::TestFieldValidation::test_raises_error_with_dict\",\n \"tests/test_schema.py::TestFieldValidation::test_ignored_if_not_in_only\",\n \"tests/test_schema.py::test_schema_repr\",\n \"tests/test_schema.py::TestNestedSchema::test_flat_nested\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_many_with_missing_attribute\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_with_attribute_none\",\n \"tests/test_schema.py::TestNestedSchema::test_flat_nested2\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_field_does_not_validate_required\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_none\",\n \"tests/test_schema.py::TestNestedSchema::test_nested\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_many_fields\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_meta_many\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_only\",\n \"tests/test_schema.py::TestNestedSchema::test_exclude\",\n \"tests/test_schema.py::TestNestedSchema::test_list_field\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_load_many\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_errors\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_dump_errors\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_dump\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_method_field\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_function_field\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_prefixed_field\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_prefixed_many_field\",\n \"tests/test_schema.py::TestNestedSchema::test_invalid_float_field\",\n \"tests/test_schema.py::TestNestedSchema::test_serializer_meta_with_nested_fields\",\n \"tests/test_schema.py::TestNestedSchema::test_serializer_with_nested_meta_fields\",\n \"tests/test_schema.py::TestNestedSchema::test_nested_fields_must_be_passed_a_serializer\",\n \"tests/test_schema.py::TestNestedSchema::test_invalid_type_passed_to_nested_field\",\n \"tests/test_schema.py::TestNestedSchema::test_all_errors_on_many_nested_field_with_validates_decorator\",\n \"tests/test_schema.py::TestNestedSchema::test_dump_validation_error\",\n \"tests/test_schema.py::TestSelfReference::test_nesting_schema_within_itself\",\n \"tests/test_schema.py::TestSelfReference::test_nesting_schema_by_passing_class_name\",\n \"tests/test_schema.py::TestSelfReference::test_nesting_within_itself_meta\",\n \"tests/test_schema.py::TestSelfReference::test_nested_self_with_only_param\",\n \"tests/test_schema.py::TestSelfReference::test_multiple_nested_self_fields\",\n \"tests/test_schema.py::TestSelfReference::test_nested_many\",\n \"tests/test_schema.py::test_serialization_with_required_field\",\n \"tests/test_schema.py::test_deserialization_with_required_field\",\n \"tests/test_schema.py::test_deserialization_with_required_field_and_custom_validator\",\n \"tests/test_schema.py::TestContext::test_context_method\",\n \"tests/test_schema.py::TestContext::test_context_method_function\",\n 
\"tests/test_schema.py::TestContext::test_function_field_raises_error_when_context_not_available\",\n \"tests/test_schema.py::TestContext::test_function_field_handles_bound_serializer\",\n \"tests/test_schema.py::TestContext::test_fields_context\",\n \"tests/test_schema.py::TestContext::test_nested_fields_inherit_context\",\n \"tests/test_schema.py::TestContext::test_nested_list_fields_inherit_context\",\n \"tests/test_schema.py::test_serializer_can_specify_nested_object_as_attribute\",\n \"tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_schema_subclass\",\n \"tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_non_schema_subclass\",\n \"tests/test_schema.py::TestFieldInheritance::test_inheritance_follows_mro\",\n \"tests/test_schema.py::TestGetAttribute::test_get_attribute_is_used\",\n \"tests/test_schema.py::TestGetAttribute::test_get_attribute_with_many\",\n \"tests/test_schema.py::TestRequiredFields::test_required_string_field_missing\",\n \"tests/test_schema.py::TestRequiredFields::test_required_string_field_failure\",\n \"tests/test_schema.py::TestRequiredFields::test_allow_none_param\",\n \"tests/test_schema.py::TestRequiredFields::test_allow_none_custom_message\",\n \"tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_dump_output\",\n \"tests/test_schema.py::TestDefaults::test_none_is_serialized_to_none\",\n \"tests/test_schema.py::TestDefaults::test_default_and_value_missing\",\n \"tests/test_schema.py::TestDefaults::test_loading_none\",\n \"tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_load_output\",\n \"tests/test_schema.py::TestLoadOnly::test_load_only\",\n \"tests/test_schema.py::TestLoadOnly::test_dump_only\",\n \"tests/test_schema.py::TestLoadOnly::test_url_field_requre_tld_false\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"MIT License"},"__index_level_0__":{"kind":"number","value":2555,"string":"2,555"},"num_tokens_patch":{"kind":"number","value":203,"string":"203"},"before_filepaths":{"kind":"list like","value":["marshmallow/fields.py"],"string":"[\n \"marshmallow/fields.py\"\n]"}}},{"rowIdx":578,"cells":{"instance_id":{"kind":"string","value":"tornadoweb__tornado-2397"},"base_commit":{"kind":"string","value":"6410cd98c1a5e938246a17cac0769f689ed471c5"},"created_at":{"kind":"string","value":"2018-05-20 18:39:50"},"environment_setup_commit":{"kind":"string","value":"6410cd98c1a5e938246a17cac0769f689ed471c5"},"hints_text":{"kind":"string","value":"ploxiln: functionally looks great"},"patch":{"kind":"string","value":"diff --git a/tornado/curl_httpclient.py b/tornado/curl_httpclient.py\nindex 54fc5b36..ef98225c 100644\n--- a/tornado/curl_httpclient.py\n+++ b/tornado/curl_httpclient.py\n@@ -348,8 +348,8 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):\n curl.setopt(pycurl.PROXY, request.proxy_host)\n curl.setopt(pycurl.PROXYPORT, request.proxy_port)\n if request.proxy_username:\n- credentials = '%s:%s' % (request.proxy_username,\n- request.proxy_password)\n+ credentials = httputil.encode_username_password(request.proxy_username,\n+ request.proxy_password)\n curl.setopt(pycurl.PROXYUSERPWD, credentials)\n \n if (request.proxy_auth_mode is None or\n@@ -441,8 +441,6 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):\n curl.setopt(pycurl.INFILESIZE, len(request.body or ''))\n \n if request.auth_username is not None:\n- userpwd = \"%s:%s\" % (request.auth_username, request.auth_password or '')\n-\n if request.auth_mode is None or 
request.auth_mode == \"basic\":\n                curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)\n            elif request.auth_mode == \"digest\":\n@@ -450,7 +448,9 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):\n            else:\n                raise ValueError(\"Unsupported auth_mode %s\" % request.auth_mode)\n \n-            curl.setopt(pycurl.USERPWD, native_str(userpwd))\n+            userpwd = httputil.encode_username_password(request.auth_username,\n+                                                        request.auth_password)\n+            curl.setopt(pycurl.USERPWD, userpwd)\n            curl_log.debug(\"%s %s (username: %r)\", request.method, request.url,\n                           request.auth_username)\n        else:\ndiff --git a/tornado/httputil.py b/tornado/httputil.py\nindex 22a64c31..d1ace5a8 100644\n--- a/tornado/httputil.py\n+++ b/tornado/httputil.py\n@@ -29,11 +29,12 @@ import email.utils\n import numbers\n import re\n import time\n+import unicodedata\n import warnings\n \n from tornado.escape import native_str, parse_qs_bytes, utf8\n from tornado.log import gen_log\n-from tornado.util import ObjectDict, PY3\n+from tornado.util import ObjectDict, PY3, unicode_type\n \n if PY3:\n    import http.cookies as Cookie\n@@ -949,6 +950,20 @@ def _encode_header(key, pdict):\n    return '; '.join(out)\n \n \n+def encode_username_password(username, password):\n+    \"\"\"Encodes a username/password pair in the format used by HTTP auth.\n+\n+    The return value is a byte string in the form ``username:password``.\n+\n+    .. versionadded:: 5.1\n+    \"\"\"\n+    if isinstance(username, unicode_type):\n+        username = unicodedata.normalize('NFC', username)\n+    if isinstance(password, unicode_type):\n+        password = unicodedata.normalize('NFC', password)\n+    return utf8(username) + b\":\" + utf8(password)\n+\n+\n def doctests():\n    import doctest\n    return doctest.DocTestSuite()\ndiff --git a/tornado/simple_httpclient.py b/tornado/simple_httpclient.py\nindex 4df4898a..35c71936 100644\n--- a/tornado/simple_httpclient.py\n+++ b/tornado/simple_httpclient.py\n@@ -1,6 +1,6 @@\n from __future__ import absolute_import, division, print_function\n \n-from tornado.escape import utf8, _unicode\n+from tornado.escape import _unicode\n from tornado import gen\n from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy\n from tornado import httputil\n@@ -308,9 +308,9 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):\n            if self.request.auth_mode not in (None, \"basic\"):\n                raise ValueError(\"unsupported auth_mode %s\",\n                                 self.request.auth_mode)\n-            auth = utf8(username) + b\":\" + utf8(password)\n-            self.request.headers[\"Authorization\"] = (b\"Basic \" +\n-                                                     base64.b64encode(auth))\n+            self.request.headers[\"Authorization\"] = (\n+                b\"Basic \" + base64.b64encode(\n+                    httputil.encode_username_password(username, password)))\n            if self.request.user_agent:\n                self.request.headers[\"User-Agent\"] = self.request.user_agent\n            if not self.request.allow_nonstandard_methods:\n"},
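The heart of the patch above is a single idea: NFC-normalize the username and password, then UTF-8-encode them into the `username:password` byte string that both the Basic auth header and pycurl's `USERPWD` option expect. Below is a minimal standalone sketch of that idea, assuming Python 3 only; the name `encode_basic_credentials` is illustrative, while the real helper added by the patch is `tornado.httputil.encode_username_password`.

```python
import base64
import unicodedata


def encode_basic_credentials(username, password):
    # NFC-normalize first so composed and decomposed spellings of the same
    # character (e.g. u"jos\u00e9" vs. u"jose\u0301") produce identical bytes,
    # then UTF-8-encode; bytes inputs pass through unchanged.
    if isinstance(username, str):
        username = unicodedata.normalize("NFC", username).encode("utf-8")
    if isinstance(password, str):
        password = unicodedata.normalize("NFC", password).encode("utf-8")
    return username + b":" + password


# RFC 7617 section 2.1 test vector, also used by the test patch below:
# user "test", password "123£" encodes to "Basic dGVzdDoxMjPCow==".
assert base64.b64encode(encode_basic_credentials("test", "123£")) == b"dGVzdDoxMjPCow=="
```

Returning bytes rather than a formatted str is what makes the curl path work: per the pycurl unicode docs cited in the issue below, `setopt` wants bytes on Python 3, whereas the old `"%s:%s"` formatting forced an implicit ASCII encode and failed on characters like £.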
"problem_statement":{"kind":"string","value":"Unable to use non-ASCII characters in user/password for basic auth in curl_httpclient\nSteps to reproduce (Python 3.4):\r\n1. Create a tornado.httpclient.HTTPRequest whose auth_username or auth_password contains non-ASCII characters, for example pound sterling £ (code point 163, 0xA3, which lies outside the 0-127 ASCII range).\r\n2. Execute a curl_httpclient fetch using that request\r\n\r\nExpected result:\r\n1. The request completes successfully\r\n\r\nActual result:\r\n1. HTTP 599 is returned and the internal exception is:\r\n'ascii' codec can't encode character '\\xa3' in position 55: ordinal not in range(128)\r\n\r\n\r\nI am not sure whether I am missing the proper solution, but providing bytes as auth_password does not solve the issue either, because https://github.com/tornadoweb/tornado/blob/master/tornado/curl_httpclient.py#L438 internally uses string formatting. Reading through the pycurl docs (http://pycurl.io/docs/latest/unicode.html) suggests that on Python 3 a bytes object should be passed to curl setopt. It seems to be a Python 3 vs. Python 2 issue."},"repo":{"kind":"string","value":"tornadoweb/tornado"},"test_patch":{"kind":"string","value":"diff --git a/tornado/test/curl_httpclient_test.py b/tornado/test/curl_httpclient_test.py\nindex b7a85952..4230d4cd 100644\n--- a/tornado/test/curl_httpclient_test.py\n+++ b/tornado/test/curl_httpclient_test.py\n@@ -32,13 +32,15 @@ class CurlHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):\n \n \n class DigestAuthHandler(RequestHandler):\n+    def initialize(self, username, password):\n+        self.username = username\n+        self.password = password\n+\n    def get(self):\n        realm = 'test'\n        opaque = 'asdf'\n        # Real implementations would use a random nonce.\n        nonce = \"1234\"\n-        username = 'foo'\n-        password = 'bar'\n \n        auth_header = self.request.headers.get('Authorization', None)\n        if auth_header is not None:\n@@ -53,9 +55,9 @@\n            assert param_dict['realm'] == realm\n            assert param_dict['opaque'] == opaque\n            assert param_dict['nonce'] == nonce\n-            assert param_dict['username'] == username\n+            assert param_dict['username'] == self.username\n            assert param_dict['uri'] == self.request.path\n-            h1 = md5(utf8('%s:%s:%s' % (username, realm, password))).hexdigest()\n+            h1 = md5(utf8('%s:%s:%s' % (self.username, realm, self.password))).hexdigest()\n            h2 = md5(utf8('%s:%s' % (self.request.method,\n                                     self.request.path))).hexdigest()\n            digest = md5(utf8('%s:%s:%s' % (h1, nonce, h2))).hexdigest()\n@@ -88,7 +90,8 @@ class CurlHTTPClientTestCase(AsyncHTTPTestCase):\n \n    def get_app(self):\n        return Application([\n-            ('/digest', DigestAuthHandler),\n+            ('/digest', DigestAuthHandler, {'username': 'foo', 'password': 'bar'}),\n+            ('/digest_non_ascii', DigestAuthHandler, {'username': 'foo', 'password': 'barユ£'}),\n            ('/custom_reason', CustomReasonHandler),\n            ('/custom_fail_reason', CustomFailReasonHandler),\n        ])\n@@ -143,3 +146,8 @@\n        # during the setup phase doesn't lead the request to\n        # be dropped on the floor.\n        response = self.fetch(u'/ユニコード', raise_error=True)\n+\n+    def test_digest_auth_non_ascii(self):\n+        response = self.fetch('/digest_non_ascii', auth_mode='digest',\n+                              auth_username='foo', auth_password='barユ£')\n+        self.assertEqual(response.body, b'ok')\ndiff --git a/tornado/test/httpclient_test.py b/tornado/test/httpclient_test.py\nindex 60c8f490..fb8b12d5 100644\n--- a/tornado/test/httpclient_test.py\n+++ b/tornado/test/httpclient_test.py\n@@ -1,3 +1,4 @@\n+# -*- coding: utf-8 -*-\n from __future__ import absolute_import, division, print_function\n \n import base64\n@@ -8,6 +9,7 @@ import sys\n import threading\n import datetime\n from io import BytesIO\n+import unicodedata\n \n from tornado.escape import utf8, native_str\n from tornado import gen\n@@ -237,6 +239,7 @@\n        self.assertIs(exc_info[0][0], ZeroDivisionError)\n \n    def test_basic_auth(self):\n+        # This test data appears in section 2 of RFC 7617.\n 
self.assertEqual(self.fetch(\"/auth\", auth_username=\"Aladdin\",\n auth_password=\"open sesame\").body,\n b\"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\")\n@@ -247,6 +250,20 @@ Transfer-Encoding: chunked\n auth_mode=\"basic\").body,\n b\"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\")\n \n+ def test_basic_auth_unicode(self):\n+ # This test data appears in section 2.1 of RFC 7617.\n+ self.assertEqual(self.fetch(\"/auth\", auth_username=\"test\",\n+ auth_password=\"123£\").body,\n+ b\"Basic dGVzdDoxMjPCow==\")\n+\n+ # The standard mandates NFC. Give it a decomposed username\n+ # and ensure it is normalized to composed form.\n+ username = unicodedata.normalize(\"NFD\", u\"josé\")\n+ self.assertEqual(self.fetch(\"/auth\",\n+ auth_username=username,\n+ auth_password=\"səcrət\").body,\n+ b\"Basic am9zw6k6c8mZY3LJmXQ=\")\n+\n def test_unsupported_auth_mode(self):\n # curl and simple clients handle errors a bit differently; the\n # important thing is that they don't fall back to basic auth\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_hyperlinks\",\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 2,\n \"test_score\": 0\n },\n \"num_modified_files\": 3\n}"},"version":{"kind":"string","value":"5.0"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"pytest\",\n \"pip_packages\": [\n \"pytest\",\n \"flake8\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.6\",\n \"reqs_path\": null,\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work\ncertifi==2021.5.30\nflake8==5.0.4\nimportlib-metadata==4.2.0\niniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work\nmccabe==0.7.0\nmore-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work\npackaging @ file:///tmp/build/80754af9/packaging_1637314298585/work\npluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work\npy @ file:///opt/conda/conda-bld/py_1644396412707/work\npycodestyle==2.9.1\npyflakes==2.5.0\npyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work\npytest==6.2.4\ntoml @ file:///tmp/build/80754af9/toml_1616166611790/work\n-e git+https://github.com/tornadoweb/tornado.git@6410cd98c1a5e938246a17cac0769f689ed471c5#egg=tornado\ntyping_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work\nzipp @ file:///tmp/build/80754af9/zipp_1633618647012/work\n"},"environment":{"kind":"string","value":"name: tornado\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - attrs=21.4.0=pyhd3eb1b0_0\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - importlib_metadata=4.8.1=hd3eb1b0_0\n - iniconfig=1.1.1=pyhd3eb1b0_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - more-itertools=8.12.0=pyhd3eb1b0_0\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - 
packaging=21.3=pyhd3eb1b0_0\n - pip=21.2.2=py36h06a4308_0\n - pluggy=0.13.1=py36h06a4308_0\n - py=1.11.0=pyhd3eb1b0_0\n - pyparsing=3.0.4=pyhd3eb1b0_0\n - pytest=6.2.4=py36h06a4308_2\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - toml=0.10.2=pyhd3eb1b0_0\n - typing_extensions=4.1.1=pyh06a4308_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zipp=3.6.0=pyhd3eb1b0_0\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - flake8==5.0.4\n - importlib-metadata==4.2.0\n - mccabe==0.7.0\n - pycodestyle==2.9.1\n - pyflakes==2.5.0\nprefix: /opt/conda/envs/tornado\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth_unicode"],"string":"[\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth_unicode\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_304_with_content_length","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_all_methods","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth_explicit_mode","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_body_encoding","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_body_sanity_checks","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_chunked","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_chunked_close","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_configure_defaults","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_credentials_in_url","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_final_callback_stack_context","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_follow_redirect","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_http_error","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_http_error_no_raise","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_interface","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_callback","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_callback_stack_context","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_types","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_hello_world","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_multi_line_headers","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_non_ascii_header","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_patch_receives_payload","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_post","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_put_307","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_reuse_request_from_response","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_streaming_callback","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_streaming_stack_context","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_types","tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_unsupported_auth_mode","tornado/test/httpclient_test.py::RequestProxyTest::test_bad_attribute","tornado/test/httpclient_test.py::RequestProxyTes
t::test_both_set","tornado/test/httpclient_test.py::RequestProxyTest::test_default_set","tornado/test/httpclient_test.py::RequestProxyTest::test_defaults_none","tornado/test/httpclient_test.py::RequestProxyTest::test_neither_set","tornado/test/httpclient_test.py::RequestProxyTest::test_request_set","tornado/test/httpclient_test.py::HTTPResponseTestCase::test_str","tornado/test/httpclient_test.py::SyncHTTPClientTest::test_sync_client","tornado/test/httpclient_test.py::SyncHTTPClientTest::test_sync_client_error","tornado/test/httpclient_test.py::HTTPRequestTestCase::test_body","tornado/test/httpclient_test.py::HTTPRequestTestCase::test_body_setter","tornado/test/httpclient_test.py::HTTPRequestTestCase::test_headers","tornado/test/httpclient_test.py::HTTPRequestTestCase::test_headers_setter","tornado/test/httpclient_test.py::HTTPRequestTestCase::test_if_modified_since","tornado/test/httpclient_test.py::HTTPRequestTestCase::test_null_headers_setter","tornado/test/httpclient_test.py::HTTPErrorTestCase::test_copy","tornado/test/httpclient_test.py::HTTPErrorTestCase::test_error_with_response","tornado/test/httpclient_test.py::HTTPErrorTestCase::test_plain_error"],"string":"[\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_304_with_content_length\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_all_methods\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth_explicit_mode\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_body_encoding\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_body_sanity_checks\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_chunked\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_chunked_close\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_configure_defaults\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_credentials_in_url\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_final_callback_stack_context\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_follow_redirect\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_http_error\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_http_error_no_raise\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_interface\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_callback\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_callback_stack_context\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_types\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_hello_world\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_multi_line_headers\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_non_ascii_header\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_patch_receives_payload\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_post\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_put_307\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_reuse_request_from_response\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_streaming_callback\",\n 
\"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_streaming_stack_context\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_types\",\n \"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_unsupported_auth_mode\",\n \"tornado/test/httpclient_test.py::RequestProxyTest::test_bad_attribute\",\n \"tornado/test/httpclient_test.py::RequestProxyTest::test_both_set\",\n \"tornado/test/httpclient_test.py::RequestProxyTest::test_default_set\",\n \"tornado/test/httpclient_test.py::RequestProxyTest::test_defaults_none\",\n \"tornado/test/httpclient_test.py::RequestProxyTest::test_neither_set\",\n \"tornado/test/httpclient_test.py::RequestProxyTest::test_request_set\",\n \"tornado/test/httpclient_test.py::HTTPResponseTestCase::test_str\",\n \"tornado/test/httpclient_test.py::SyncHTTPClientTest::test_sync_client\",\n \"tornado/test/httpclient_test.py::SyncHTTPClientTest::test_sync_client_error\",\n \"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_body\",\n \"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_body_setter\",\n \"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_headers\",\n \"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_headers_setter\",\n \"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_if_modified_since\",\n \"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_null_headers_setter\",\n \"tornado/test/httpclient_test.py::HTTPErrorTestCase::test_copy\",\n \"tornado/test/httpclient_test.py::HTTPErrorTestCase::test_error_with_response\",\n \"tornado/test/httpclient_test.py::HTTPErrorTestCase::test_plain_error\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"Apache License 2.0"},"__index_level_0__":{"kind":"number","value":2559,"string":"2,559"},"num_tokens_patch":{"kind":"number","value":1059,"string":"1,059"},"before_filepaths":{"kind":"list like","value":["tornado/curl_httpclient.py","tornado/httputil.py","tornado/simple_httpclient.py"],"string":"[\n \"tornado/curl_httpclient.py\",\n \"tornado/httputil.py\",\n \"tornado/simple_httpclient.py\"\n]"}}},{"rowIdx":579,"cells":{"instance_id":{"kind":"string","value":"catmaid__catpy-25"},"base_commit":{"kind":"string","value":"ab4f858dda1144bec732738f406054248af7103d"},"created_at":{"kind":"string","value":"2018-05-21 18:56:37"},"environment_setup_commit":{"kind":"string","value":"ab4f858dda1144bec732738f406054248af7103d"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/catpy/__init__.py b/catpy/__init__.py\nindex feba40e..895073e 100644\n--- a/catpy/__init__.py\n+++ b/catpy/__init__.py\n@@ -10,3 +10,5 @@ __all__ = ['client']\n \n \n from catpy.client import CatmaidClient, CoordinateTransformer, CatmaidUrl # noqa\n+from catpy import image # noqa\n+from catpy import export # noqa\ndiff --git a/catpy/image.py b/catpy/image.py\nindex a601889..cd25378 100644\n--- a/catpy/image.py\n+++ b/catpy/image.py\n@@ -5,6 +5,8 @@ from __future__ import division, unicode_literals\n import logging\n from io import BytesIO\n from collections import OrderedDict\n+\n+from requests import HTTPError\n from timeit import timeit\n import itertools\n from warnings import warn\n@@ -392,6 +394,12 @@ class Stack(object):\n \n \n class ProjectStack(Stack):\n+ orientation_choices = {\n+ 0: \"xy\",\n+ 1: \"xz\",\n+ 2: \"zy\",\n+ }\n+\n def __init__(self, dimension, translation, resolution, orientation, broken_slices=None, canary_location=None):\n \"\"\"\n 
Representation of an image stack as it pertains to a CATMAID project\n@@ -430,7 +438,8 @@ class ProjectStack(Stack):\n \"\"\"\n stack = cls(\n stack_info['dimension'], stack_info['translation'], stack_info['resolution'],\n- stack_info['orientation'], stack_info['broken_slices'], stack_info['canary_location']\n+ cls.orientation_choices[stack_info['orientation']], stack_info['broken_slices'],\n+ stack_info['canary_location']\n )\n mirrors = [StackMirror.from_dict(d) for d in stack_info['mirrors']]\n \n@@ -670,6 +679,12 @@ class ImageFetcher(object):\n raise ValueError('Unknown dimension of volume: should be 2D or 3D')\n return np.moveaxis(arr, (0, 1, 2), self._dimension_mappings)\n \n+ def _make_empty_tile(self, width, height=None):\n+ height = height or width\n+ tile = np.empty((height, width), dtype=np.uint8)\n+ tile.fill(self.cval)\n+ return tile\n+\n def _get_tile(self, tile_index):\n \"\"\"\n Get the tile from the cache, handle broken slices, or fetch.\n@@ -689,9 +704,7 @@ class ImageFetcher(object):\n \n if tile_index.depth in self.stack.broken_slices:\n if self.broken_slice_handling == BrokenSliceHandling.FILL and self.cval is not None:\n- tile = np.empty((tile_index.width, tile_index.height))\n- tile.fill(self.cval)\n- return tile\n+ return self._make_empty_tile(tile_index.width, tile_index.height)\n else:\n raise NotImplementedError(\n \"'fill' with a non-None cval is the only implemented broken slice handling mode\"\n@@ -813,7 +826,14 @@ class ImageFetcher(object):\n Future of np.ndarray in source orientation\n \"\"\"\n url = self.mirror.generate_url(tile_index)\n- return response_to_array(self._session.get(url, timeout=self.timeout))\n+ try:\n+ return response_to_array(self._session.get(url, timeout=self.timeout))\n+ except HTTPError as e:\n+ if e.response.status_code == 404:\n+ logger.warning(\"Tile not found at %s (error 404), returning blank tile\", url)\n+ return self._make_empty_tile(tile_index.width, tile_index.height)\n+ else:\n+ raise\n \n def _reorient_roi_tgt_to_src(self, roi_tgt):\n return roi_tgt[:, self._dimension_mappings]\n"},"problem_statement":{"kind":"string","value":"Instantiating ImageFetcher from CatMaid fails due to integer orientation"},"repo":{"kind":"string","value":"catmaid/catpy"},"test_patch":{"kind":"string","value":"diff --git a/tests/test_image.py b/tests/test_image.py\nindex 7c02cf8..b73fa41 100644\n--- a/tests/test_image.py\n+++ b/tests/test_image.py\n@@ -6,6 +6,7 @@ import requests\n from PIL import Image\n from io import BytesIO\n from concurrent.futures import Future\n+from requests import HTTPError\n \n try:\n import mock\n@@ -508,7 +509,7 @@ def test_imagefetcher_set_mirror_title_warns_no_match(min_fetcher):\n \n def test_imagefetcher_set_mirror_title_warns_too_many(min_fetcher):\n min_fetcher.stack.mirrors.append(StackMirror(IMAGE_BASE, 1, 1, TILE_SOURCE_TYPE, 'png', 'title0', 10))\n- with pytest.warns(UserWarning, match='does not exist'):\n+ with pytest.warns(UserWarning, match='ore than one'):\n min_fetcher.mirror = 'title0'\n assert min_fetcher._mirror == min_fetcher.stack.mirrors[0]\n \n@@ -746,3 +747,17 @@ def test_imagefetcher_get_wrappers(min_fetcher, space):\n min_fetcher.get = mock.Mock()\n getattr(min_fetcher, 'get_{}_space'.format(space.value))('roi', 'zoom_level')\n min_fetcher.get.assert_called_with('roi', space, 'zoom_level', None)\n+\n+\n+def test_404_handled_correctly(min_fetcher):\n+ idx = TileIndex(0, 0, 0, 0, 100, 100)\n+ min_fetcher._session.get = mock.Mock(side_effect=HTTPError(response=mock.Mock(status_code=404)))\n+ 
with mock.patch('catpy.image.response_to_array', mock.Mock()):\n+ tile = min_fetcher._fetch(idx)\n+ assert tile.shape == (100, 100)\n+ assert (tile == 0).sum() == tile.size\n+\n+\n+@pytest.mark.xfail(reason=\"404 handling not implemented for threaded fetcher\")\n+def test_404_handled_correctly_threaded(min_fetcher):\n+ assert False\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_short_problem_statement\",\n \"has_many_modified_files\",\n \"has_many_hunks\",\n \"has_pytest_match_arg\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 2,\n \"test_score\": 0\n },\n \"num_modified_files\": 2\n}"},"version":{"kind":"string","value":"unknown"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-mock\",\n \"pytest-asyncio\"\n ],\n \"pre_install\": null,\n \"python\": \"3.6\",\n \"reqs_path\": [\n \"requirements/prod.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"attrs==22.2.0\n-e git+https://github.com/catmaid/catpy.git@ab4f858dda1144bec732738f406054248af7103d#egg=catpy\ncertifi==2021.5.30\ncoverage==6.2\ndecorator==5.1.1\nexecnet==1.9.0\nimportlib-metadata==4.8.3\niniconfig==1.1.1\nnetworkx==1.11\nnumpy==1.12.1\npackaging==21.3\nPillow==5.0.0\npluggy==1.0.0\npy==1.11.0\npyparsing==3.1.4\npytest==7.0.1\npytest-asyncio==0.16.0\npytest-cov==4.0.0\npytest-mock==3.6.1\npytest-xdist==3.0.2\nrequests==2.14.2\nrequests-futures==0.9.7\nsix==1.10.0\ntomli==1.2.3\ntyping_extensions==4.1.1\nzipp==3.6.0\n"},"environment":{"kind":"string","value":"name: catpy\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - pip=21.2.2=py36h06a4308_0\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - attrs==22.2.0\n - coverage==6.2\n - decorator==5.1.1\n - execnet==1.9.0\n - importlib-metadata==4.8.3\n - iniconfig==1.1.1\n - networkx==1.11\n - numpy==1.12.1\n - packaging==21.3\n - pillow==5.0.0\n - pluggy==1.0.0\n - py==1.11.0\n - pyparsing==3.1.4\n - pytest==7.0.1\n - pytest-asyncio==0.16.0\n - pytest-cov==4.0.0\n - pytest-mock==3.6.1\n - pytest-xdist==3.0.2\n - requests==2.14.2\n - requests-futures==0.9.7\n - six==1.10.0\n - tomli==1.2.3\n - typing-extensions==4.1.1\n - zipp==3.6.0\nprefix: /opt/conda/envs/catpy\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/test_image.py::test_404_handled_correctly"],"string":"[\n \"tests/test_image.py::test_404_handled_correctly\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list 
like","value":["tests/test_image.py::test_vol_maker[0-shape0-1]","tests/test_image.py::test_vol_maker[1-shape1-2]","tests/test_image.py::test_vol_maker[0-shape2-10]","tests/test_image.py::test_vol_maker[0-shape3-46]","tests/test_image.py::test_vol_maker[1-shape4-47]","tests/test_image.py::test_response_to_array_png[L]","tests/test_image.py::test_response_to_array_png[RGB]","tests/test_image.py::test_response_to_array_png[RGBA]","tests/test_image.py::test_response_to_array_jpeg[L]","tests/test_image.py::test_response_to_array_jpeg[RGB]","tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FILE_BASED-{image_base}{{depth}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}]","tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FILE_BASED_WITH_ZOOM_DIRS-{image_base}{{depth}}/{{zoom_level}}/{{row}}_{{col}}.{file_extension}]","tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.DIR_BASED-{image_base}{{zoom_level}}/{{depth}}/{{row}}/{{col}}.{file_extension}]","tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.RENDER_SERVICE-{image_base}largeDataTileSource/{tile_width}/{tile_height}/{{zoom_level}}/{{depth}}/{{row}}/{{col}}.{file_extension}]","tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FLIXSERVER-{image_base}{{depth}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}]","tests/test_image.py::test_as_future_for_not_future","tests/test_image.py::test_as_future_for_future","tests/test_image.py::test_fill_tiled_cuboid","tests/test_image.py::test_fill_tiled_cuboid_raises","tests/test_image.py::test_dict_subtract_mismatched_keys","tests/test_image.py::test_dict_subtract","tests/test_image.py::test_tile_index_coords","tests/test_image.py::test_tile_index_comparable[zoom_level]","tests/test_image.py::test_tile_index_comparable[height]","tests/test_image.py::test_tile_index_comparable[width]","tests/test_image.py::test_tile_index_url_kwargs","tests/test_image.py::test_stackmirror_corrects_image_base","tests/test_image.py::test_stackmirror_corrects_file_extension","tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FILE_BASED]","tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FILE_BASED_WITH_ZOOM_DIRS]","tests/test_image.py::test_stackmirror_formats_url[TileSourceType.DIR_BASED]","tests/test_image.py::test_stackmirror_formats_url[TileSourceType.RENDER_SERVICE]","tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FLIXSERVER]","tests/test_image.py::test_stackmirror_raises_on_incompatible_tile_index","tests/test_image.py::test_stackmirror_get_tile_index","tests/test_image.py::test_stack_sets_broken_slices_canary","tests/test_image.py::test_stack_fastest_mirror_calls_get","tests/test_image.py::test_stack_fastest_mirror_raises","tests/test_image.py::test_tilecache_can_set","tests/test_image.py::test_tilecache_set_refreshes_old","tests/test_image.py::test_tilecache_can_get","tests/test_image.py::test_tilecache_lru","tests/test_image.py::test_tilecache_can_clear","tests/test_image.py::test_tilecache_can_constrain_len","tests/test_image.py::test_tilecache_can_constrain_bytes","tests/test_image.py::test_imagefetcher_can_instantiate","tests/test_image.py::test_imagefetcher_mirror_fallback_warning","tests/test_image.py::test_imagefetcher_set_mirror_none","tests/test_image.py::test_imagefetcher_set_mirror_mirror","tests/test_image.py::test_imagefetcher_set_mirror_mirror_raises","tests/test_image.py::test_imagefetcher_set_mirror_int","tests/test_image.py::test_imag
efetcher_set_mirror_int_as_str","tests/test_image.py::test_imagefetcher_set_mirror_position_warns_no_match","tests/test_image.py::test_imagefetcher_set_mirror_position_warns_too_many","tests/test_image.py::test_imagefetcher_set_mirror_title","tests/test_image.py::test_imagefetcher_set_mirror_title_warns_no_match","tests/test_image.py::test_imagefetcher_set_mirror_title_warns_too_many","tests/test_image.py::test_imagefetcher_get_auth_default","tests/test_image.py::test_imagefetcher_get_auth_from_mirror","tests/test_image.py::test_imagefetcher_get_auth_fallback","tests/test_image.py::test_imagefetcher_clear_cache","tests/test_image.py::test_imagefetcher_map_dimensions","tests/test_image.py::test_imagefetcher_reorient","tests/test_image.py::test_imagefetcher_reorient_expands","tests/test_image.py::test_imagefetcher_reorient_throws","tests/test_image.py::test_imagefetcher_roi_to_tiles[roi0-expected_drc0-expected_yx_minmax0]","tests/test_image.py::test_imagefetcher_roi_to_tiles[roi1-expected_drc1-expected_yx_minmax1]","tests/test_image.py::test_imagefetcher_roi_to_tiles[roi2-expected_drc2-expected_yx_minmax2]","tests/test_image.py::test_imagefetcher_roi_to_tiles[roi3-expected_drc3-expected_yx_minmax3]","tests/test_image.py::test_imagefetcher_roi_to_tiles[roi4-expected_drc4-expected_yx_minmax4]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-scaled-0-expected0]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack-0-expected1]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack--2-expected2]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack-1-expected3]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project-0-expected4]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project--2-expected5]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project-1-expected6]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-scaled-0-expected0]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack-0-expected1]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack--2-expected2]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack-1-expected3]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project-0-expected4]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project--2-expected5]","tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project-1-expected6]","tests/test_image.py::test_imagefetcher_roi_to_scaled_raises[ImageFetcher]","tests/test_image.py::test_imagefetcher_roi_to_scaled_raises[ThreadedImageFetcher]","tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi0-1]","tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi1-2]","tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi2-1]","tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi3-2]","tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi4-2]","tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi5-4]","tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi6-12]","tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi0-1]","tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi1-2]","tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi2-1]","tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi3-2]","tests/test_image.py::test_imagef
etcher_get[ThreadedImageFetcher-roi4-2]","tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi5-4]","tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi6-12]","tests/test_image.py::test_imagefetcher_get_into_array[ImageFetcher]","tests/test_image.py::test_imagefetcher_get_into_array[ThreadedImageFetcher]","tests/test_image.py::test_imagefetcher_get_tile_from_cache[ImageFetcher]","tests/test_image.py::test_imagefetcher_get_tile_from_cache[ThreadedImageFetcher]","tests/test_image.py::test_imagefetcher_get_tile_from_broken_slice[ImageFetcher]","tests/test_image.py::test_imagefetcher_get_tile_from_broken_slice[ThreadedImageFetcher]","tests/test_image.py::test_imagefetcher_get_tile_from_fetch","tests/test_image.py::test_imagefetcher_fetch","tests/test_image.py::test_imagefetcher_get_wrappers[stack]","tests/test_image.py::test_imagefetcher_get_wrappers[scaled]","tests/test_image.py::test_imagefetcher_get_wrappers[project]"],"string":"[\n \"tests/test_image.py::test_vol_maker[0-shape0-1]\",\n \"tests/test_image.py::test_vol_maker[1-shape1-2]\",\n \"tests/test_image.py::test_vol_maker[0-shape2-10]\",\n \"tests/test_image.py::test_vol_maker[0-shape3-46]\",\n \"tests/test_image.py::test_vol_maker[1-shape4-47]\",\n \"tests/test_image.py::test_response_to_array_png[L]\",\n \"tests/test_image.py::test_response_to_array_png[RGB]\",\n \"tests/test_image.py::test_response_to_array_png[RGBA]\",\n \"tests/test_image.py::test_response_to_array_jpeg[L]\",\n \"tests/test_image.py::test_response_to_array_jpeg[RGB]\",\n \"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FILE_BASED-{image_base}{{depth}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}]\",\n \"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FILE_BASED_WITH_ZOOM_DIRS-{image_base}{{depth}}/{{zoom_level}}/{{row}}_{{col}}.{file_extension}]\",\n \"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.DIR_BASED-{image_base}{{zoom_level}}/{{depth}}/{{row}}/{{col}}.{file_extension}]\",\n \"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.RENDER_SERVICE-{image_base}largeDataTileSource/{tile_width}/{tile_height}/{{zoom_level}}/{{depth}}/{{row}}/{{col}}.{file_extension}]\",\n \"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FLIXSERVER-{image_base}{{depth}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}]\",\n \"tests/test_image.py::test_as_future_for_not_future\",\n \"tests/test_image.py::test_as_future_for_future\",\n \"tests/test_image.py::test_fill_tiled_cuboid\",\n \"tests/test_image.py::test_fill_tiled_cuboid_raises\",\n \"tests/test_image.py::test_dict_subtract_mismatched_keys\",\n \"tests/test_image.py::test_dict_subtract\",\n \"tests/test_image.py::test_tile_index_coords\",\n \"tests/test_image.py::test_tile_index_comparable[zoom_level]\",\n \"tests/test_image.py::test_tile_index_comparable[height]\",\n \"tests/test_image.py::test_tile_index_comparable[width]\",\n \"tests/test_image.py::test_tile_index_url_kwargs\",\n \"tests/test_image.py::test_stackmirror_corrects_image_base\",\n \"tests/test_image.py::test_stackmirror_corrects_file_extension\",\n \"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FILE_BASED]\",\n \"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FILE_BASED_WITH_ZOOM_DIRS]\",\n \"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.DIR_BASED]\",\n \"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.RENDER_SERVICE]\",\n 
\"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FLIXSERVER]\",\n \"tests/test_image.py::test_stackmirror_raises_on_incompatible_tile_index\",\n \"tests/test_image.py::test_stackmirror_get_tile_index\",\n \"tests/test_image.py::test_stack_sets_broken_slices_canary\",\n \"tests/test_image.py::test_stack_fastest_mirror_calls_get\",\n \"tests/test_image.py::test_stack_fastest_mirror_raises\",\n \"tests/test_image.py::test_tilecache_can_set\",\n \"tests/test_image.py::test_tilecache_set_refreshes_old\",\n \"tests/test_image.py::test_tilecache_can_get\",\n \"tests/test_image.py::test_tilecache_lru\",\n \"tests/test_image.py::test_tilecache_can_clear\",\n \"tests/test_image.py::test_tilecache_can_constrain_len\",\n \"tests/test_image.py::test_tilecache_can_constrain_bytes\",\n \"tests/test_image.py::test_imagefetcher_can_instantiate\",\n \"tests/test_image.py::test_imagefetcher_mirror_fallback_warning\",\n \"tests/test_image.py::test_imagefetcher_set_mirror_none\",\n \"tests/test_image.py::test_imagefetcher_set_mirror_mirror\",\n \"tests/test_image.py::test_imagefetcher_set_mirror_mirror_raises\",\n \"tests/test_image.py::test_imagefetcher_set_mirror_int\",\n \"tests/test_image.py::test_imagefetcher_set_mirror_int_as_str\",\n \"tests/test_image.py::test_imagefetcher_set_mirror_position_warns_no_match\",\n \"tests/test_image.py::test_imagefetcher_set_mirror_position_warns_too_many\",\n \"tests/test_image.py::test_imagefetcher_set_mirror_title\",\n \"tests/test_image.py::test_imagefetcher_set_mirror_title_warns_no_match\",\n \"tests/test_image.py::test_imagefetcher_set_mirror_title_warns_too_many\",\n \"tests/test_image.py::test_imagefetcher_get_auth_default\",\n \"tests/test_image.py::test_imagefetcher_get_auth_from_mirror\",\n \"tests/test_image.py::test_imagefetcher_get_auth_fallback\",\n \"tests/test_image.py::test_imagefetcher_clear_cache\",\n \"tests/test_image.py::test_imagefetcher_map_dimensions\",\n \"tests/test_image.py::test_imagefetcher_reorient\",\n \"tests/test_image.py::test_imagefetcher_reorient_expands\",\n \"tests/test_image.py::test_imagefetcher_reorient_throws\",\n \"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi0-expected_drc0-expected_yx_minmax0]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi1-expected_drc1-expected_yx_minmax1]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi2-expected_drc2-expected_yx_minmax2]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi3-expected_drc3-expected_yx_minmax3]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi4-expected_drc4-expected_yx_minmax4]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-scaled-0-expected0]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack-0-expected1]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack--2-expected2]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack-1-expected3]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project-0-expected4]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project--2-expected5]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project-1-expected6]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-scaled-0-expected0]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack-0-expected1]\",\n 
\"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack--2-expected2]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack-1-expected3]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project-0-expected4]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project--2-expected5]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project-1-expected6]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled_raises[ImageFetcher]\",\n \"tests/test_image.py::test_imagefetcher_roi_to_scaled_raises[ThreadedImageFetcher]\",\n \"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi0-1]\",\n \"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi1-2]\",\n \"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi2-1]\",\n \"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi3-2]\",\n \"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi4-2]\",\n \"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi5-4]\",\n \"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi6-12]\",\n \"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi0-1]\",\n \"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi1-2]\",\n \"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi2-1]\",\n \"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi3-2]\",\n \"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi4-2]\",\n \"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi5-4]\",\n \"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi6-12]\",\n \"tests/test_image.py::test_imagefetcher_get_into_array[ImageFetcher]\",\n \"tests/test_image.py::test_imagefetcher_get_into_array[ThreadedImageFetcher]\",\n \"tests/test_image.py::test_imagefetcher_get_tile_from_cache[ImageFetcher]\",\n \"tests/test_image.py::test_imagefetcher_get_tile_from_cache[ThreadedImageFetcher]\",\n \"tests/test_image.py::test_imagefetcher_get_tile_from_broken_slice[ImageFetcher]\",\n \"tests/test_image.py::test_imagefetcher_get_tile_from_broken_slice[ThreadedImageFetcher]\",\n \"tests/test_image.py::test_imagefetcher_get_tile_from_fetch\",\n \"tests/test_image.py::test_imagefetcher_fetch\",\n \"tests/test_image.py::test_imagefetcher_get_wrappers[stack]\",\n \"tests/test_image.py::test_imagefetcher_get_wrappers[scaled]\",\n \"tests/test_image.py::test_imagefetcher_get_wrappers[project]\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"MIT License"},"__index_level_0__":{"kind":"number","value":2565,"string":"2,565"},"num_tokens_patch":{"kind":"number","value":871,"string":"871"},"before_filepaths":{"kind":"list like","value":["catpy/__init__.py","catpy/image.py"],"string":"[\n \"catpy/__init__.py\",\n \"catpy/image.py\"\n]"}}},{"rowIdx":580,"cells":{"instance_id":{"kind":"string","value":"tomMoral__loky-128"},"base_commit":{"kind":"string","value":"7e46ee602a23251f476312357c00fd13f77f9938"},"created_at":{"kind":"string","value":"2018-05-23 10:17:18"},"environment_setup_commit":{"kind":"string","value":"1bf741a4796d15c517902a3331b5bd9e86502037"},"hints_text":{"kind":"string","value":"ogrisel: > For the future, should we do the same as in joblib and run the test of joblib as part of the CI?\r\n\r\nI agree but let's do that in another PR."},"patch":{"kind":"string","value":"diff --git 
a/loky/backend/semaphore_tracker.py b/loky/backend/semaphore_tracker.py\nindex 79587f2..f494237 100644\n--- a/loky/backend/semaphore_tracker.py\n+++ b/loky/backend/semaphore_tracker.py\n@@ -203,7 +203,6 @@ def main(fd):\n try:\n sem_unlink(name)\n if VERBOSE: # pragma: no cover\n- name = name.decode('ascii')\n sys.stderr.write(\"[SemaphoreTracker] unlink {}\\n\"\n .format(name))\n sys.stderr.flush()\ndiff --git a/loky/backend/semlock.py b/loky/backend/semlock.py\nindex c94c4cd..2d35f6a 100644\n--- a/loky/backend/semlock.py\n+++ b/loky/backend/semlock.py\n@@ -68,7 +68,7 @@ if sys.version_info[:2] < (3, 3):\n \n \n def sem_unlink(name):\n- if pthread.sem_unlink(name) < 0:\n+ if pthread.sem_unlink(name.encode('ascii')) < 0:\n raiseFromErrno()\n \n \n@@ -153,8 +153,8 @@ class SemLock(object):\n self.ident = 0\n self.kind = kind\n self.maxvalue = maxvalue\n- self.name = name.encode('ascii')\n- self.handle = _sem_open(self.name, value)\n+ self.name = name\n+ self.handle = _sem_open(self.name.encode('ascii'), value)\n \n def __del__(self):\n try:\n@@ -265,7 +265,7 @@ class SemLock(object):\n self.kind = kind\n self.maxvalue = maxvalue\n self.name = name\n- self.handle = _sem_open(name)\n+ self.handle = _sem_open(name.encode('ascii'))\n return self\n \n \ndiff --git a/loky/backend/synchronize.py b/loky/backend/synchronize.py\nindex 2cdb43d..4773b9d 100644\n--- a/loky/backend/synchronize.py\n+++ b/loky/backend/synchronize.py\n@@ -121,8 +121,7 @@ class SemLock(object):\n @staticmethod\n def _make_name():\n # OSX does not support long names for semaphores\n- name = '/loky-%i-%s' % (os.getpid(), next(SemLock._rand))\n- return name\n+ return '/loky-%i-%s' % (os.getpid(), next(SemLock._rand))\n \n \n #\n"},"problem_statement":{"kind":"string","value":"loky.backend.semaphore_tracker.sem_unlink does not have same signature if coming from ctypes or _multiprocessing\n* `_multi_processing.sem_unlink` takes `str`\r\n* `loky.backend.semlock.sem_unlink` comes from `ctypes` and take `bytes`.\r\n\r\nIt feels like some code was written with the ctypes variant in mind and raise an error when the `_multiprocessing.sem_unlink` is called. Tests seem to be only testing `loky.backend.semlock.sem_unlink`.\r\n\r\n#### Context\r\nThis is an error I just saw in a joblib Travis [build](https://travis-ci.org/joblib/joblib/jobs/346847911#L4044). Note this is with loky version 1.2.1.\r\n\r\n```\r\nE /home/travis/build/joblib/joblib/joblib/externals/loky/backend/semaphore_tracker.py:195: UserWarning: semaphore_tracker: There appear to be 6 leaked semaphores to clean up at shutdown\r\nE len(cache))\r\nE /home/travis/build/joblib/joblib/joblib/externals/loky/backend/semaphore_tracker.py:211: UserWarning: semaphore_tracker: b'/loky-5456-6haleho6': TypeError('argument 1 must be str, not bytes',)\r\nE warnings.warn('semaphore_tracker: %r: %r' % (name, e)) \r\n```\r\nQuickly looking at it, it seems like this is still in master. 
The code where the warning happens is here:\r\n https://github.com/tomMoral/loky/blob/dec1c8144b12938dfe7bfc511009e12f25fd1cd9/loky/backend/semaphore_tracker.py#L203-L211\r\n\r\n"},"repo":{"kind":"string","value":"tomMoral/loky"},"test_patch":{"kind":"string","value":"diff --git a/tests/test_synchronize.py b/tests/test_synchronize.py\nindex 797070d..4794f17 100644\n--- a/tests/test_synchronize.py\n+++ b/tests/test_synchronize.py\n@@ -22,7 +22,7 @@ if sys.version_info < (3, 3):\n @pytest.mark.skipif(sys.platform == \"win32\", reason=\"UNIX test\")\n def test_semlock_failure():\n from loky.backend.semlock import SemLock, sem_unlink\n- name = \"test1\"\n+ name = \"loky-test-semlock\"\n sl = SemLock(0, 1, 1, name=name)\n \n with pytest.raises(FileExistsError):\n@@ -30,7 +30,7 @@ def test_semlock_failure():\n sem_unlink(sl.name)\n \n with pytest.raises(FileNotFoundError):\n- SemLock._rebuild(None, 0, 0, name.encode('ascii'))\n+ SemLock._rebuild(None, 0, 0, name)\n \n \n def assert_sem_value_equal(sem, value):\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"merge_commit\",\n \"failed_lite_validators\": [\n \"has_hyperlinks\",\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 1,\n \"test_score\": 2\n },\n \"num_modified_files\": 3\n}"},"version":{"kind":"string","value":"2.1"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"pytest\",\n \"pip_packages\": [\n \"pytest\",\n \"psutil\",\n \"pytest-timeout\",\n \"coverage\"\n ],\n \"pre_install\": null,\n \"python\": \"3.6\",\n \"reqs_path\": null,\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work\ncertifi==2021.5.30\ncloudpickle==2.2.1\ncoverage==6.2\nimportlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work\niniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work\n-e git+https://github.com/tomMoral/loky.git@7e46ee602a23251f476312357c00fd13f77f9938#egg=loky\nmore-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work\npackaging @ file:///tmp/build/80754af9/packaging_1637314298585/work\npluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work\npsutil==7.0.0\npy @ file:///opt/conda/conda-bld/py_1644396412707/work\npyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work\npytest==6.2.4\npytest-timeout==2.1.0\ntoml @ file:///tmp/build/80754af9/toml_1616166611790/work\ntyping_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work\nzipp @ file:///tmp/build/80754af9/zipp_1633618647012/work\n"},"environment":{"kind":"string","value":"name: loky\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - attrs=21.4.0=pyhd3eb1b0_0\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - importlib-metadata=4.8.1=py36h06a4308_0\n - importlib_metadata=4.8.1=hd3eb1b0_0\n - iniconfig=1.1.1=pyhd3eb1b0_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - 
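The contract this patch settles on is simple: semaphore names stay plain `str` everywhere in loky, and the encode to `bytes` happens only at the ctypes boundary, so the ctypes-backed `sem_unlink` accepts exactly the same argument type as `_multiprocessing.sem_unlink`. A rough sketch of that boundary (Linux-oriented and simplified; loky's real code adds more error translation):

```python
import ctypes
import ctypes.util

# The fallback backend binds POSIX sem_unlink via ctypes when
# _multiprocessing does not expose it.
_pthread = ctypes.CDLL(ctypes.util.find_library('pthread'), use_errno=True)


def sem_unlink(name):
    # Accept str, matching _multiprocessing.sem_unlink; encode only here.
    if _pthread.sem_unlink(name.encode('ascii')) < 0:
        raise OSError(ctypes.get_errno(), 'sem_unlink(%r) failed' % name)
```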
libstdcxx-ng=11.2.0=h1234567_1\n - more-itertools=8.12.0=pyhd3eb1b0_0\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - packaging=21.3=pyhd3eb1b0_0\n - pip=21.2.2=py36h06a4308_0\n - pluggy=0.13.1=py36h06a4308_0\n - py=1.11.0=pyhd3eb1b0_0\n - pyparsing=3.0.4=pyhd3eb1b0_0\n - pytest=6.2.4=py36h06a4308_2\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - toml=0.10.2=pyhd3eb1b0_0\n - typing_extensions=4.1.1=pyh06a4308_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zipp=3.6.0=pyhd3eb1b0_0\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - cloudpickle==2.2.1\n - coverage==6.2\n - psutil==7.0.0\n - pytest-timeout==2.1.0\nprefix: /opt/conda/envs/loky\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/test_synchronize.py::test_semlock_failure"],"string":"[\n \"tests/test_synchronize.py::test_semlock_failure\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/test_synchronize.py::TestLock::test_lock","tests/test_synchronize.py::TestLock::test_rlock","tests/test_synchronize.py::TestLock::test_lock_context","tests/test_synchronize.py::TestSemaphore::test_semaphore","tests/test_synchronize.py::TestSemaphore::test_bounded_semaphore","tests/test_synchronize.py::TestSemaphore::test_timeout","tests/test_synchronize.py::TestCondition::test_notify","tests/test_synchronize.py::TestCondition::test_notify_all","tests/test_synchronize.py::TestCondition::test_timeout","tests/test_synchronize.py::TestCondition::test_waitfor","tests/test_synchronize.py::TestCondition::test_wait_result","tests/test_synchronize.py::TestEvent::test_event"],"string":"[\n \"tests/test_synchronize.py::TestLock::test_lock\",\n \"tests/test_synchronize.py::TestLock::test_rlock\",\n \"tests/test_synchronize.py::TestLock::test_lock_context\",\n \"tests/test_synchronize.py::TestSemaphore::test_semaphore\",\n \"tests/test_synchronize.py::TestSemaphore::test_bounded_semaphore\",\n \"tests/test_synchronize.py::TestSemaphore::test_timeout\",\n \"tests/test_synchronize.py::TestCondition::test_notify\",\n \"tests/test_synchronize.py::TestCondition::test_notify_all\",\n \"tests/test_synchronize.py::TestCondition::test_timeout\",\n \"tests/test_synchronize.py::TestCondition::test_waitfor\",\n \"tests/test_synchronize.py::TestCondition::test_wait_result\",\n \"tests/test_synchronize.py::TestEvent::test_event\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"BSD 3-Clause \"New\" or \"Revised\" License"},"__index_level_0__":{"kind":"number","value":2575,"string":"2,575"},"num_tokens_patch":{"kind":"number","value":603,"string":"603"},"before_filepaths":{"kind":"list like","value":["loky/backend/semaphore_tracker.py","loky/backend/semlock.py","loky/backend/synchronize.py"],"string":"[\n \"loky/backend/semaphore_tracker.py\",\n \"loky/backend/semlock.py\",\n \"loky/backend/synchronize.py\"\n]"}}},{"rowIdx":581,"cells":{"instance_id":{"kind":"string","value":"google__mobly-453"},"base_commit":{"kind":"string","value":"f1aff6a7f06887424759e3c192b1bf6e13d2a6bf"},"created_at":{"kind":"string","value":"2018-05-24 19:50:41"},"environment_setup_commit":{"kind":"string","value":"95286a01a566e056d44acfa9577a45bc7f37f51d"},"hints_text":{"kind":"string","value":"xpconanfan: I don't see how this is related to logging stderr as the issue described.\nOne of the msg is incorrect?\n\n---\n\nReview status: 0 of 2 files reviewed at 
latest revision, all discussions resolved.\n\n---\n\n*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIjCGd2zhkKzp2ablI:b-x8c38) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*\n> ```Python\n> break\n> finally:\n> (unhandled_out, err) = proc.communicate()\n> ```\n\nwait, so this does happen?\nshouldn't we call the handler with this out instead?\n\n---\n\nwinterfroststrom:\n\n*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIkOBz8aQLOE0ovB8O:brjczjz) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*\nPreviously, xpconanfan (Ang Li) wrote…\n> wait, so this does happen?\n> shouldn't we call the handler with this out instead?\n\nI'm not sure?\nI'm adding logging here first to try to determine what the underlying problem is\n\n---\n\nxpconanfan:\n\n*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIl2971UfZ3LqypsHv:b332s67) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*\nPreviously, winterfroststrom wrote…\n> I'm not sure?\n> I'm adding logging here first to try to determine what the underlying problem is\n\nseems like we should pipe all stdout content through the handler as this function promised?\nyou could add additional logging to signify the existence of stdout from `communicate`?\n\n---\n\nxpconanfan:\n\n*[tests/mobly/controllers/android_device_lib/adb_test.py, line 156 at r1](https://reviewable.io/reviews/google/mobly/453#-LDImOOR-GL1gRhihxxl:-LDImOOR-GL1gRhihxxm:ba86vyn) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/tests/mobly/controllers/android_device_lib/adb_test.py#L156)):*\n> ```Python\n> def test_execute_and_process_stdout_logs_cmd(self, mock_debug_logger,\n> mock_popen):\n> self._mock_execute_and_process_stdout_process(mock_popen)\n> ```\n\nthis test is relying on the default mock stdout value in `_mock_execute_and_process_stdout_process`, which is difficult to read.\nCan we more explicitly set the mock value within the test?\n\n---\n\nwinterfroststrom:\n\n*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIsvuPD4jS9CVrpnr5:bcy1d3j) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*\nPreviously, xpconanfan (Ang Li) wrote…\n> seems like we should pipe all stdout content through the handler as this function promised?\n> you could add additional logging to signify the existence of stdout from `communicate`?\n\nSo, I've never seen this output actually get populated and I'm not sure it is in the case I'm debugging, but okay.\n\nI'm preferring changing the logged command because otherwise you'd get semi-duplicate log lines.\n\n---\n\n*[tests/mobly/controllers/android_device_lib/adb_test.py, line 156 at r1](https://reviewable.io/reviews/google/mobly/453#-LDImOOR-GL1gRhihxxl:-LDItG5yEIdICTRzP1c0:b-896fix) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/tests/mobly/controllers/android_device_lib/adb_test.py#L156)):*\nPreviously, xpconanfan (Ang Li) wrote…\n> this test is relying on the default mock stdout value in `_mock_execute_and_process_stdout_process`, which is difficult to read.\n> Can we more explicitly set the mock value within the test?\n\nDone.\n"},"patch":{"kind":"string","value":"diff --git a/mobly/controllers/android_device_lib/adb.py b/mobly/controllers/android_device_lib/adb.py\nindex 90dcd0b..95d1261 100644\n--- a/mobly/controllers/android_device_lib/adb.py\n+++ b/mobly/controllers/android_device_lib/adb.py\n@@ -203,6 +203,7 @@ class AdbProxy(object):\n stderr=subprocess.PIPE,\n shell=shell,\n bufsize=1)\n+ out = '[elided, processed via handler]'\n try:\n while proc.poll() is None:\n line = proc.stdout.readline()\n@@ -211,16 +212,19 @@\n else:\n break\n finally:\n- (_, err) = proc.communicate()\n+ (unexpected_out, err) = proc.communicate()\n+ if unexpected_out:\n+ out = '[unexpected stdout] %s' % unexpected_out\n+ for line in unexpected_out.splitlines():\n+ handler(line)\n+\n ret = proc.returncode\n+ logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s',\n+ cli_cmd_to_string(args), out, err, ret)\n if ret == 0:\n return err\n else:\n- raise AdbError(\n- cmd=args,\n- stdout='[elided, processed via handler]',\n- stderr=err,\n- ret_code=ret)\n+ raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)\n \n def _construct_adb_cmd(self, raw_name, args, shell):\n \"\"\"Constructs an adb command with arguments for a subprocess call.\ndiff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py\nindex e3e835d..03674ff 100644\n--- a/mobly/controllers/android_device_lib/snippet_client.py\n+++ b/mobly/controllers/android_device_lib/snippet_client.py\n@@ -125,8 +125,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):\n \n # Yaaay! 
We're done!\n self.log.debug('Snippet %s started after %.1fs on host port %s',\n- self.package,\n- time.time() - start_time, self.host_port)\n+ self.package, time.time() - start_time, self.host_port)\n \n def restore_app_connection(self, port=None):\n \"\"\"Restores the app after device got reconnected.\n@@ -151,12 +150,13 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):\n try:\n self.connect()\n except:\n- # Failed to connect to app, something went wrong.\n+ # Log the original error and raise AppRestoreConnectionError.\n+ self.log.exception('Failed to re-connect to app.')\n raise jsonrpc_client_base.AppRestoreConnectionError(\n- self._ad(\n- 'Failed to restore app connection for %s at host port %s, '\n- 'device port %s'), self.package, self.host_port,\n- self.device_port)\n+ self._ad,\n+ ('Failed to restore app connection for %s at host port %s, '\n+ 'device port %s') % (self.package, self.host_port,\n+ self.device_port))\n \n # Because the previous connection was lost, update self._proc\n self._proc = None\n"},"problem_statement":{"kind":"string","value":"`_execute_and_process_stdout` should log cmd"},"repo":{"kind":"string","value":"google/mobly"},"test_patch":{"kind":"string","value":"diff --git a/tests/mobly/controllers/android_device_lib/adb_test.py b/tests/mobly/controllers/android_device_lib/adb_test.py\nindex 1c75a9d..8dec8aa 100755\n--- a/tests/mobly/controllers/android_device_lib/adb_test.py\n+++ b/tests/mobly/controllers/android_device_lib/adb_test.py\n@@ -76,8 +76,7 @@ class AdbTest(unittest.TestCase):\n mock_popen.return_value.stdout.readline.side_effect = ['']\n \n mock_proc.communicate = mock.Mock(\n- return_value=(MOCK_DEFAULT_STDOUT.encode('utf-8'),\n- MOCK_DEFAULT_STDERR.encode('utf-8')))\n+ return_value=('', MOCK_DEFAULT_STDERR.encode('utf-8')))\n mock_proc.returncode = 0\n return mock_popen\n \n@@ -150,6 +149,57 @@ class AdbTest(unittest.TestCase):\n mock_handler.assert_any_call('1')\n mock_handler.assert_any_call('2')\n \n+ @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')\n+ def test_execute_and_process_stdout_reads_unexpected_stdout(\n+ self, mock_popen):\n+ unexpected_stdout = MOCK_DEFAULT_STDOUT.encode('utf-8')\n+\n+ self._mock_execute_and_process_stdout_process(mock_popen)\n+ mock_handler = mock.MagicMock()\n+ mock_popen.return_value.communicate = mock.Mock(\n+ return_value=(unexpected_stdout, MOCK_DEFAULT_STDERR.encode(\n+ 'utf-8')))\n+\n+ err = adb.AdbProxy()._execute_and_process_stdout(\n+ ['fake_cmd'], shell=False, handler=mock_handler)\n+ self.assertEqual(mock_handler.call_count, 1)\n+ mock_handler.assert_called_with(unexpected_stdout)\n+\n+ @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')\n+ @mock.patch('logging.debug')\n+ def test_execute_and_process_stdout_logs_cmd(self, mock_debug_logger,\n+ mock_popen):\n+ raw_expected_stdout = ''\n+ expected_stdout = '[elided, processed via handler]'\n+ expected_stderr = MOCK_DEFAULT_STDERR.encode('utf-8')\n+ self._mock_execute_and_process_stdout_process(mock_popen)\n+ mock_popen.return_value.communicate = mock.Mock(\n+ return_value=(raw_expected_stdout, expected_stderr))\n+\n+ err = adb.AdbProxy()._execute_and_process_stdout(\n+ ['fake_cmd'], shell=False, handler=mock.MagicMock())\n+ mock_debug_logger.assert_called_with(\n+ 'cmd: %s, stdout: %s, stderr: %s, ret: %s', 'fake_cmd',\n+ expected_stdout, expected_stderr, 0)\n+\n+ @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')\n+ @mock.patch('logging.debug')\n+ def 
test_execute_and_process_stdout_logs_cmd_with_unexpected_stdout(\n+ self, mock_debug_logger, mock_popen):\n+ raw_expected_stdout = MOCK_DEFAULT_STDOUT.encode('utf-8')\n+ expected_stdout = '[unexpected stdout] %s' % raw_expected_stdout\n+ expected_stderr = MOCK_DEFAULT_STDERR.encode('utf-8')\n+\n+ self._mock_execute_and_process_stdout_process(mock_popen)\n+ mock_popen.return_value.communicate = mock.Mock(\n+ return_value=(raw_expected_stdout, expected_stderr))\n+\n+ err = adb.AdbProxy()._execute_and_process_stdout(\n+ ['fake_cmd'], shell=False, handler=mock.MagicMock())\n+ mock_debug_logger.assert_called_with(\n+ 'cmd: %s, stdout: %s, stderr: %s, ret: %s', 'fake_cmd',\n+ expected_stdout, expected_stderr, 0)\n+\n @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')\n def test_execute_and_process_stdout_when_cmd_exits(self, mock_popen):\n self._mock_execute_and_process_stdout_process(mock_popen)\ndiff --git a/tests/mobly/controllers/android_device_lib/snippet_client_test.py b/tests/mobly/controllers/android_device_lib/snippet_client_test.py\nindex 2c875d8..d964ae3 100755\n--- a/tests/mobly/controllers/android_device_lib/snippet_client_test.py\n+++ b/tests/mobly/controllers/android_device_lib/snippet_client_test.py\n@@ -166,6 +166,15 @@ class SnippetClientTest(jsonrpc_client_test_base.JsonRpcClientTestBase):\n self.assertEqual(789, callback._event_client.host_port)\n self.assertEqual(456, callback._event_client.device_port)\n \n+ # if unable to reconnect for any reason, a\n+ # jsonrpc_client_base.AppRestoreConnectionError is raised.\n+ mock_create_connection.side_effect = IOError('socket timed out')\n+ with self.assertRaisesRegex(\n+ jsonrpc_client_base.AppRestoreConnectionError,\n+ ('Failed to restore app connection for %s at host port %s, '\n+ 'device port %s') % (MOCK_PACKAGE_NAME, 789, 456)):\n+ client.restore_app_connection()\n+\n @mock.patch('socket.create_connection')\n @mock.patch('mobly.controllers.android_device_lib.snippet_client.'\n 'utils.start_standing_subprocess')\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"merge_commit\",\n \"failed_lite_validators\": [\n \"has_short_problem_statement\",\n \"has_many_modified_files\",\n \"has_many_hunks\",\n \"has_pytest_match_arg\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 2,\n \"test_score\": 0\n },\n \"num_modified_files\": 2\n}"},"version":{"kind":"string","value":"1.7"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"pytest\",\n \"pip_packages\": [\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-mock\",\n \"pytest-asyncio\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.9\",\n \"reqs_path\": null,\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"coverage==7.8.0\nexceptiongroup @ file:///croot/exceptiongroup_1706031385326/work\nexecnet==2.1.1\nfuture==1.0.0\niniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work\n-e git+https://github.com/google/mobly.git@f1aff6a7f06887424759e3c192b1bf6e13d2a6bf#egg=mobly\nmock==1.0.1\npackaging @ file:///croot/packaging_1734472117206/work\npluggy @ 
file:///croot/pluggy_1733169602837/work\nportpicker==1.6.0\npsutil==7.0.0\npyserial==3.5\npytest @ file:///croot/pytest_1738938843180/work\npytest-asyncio==0.26.0\npytest-cov==6.0.0\npytest-mock==3.14.0\npytest-xdist==3.6.1\npytz==2025.2\nPyYAML==6.0.2\ntimeout-decorator==0.5.0\ntomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work\ntyping_extensions==4.13.0\n"},"environment":{"kind":"string","value":"name: mobly\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - exceptiongroup=1.2.0=py39h06a4308_0\n - iniconfig=1.1.1=pyhd3eb1b0_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - packaging=24.2=py39h06a4308_0\n - pip=25.0=py39h06a4308_0\n - pluggy=1.5.0=py39h06a4308_0\n - pytest=8.3.4=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tomli=2.0.1=py39h06a4308_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - coverage==7.8.0\n - execnet==2.1.1\n - future==1.0.0\n - mock==1.0.1\n - portpicker==1.6.0\n - psutil==7.0.0\n - pyserial==3.5\n - pytest-asyncio==0.26.0\n - pytest-cov==6.0.0\n - pytest-mock==3.14.0\n - pytest-xdist==3.6.1\n - pytz==2025.2\n - pyyaml==6.0.2\n - timeout-decorator==0.5.0\n - typing-extensions==4.13.0\nprefix: /opt/conda/envs/mobly\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_logs_cmd","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_logs_cmd_with_unexpected_stdout","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_reads_unexpected_stdout","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_restore_event_client"],"string":"[\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_logs_cmd\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_logs_cmd_with_unexpected_stdout\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_reads_unexpected_stdout\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_restore_event_client\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list 
like","value":["tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_cli_cmd_to_string","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_list","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_one_arg_command","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_one_arg_command_list","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_one_command","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_serial","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_serial_with_list","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_auto_quotes","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_list","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_one_arg_command","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_one_arg_command_list","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_one_command","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_serial","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_serial_with_list","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_special_characters","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_formats_command","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_formats_command_with_shell_true","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_shell_true","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_stderr_pipe","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_error_no_timeout","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_no_timeout_success","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_timed_out","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_negative_timeout_value","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_timeout_success","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_adb_and_process_stdout_formats_command","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_raises_adb_error","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_reads_stdout","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_returns_stderr","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_when_cmd_eof","tests/mobly/controllers/android_device_lib/ad
b_test.py::AdbTest::test_execute_and_process_stdout_when_cmd_exits","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_when_handler_crash","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_forward","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_called_correctly","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_existing_command","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_missing_command_on_newer_devices","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_missing_command_on_older_devices","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_handler","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_handler_with_options","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_handler_with_runner","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_options","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_runner","tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_without_parameters","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_app_not_installed","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_not_instrumented","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_target_not_installed","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_normal","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_header_junk","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_no_valid_line","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_persistent_session","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_unknown_protocol","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_crash","tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_event_client"],"string":"[\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_cli_cmd_to_string\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_list\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_one_arg_command\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_one_arg_command_list\",\n 
\"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_one_command\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_serial\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_serial_with_list\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_auto_quotes\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_list\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_one_arg_command\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_one_arg_command_list\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_one_command\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_serial\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_serial_with_list\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_special_characters\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_formats_command\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_formats_command_with_shell_true\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_shell_true\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_stderr_pipe\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_error_no_timeout\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_no_timeout_success\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_timed_out\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_negative_timeout_value\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_timeout_success\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_adb_and_process_stdout_formats_command\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_raises_adb_error\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_reads_stdout\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_returns_stderr\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_when_cmd_eof\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_when_cmd_exits\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_when_handler_crash\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_forward\",\n 
\"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_called_correctly\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_existing_command\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_missing_command_on_newer_devices\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_missing_command_on_older_devices\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_handler\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_handler_with_options\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_handler_with_runner\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_options\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_runner\",\n \"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_without_parameters\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_app_not_installed\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_not_instrumented\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_target_not_installed\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_normal\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_header_junk\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_no_valid_line\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_persistent_session\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_unknown_protocol\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_crash\",\n \"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_event_client\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"Apache License 2.0"},"__index_level_0__":{"kind":"number","value":2581,"string":"2,581"},"num_tokens_patch":{"kind":"number","value":778,"string":"778"},"before_filepaths":{"kind":"list like","value":["mobly/controllers/android_device_lib/adb.py","mobly/controllers/android_device_lib/snippet_client.py"],"string":"[\n \"mobly/controllers/android_device_lib/adb.py\",\n \"mobly/controllers/android_device_lib/snippet_client.py\"\n]"}}},{"rowIdx":582,"cells":{"instance_id":{"kind":"string","value":"nipy__nipype-2597"},"base_commit":{"kind":"string","value":"9eaa2a32c8cb3569633a79d6f7968270453f9aed"},"created_at":{"kind":"string","value":"2018-05-25 
13:56:19"},"environment_setup_commit":{"kind":"string","value":"704b97dee7848283692bac38f04541c5af2a87b5"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py\nindex 08d357ff6..0a59aac26 100644\n--- a/nipype/pipeline/engine/utils.py\n+++ b/nipype/pipeline/engine/utils.py\n@@ -1054,12 +1054,14 @@ def generate_expanded_graph(graph_in):\n for src_id in list(old_edge_dict.keys()):\n # Drop the original JoinNodes; only concerned with\n # generated Nodes\n- if hasattr(node, 'joinfield'):\n+ if hasattr(node, 'joinfield') and node.itername == src_id:\n continue\n # Patterns:\n # - src_id : Non-iterable node\n- # - src_id.[a-z]\\d+ : IdentityInterface w/ iterables\n- # - src_id.[a-z]I.[a-z]\\d+ : Non-IdentityInterface w/ iterables\n+ # - src_id.[a-z]\\d+ :\n+ # IdentityInterface w/ iterables or nested JoinNode\n+ # - src_id.[a-z]I.[a-z]\\d+ :\n+ # Non-IdentityInterface w/ iterables\n # - src_idJ\\d+ : JoinNode(IdentityInterface)\n if re.match(src_id + r'((\\.[a-z](I\\.[a-z])?|J)\\d+)?$',\n node.itername):\n"},"problem_statement":{"kind":"string","value":"PR #2479 has broken my package\n### Summary\r\n\r\nPR #2479 has broken my package (https://pypi.org/project/arcana/) \r\n\r\nI am not quite sure what the rationale behind the changes are so it is difficult to know how to debug or whether there is something I can change in my package.\r\n\r\n### Actual behavior\r\n\r\nWorkflow exits with error\r\n\r\n```\r\n File \"/Users/tclose/git/ni/arcana/test/mwe/nipype_pr2479/test.py\", line 71, in \r\n study.data('out')\r\n File \"/Users/tclose/git/ni/arcana/arcana/study/base.py\", line 325, in data\r\n visit_ids=visit_ids)\r\n File \"/Users/tclose/git/ni/arcana/arcana/runner/base.py\", line 37, in run\r\n return workflow.run(plugin=self._plugin)\r\n File \"/Users/tclose/git/ni/nipype/nipype/pipeline/engine/workflows.py\", line 595, in run\r\n runner.run(execgraph, updatehash=updatehash, config=self.config)\r\n File \"/Users/tclose/git/ni/nipype/nipype/pipeline/plugins/linear.py\", line 44, in run\r\n node.run(updatehash=updatehash)\r\n File \"/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py\", line 480, in run\r\n result = self._run_interface(execute=True)\r\n File \"/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py\", line 564, in _run_interface\r\n return self._run_command(execute)\r\n File \"/Users/tclose/git/ni/arcana/arcana/node.py\", line 59, in _run_command\r\n result = self.nipype_cls._run_command(self, *args, **kwargs)\r\n File \"/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py\", line 888, in _run_command\r\n self._collate_join_field_inputs()\r\n File \"/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py\", line 898, in _collate_join_field_inputs\r\n val = self._collate_input_value(field)\r\n File \"/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py\", line 928, in _collate_input_value\r\n for idx in range(self._next_slot_index)\r\n File \"/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py\", line 947, in _slot_value\r\n field, index, e))\r\nAttributeError: The join node pipeline1.pipeline1_subject_session_outputs does not have a slot field subject_session_pairsJ1 to hold the subject_session_pairs value at index 0: 'DynamicTraitedSpec' object has no attribute 'subject_session_pairsJ1'\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe workflow runs without error\r\n\r\n### How to replicate the behavior\r\n\r\nSee script below\r\n\r\n### 
Script/Workflow details\r\n\r\nI have tried to come up with a MWE that doesn't use my package but it was proving difficult. However, you can now install my package with pip\r\n\r\n`pip install arcana`\r\n\r\nand run the following\r\n\r\n```\r\nimport os.path\r\nimport shutil\r\nfrom nipype import config\r\nconfig.enable_debug_mode()\r\nimport nipype # @IgnorePep8\r\nfrom nipype.interfaces.utility import IdentityInterface # @IgnorePep8\r\nfrom arcana.dataset import DatasetMatch, DatasetSpec # @IgnorePep8\r\nfrom arcana.data_format import text_format # @IgnorePep8\r\nfrom arcana.study.base import Study, StudyMetaClass # @IgnorePep8\r\nfrom arcana.archive.local import LocalArchive # @IgnorePep8\r\nfrom arcana.runner import LinearRunner # @IgnorePep8\r\n\r\n\r\nBASE_ARCHIVE_DIR = os.path.join(os.path.dirname(__file__), 'archives')\r\nBASE_WORK_DIR = os.path.join(os.path.dirname(__file__), 'work')\r\n\r\nprint(nipype.get_info())\r\nprint(nipype.__version__)\r\n\r\n\r\nclass TestStudy(Study):\r\n\r\n __metaclass__ = StudyMetaClass\r\n\r\n add_data_specs = [\r\n DatasetSpec('in', text_format),\r\n DatasetSpec('out', text_format, 'pipeline')]\r\n\r\n def pipeline(self, **kwargs):\r\n pipeline = self.create_pipeline(\r\n name='pipeline1',\r\n inputs=[DatasetSpec('in', text_format)],\r\n outputs=[DatasetSpec('out', text_format)],\r\n desc=\"A dummy pipeline used to test 'run_pipeline' method\",\r\n version=1,\r\n citations=[],\r\n **kwargs)\r\n ident = pipeline.create_node(IdentityInterface(['a']),\r\n name=\"ident\")\r\n # Connect inputs\r\n pipeline.connect_input('in', ident, 'a')\r\n # Connect outputs\r\n pipeline.connect_output('out', ident, 'a')\r\n return pipeline\r\n\r\n\r\n# Create archives\r\nshutil.rmtree(BASE_ARCHIVE_DIR, ignore_errors=True)\r\nshutil.rmtree(BASE_WORK_DIR, ignore_errors=True)\r\nos.makedirs(BASE_ARCHIVE_DIR)\r\n\r\nfor sess in (['ARCHIVE1', 'SUBJECT', 'VISIT'],\r\n ['ARCHIVE2', 'SUBJECT1', 'VISIT1'],\r\n ['ARCHIVE2', 'SUBJECT1', 'VISIT2'],\r\n ['ARCHIVE2', 'SUBJECT2', 'VISIT1'],\r\n ['ARCHIVE2', 'SUBJECT2', 'VISIT2']):\r\n sess_dir = os.path.join(*([BASE_ARCHIVE_DIR] + sess))\r\n os.makedirs(sess_dir)\r\n with open(os.path.join(sess_dir, 'in.txt'), 'w') as f:\r\n f.write('in')\r\n\r\narchive1_path = os.path.join(BASE_ARCHIVE_DIR, 'ARCHIVE1')\r\narchive2_path = os.path.join(BASE_ARCHIVE_DIR, 'ARCHIVE2')\r\n\r\nwork1_path = os.path.join(BASE_WORK_DIR, 'WORK1')\r\nwork2_path = os.path.join(BASE_WORK_DIR, 'WORK2')\r\n\r\n# Attempt to run with archive with 2 subjects and 2 visits\r\nstudy = TestStudy('two',\r\n LocalArchive(archive2_path),\r\n LinearRunner(work2_path),\r\n inputs=[DatasetMatch('in', text_format, 'in')])\r\n\r\n# Fails here\r\nstudy2.data('out')\r\n\r\nprint(\"Ran study 2\")\r\n\r\n# \r\nstudy1 = TestStudy('one',\r\n LocalArchive(archive1_path),\r\n LinearRunner(work1_path),\r\n inputs=[DatasetMatch('in', text_format, 'in')])\r\n\r\nstudy1.data('out')\r\n\r\nprint(\"Ran study 1\")\r\n```\r\n\r\nto reproduce the error\r\n\r\n### Platform details:\r\n\r\n{'nibabel_version': '2.2.1', 'sys_executable': '/usr/local/opt/python@2/bin/python2.7', 'networkx_version': '1.9', 'numpy_version': '1.14.3', 'sys_platform': 'darwin', 'sys_version': '2.7.15 (default, May 1 2018, 16:44:08) \\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]', 'commit_source': 'repository', 'commit_hash': '5a96ea54a', 'pkg_path': '/Users/tclose/git/ni/nipype/nipype', 'nipype_version': '1.0.4-dev+g5a96ea54a', 'traits_version': '4.6.0', 'scipy_version': 
'1.1.0'}\r\n1.0.4-dev+g5a96ea54a\r\n\r\n(problem arose in 1.0.1)\r\n\r\n### Execution environment\r\n\r\nMy Homebrew python 2 environment outside container\r\n"},"repo":{"kind":"string","value":"nipy/nipype"},"test_patch":{"kind":"string","value":"diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py\nindex 54ff15048..77fc0f2fd 100644\n--- a/nipype/pipeline/engine/tests/test_join.py\n+++ b/nipype/pipeline/engine/tests/test_join.py\n@@ -627,3 +627,35 @@ def test_name_prefix_join(tmpdir):\n joinfield=['in1'])\n wf.connect(square, 'out', square_join, \"in1\")\n wf.run()\n+\n+\n+def test_join_nestediters(tmpdir):\n+ tmpdir.chdir()\n+\n+ def exponent(x, p):\n+ return x ** p\n+\n+ wf = pe.Workflow('wf', base_dir=tmpdir.strpath)\n+\n+ xs = pe.Node(IdentityInterface(['x']),\n+ iterables=[('x', [1, 2])],\n+ name='xs')\n+ ps = pe.Node(IdentityInterface(['p']),\n+ iterables=[('p', [3, 4])],\n+ name='ps')\n+ exp = pe.Node(Function(function=exponent), name='exp')\n+ exp_joinx = pe.JoinNode(Merge(1, ravel_inputs=True),\n+ name='exp_joinx',\n+ joinsource='xs',\n+ joinfield=['in1'])\n+ exp_joinp = pe.JoinNode(Merge(1, ravel_inputs=True),\n+ name='exp_joinp',\n+ joinsource='ps',\n+ joinfield=['in1'])\n+ wf.connect([\n+ (xs, exp, [('x', 'x')]),\n+ (ps, exp, [('p', 'p')]),\n+ (exp, exp_joinx, [('out', 'in1')]),\n+ (exp_joinx, exp_joinp, [('out', 'in1')])])\n+\n+ wf.run()\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_hyperlinks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 1,\n \"test_score\": 0\n },\n \"num_modified_files\": 1\n}"},"version":{"kind":"string","value":"1.0"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.6\",\n \"reqs_path\": [\n \"requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"alabaster==0.7.13\nattrs==22.2.0\nBabel==2.11.0\ncertifi==2021.5.30\ncharset-normalizer==2.0.12\nclick==8.0.4\ncodecov==2.1.13\nconfigparser==5.2.0\ncoverage==6.2\ncycler==0.11.0\ndecorator==4.4.2\ndocutils==0.18.1\nexecnet==1.9.0\nfuncsigs==1.0.2\nfuture==1.0.0\nidna==3.10\nimagesize==1.4.1\nimportlib-metadata==4.8.3\niniconfig==1.1.1\nisodate==0.6.1\nJinja2==3.0.3\nkiwisolver==1.3.1\nlxml==5.3.1\nMarkupSafe==2.0.1\nmatplotlib==3.3.4\nmock==5.2.0\nnetworkx==2.5.1\nnibabel==3.2.2\n-e 
git+https://github.com/nipy/nipype.git@9eaa2a32c8cb3569633a79d6f7968270453f9aed#egg=nipype\nnumpy==1.19.5\nnumpydoc==1.1.0\npackaging==21.3\nPillow==8.4.0\npluggy==1.0.0\nprov==1.5.0\npy==1.11.0\npydot==1.4.2\npydotplus==2.0.2\nPygments==2.14.0\npyparsing==3.1.4\npytest==7.0.1\npytest-cov==4.0.0\npytest-env==0.6.2\npytest-xdist==3.0.2\npython-dateutil==2.9.0.post0\npytz==2025.2\nrdflib==5.0.0\nrequests==2.27.1\nscipy==1.5.4\nsimplejson==3.20.1\nsix==1.17.0\nsnowballstemmer==2.2.0\nSphinx==5.3.0\nsphinxcontrib-applehelp==1.0.2\nsphinxcontrib-devhelp==1.0.2\nsphinxcontrib-htmlhelp==2.0.0\nsphinxcontrib-jsmath==1.0.1\nsphinxcontrib-qthelp==1.0.3\nsphinxcontrib-serializinghtml==1.1.5\ntomli==1.2.3\ntraits==6.4.1\ntyping_extensions==4.1.1\nurllib3==1.26.20\nyapf==0.32.0\nzipp==3.6.0\n"},"environment":{"kind":"string","value":"name: nipype\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - pip=21.2.2=py36h06a4308_0\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - alabaster==0.7.13\n - attrs==22.2.0\n - babel==2.11.0\n - charset-normalizer==2.0.12\n - click==8.0.4\n - codecov==2.1.13\n - configparser==5.2.0\n - coverage==6.2\n - cycler==0.11.0\n - decorator==4.4.2\n - docutils==0.18.1\n - execnet==1.9.0\n - funcsigs==1.0.2\n - future==1.0.0\n - idna==3.10\n - imagesize==1.4.1\n - importlib-metadata==4.8.3\n - iniconfig==1.1.1\n - isodate==0.6.1\n - jinja2==3.0.3\n - kiwisolver==1.3.1\n - lxml==5.3.1\n - markupsafe==2.0.1\n - matplotlib==3.3.4\n - mock==5.2.0\n - networkx==2.5.1\n - nibabel==3.2.2\n - numpy==1.19.5\n - numpydoc==1.1.0\n - packaging==21.3\n - pillow==8.4.0\n - pluggy==1.0.0\n - prov==1.5.0\n - py==1.11.0\n - pydot==1.4.2\n - pydotplus==2.0.2\n - pygments==2.14.0\n - pyparsing==3.1.4\n - pytest==7.0.1\n - pytest-cov==4.0.0\n - pytest-env==0.6.2\n - pytest-xdist==3.0.2\n - python-dateutil==2.9.0.post0\n - pytz==2025.2\n - rdflib==5.0.0\n - requests==2.27.1\n - scipy==1.5.4\n - simplejson==3.20.1\n - six==1.17.0\n - snowballstemmer==2.2.0\n - sphinx==5.3.0\n - sphinxcontrib-applehelp==1.0.2\n - sphinxcontrib-devhelp==1.0.2\n - sphinxcontrib-htmlhelp==2.0.0\n - sphinxcontrib-jsmath==1.0.1\n - sphinxcontrib-qthelp==1.0.3\n - sphinxcontrib-serializinghtml==1.1.5\n - tomli==1.2.3\n - traits==6.4.1\n - typing-extensions==4.1.1\n - urllib3==1.26.20\n - yapf==0.32.0\n - zipp==3.6.0\nprefix: /opt/conda/envs/nipype\n"},"FAIL_TO_PASS":{"kind":"list like","value":["nipype/pipeline/engine/tests/test_join.py::test_join_nestediters"],"string":"[\n \"nipype/pipeline/engine/tests/test_join.py::test_join_nestediters\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list 
like","value":["nipype/pipeline/engine/tests/test_join.py::test_node_joinsource","nipype/pipeline/engine/tests/test_join.py::test_set_join_node_file_input","nipype/pipeline/engine/tests/test_join.py::test_nested_workflow_join","nipype/pipeline/engine/tests/test_join.py::test_name_prefix_join","nipype/pipeline/engine/tests/test_join.py::test_identity_join_node","nipype/pipeline/engine/tests/test_join.py::test_multiple_join_nodes","nipype/pipeline/engine/tests/test_join.py::test_unique_join_node","nipype/pipeline/engine/tests/test_join.py::test_join_expansion","nipype/pipeline/engine/tests/test_join.py::test_set_join_node","nipype/pipeline/engine/tests/test_join.py::test_multifield_join_node","nipype/pipeline/engine/tests/test_join.py::test_synchronize_join_node","nipype/pipeline/engine/tests/test_join.py::test_itersource_join_source_node","nipype/pipeline/engine/tests/test_join.py::test_itersource_two_join_nodes"],"string":"[\n \"nipype/pipeline/engine/tests/test_join.py::test_node_joinsource\",\n \"nipype/pipeline/engine/tests/test_join.py::test_set_join_node_file_input\",\n \"nipype/pipeline/engine/tests/test_join.py::test_nested_workflow_join\",\n \"nipype/pipeline/engine/tests/test_join.py::test_name_prefix_join\",\n \"nipype/pipeline/engine/tests/test_join.py::test_identity_join_node\",\n \"nipype/pipeline/engine/tests/test_join.py::test_multiple_join_nodes\",\n \"nipype/pipeline/engine/tests/test_join.py::test_unique_join_node\",\n \"nipype/pipeline/engine/tests/test_join.py::test_join_expansion\",\n \"nipype/pipeline/engine/tests/test_join.py::test_set_join_node\",\n \"nipype/pipeline/engine/tests/test_join.py::test_multifield_join_node\",\n \"nipype/pipeline/engine/tests/test_join.py::test_synchronize_join_node\",\n \"nipype/pipeline/engine/tests/test_join.py::test_itersource_join_source_node\",\n \"nipype/pipeline/engine/tests/test_join.py::test_itersource_two_join_nodes\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"Apache License 2.0"},"__index_level_0__":{"kind":"number","value":2586,"string":"2,586"},"num_tokens_patch":{"kind":"number","value":332,"string":"332"},"before_filepaths":{"kind":"list like","value":["nipype/pipeline/engine/utils.py"],"string":"[\n \"nipype/pipeline/engine/utils.py\"\n]"}}},{"rowIdx":583,"cells":{"instance_id":{"kind":"string","value":"theolind__pymysensors-154"},"base_commit":{"kind":"string","value":"f373e86e5423c8a92bb5adeb7b03ae7b64850e04"},"created_at":{"kind":"string","value":"2018-05-26 12:59:31"},"environment_setup_commit":{"kind":"string","value":"f373e86e5423c8a92bb5adeb7b03ae7b64850e04"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/mysensors/__init__.py b/mysensors/__init__.py\nindex f57486b..784f988 100644\n--- a/mysensors/__init__.py\n+++ b/mysensors/__init__.py\n@@ -49,7 +49,7 @@ class Gateway(object):\n self.metric = True # if true - use metric, if false - use imperial\n if persistence:\n self.persistence = Persistence(\n- self.sensors, persistence_file, persistence_scheduler)\n+ self.sensors, persistence_scheduler, persistence_file)\n else:\n self.persistence = None\n self.protocol_version = safe_is_version(protocol_version)\n@@ -351,7 +351,8 @@ class ThreadingGateway(Gateway):\n \n def __init__(self, *args, **kwargs):\n \"\"\"Set up gateway instance.\"\"\"\n- super().__init__(*args, **kwargs)\n+ super().__init__(\n+ *args, persistence_scheduler=self._create_scheduler, **kwargs)\n self.lock = threading.Lock()\n self._stop_event = 
{"rowIdx":583,"cells":{"instance_id":{"kind":"string","value":"theolind__pymysensors-154"},"base_commit":{"kind":"string","value":"f373e86e5423c8a92bb5adeb7b03ae7b64850e04"},"created_at":{"kind":"string","value":"2018-05-26 12:59:31"},"environment_setup_commit":{"kind":"string","value":"f373e86e5423c8a92bb5adeb7b03ae7b64850e04"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/mysensors/__init__.py b/mysensors/__init__.py\nindex f57486b..784f988 100644\n--- a/mysensors/__init__.py\n+++ b/mysensors/__init__.py\n@@ -49,7 +49,7 @@ class Gateway(object):\n self.metric = True # if true - use metric, if false - use imperial\n if persistence:\n self.persistence = Persistence(\n- self.sensors, persistence_file, persistence_scheduler)\n+ self.sensors, persistence_scheduler, persistence_file)\n else:\n self.persistence = None\n self.protocol_version = safe_is_version(protocol_version)\n@@ -351,7 +351,8 @@ class ThreadingGateway(Gateway):\n \n def __init__(self, *args, **kwargs):\n \"\"\"Set up gateway instance.\"\"\"\n- super().__init__(*args, **kwargs)\n+ super().__init__(\n+ *args, persistence_scheduler=self._create_scheduler, **kwargs)\n self.lock = threading.Lock()\n self._stop_event = threading.Event()\n self._cancel_save = None\n@@ -373,12 +374,22 @@ class ThreadingGateway(Gateway):\n continue\n time.sleep(0.02)\n \n+ def _create_scheduler(self, save_sensors):\n+ \"\"\"Return function to schedule saving sensors.\"\"\"\n+ def schedule_save():\n+ \"\"\"Save sensors and schedule a new save.\"\"\"\n+ save_sensors()\n+ scheduler = threading.Timer(10.0, schedule_save)\n+ scheduler.start()\n+ self._cancel_save = scheduler.cancel\n+ return schedule_save\n+\n def start_persistence(self):\n \"\"\"Load persistence file and schedule saving of persistence file.\"\"\"\n if not self.persistence:\n return\n self.persistence.safe_load_sensors()\n- self._cancel_save = self.persistence.schedule_save_sensors()\n+ self.persistence.schedule_save_sensors()\n \n def stop(self):\n \"\"\"Stop the background thread.\"\"\"\n@@ -494,7 +505,7 @@ class BaseAsyncGateway(BaseTransportGateway):\n \"\"\"Return function to schedule saving sensors.\"\"\"\n @asyncio.coroutine\n def schedule_save():\n- \"\"\"Return a function to cancel the schedule.\"\"\"\n+ \"\"\"Save sensors and schedule a new save.\"\"\"\n yield from self.loop.run_in_executor(None, save_sensors)\n callback = partial(\n ensure_future, schedule_save(), loop=self.loop)\ndiff --git a/mysensors/persistence.py b/mysensors/persistence.py\nindex 5dd4b57..efb2e6c 100644\n--- a/mysensors/persistence.py\n+++ b/mysensors/persistence.py\n@@ -3,35 +3,21 @@ import json\n import logging\n import os\n import pickle\n-import threading\n \n from .sensor import ChildSensor, Sensor\n \n _LOGGER = logging.getLogger(__name__)\n \n \n-def create_scheduler(save_sensors):\n- \"\"\"Return function to schedule saving sensors.\"\"\"\n- def schedule_save():\n- \"\"\"Return a function to cancel the schedule.\"\"\"\n- save_sensors()\n- scheduler = threading.Timer(10.0, schedule_save)\n- scheduler.start()\n- return scheduler.cancel\n- return schedule_save\n-\n-\n class Persistence(object):\n \"\"\"Organize persistence file saving and loading.\"\"\"\n \n def __init__(\n- self, sensors, persistence_file='mysensors.pickle',\n- schedule_factory=None):\n+ self, sensors, schedule_factory,\n+ persistence_file='mysensors.pickle'):\n \"\"\"Set up Persistence instance.\"\"\"\n self.persistence_file = persistence_file\n self.persistence_bak = '{}.bak'.format(self.persistence_file)\n- if schedule_factory is None:\n- schedule_factory = create_scheduler\n self.schedule_save_sensors = schedule_factory(self.save_sensors)\n self._sensors = sensors\n self.need_save = True\n"},"problem_statement":{"kind":"string","value":"Main program does not exit cleanly\n[branch master - version 0.14.0 - Using serial gateway - NO asyncio]\nAfter calling the SerialGateway.stop() method, the program does not return to the console but seems to be looping in a still-alive thread (probably the persistence thread).\n\n************************************************************************************\n```py\nMYSGW_Serial_Port = '/dev/ttyMSGW'\n....\nGATEWAY = mysensors.SerialGateway(\nMYSGW_Serial_Port, event_callback=event, persistence=True,\npersistence_file='./mysensors.json', protocol_version='2.0', baud=115200,\ntimeout=1.0, reconnect_timeout=10.0)\nGATEWAY.start_persistence()\nGATEWAY.start()\n....\n....\nGATEWAY.stop() #-> main thread does not go past this point\nexit(0)\n```\n*************************************************************************************"},"repo":{"kind":"string","value":"theolind/pymysensors"},"test_patch":{"kind":"string","value":"diff --git 
a/tests/test_gateway_mqtt.py b/tests/test_gateway_mqtt.py\nindex cba4270..60700fa 100644\n--- a/tests/test_gateway_mqtt.py\n+++ b/tests/test_gateway_mqtt.py\n@@ -143,11 +143,10 @@ def test_subscribe_error(gateway, add_sensor, mock_sub, caplog):\n def test_start_stop_gateway(\n mock_save, mock_load, gateway, add_sensor, mock_pub, mock_sub):\n \"\"\"Test start and stop of MQTT gateway.\"\"\"\n- gateway.persistence = Persistence(gateway.sensors)\n- mock_cancel_save = mock.MagicMock()\n+ mock_schedule_factory = mock.MagicMock()\n mock_schedule_save = mock.MagicMock()\n- mock_schedule_save.return_value = mock_cancel_save\n- gateway.persistence.schedule_save_sensors = mock_schedule_save\n+ mock_schedule_factory.return_value = mock_schedule_save\n+ gateway.persistence = Persistence(gateway.sensors, mock_schedule_factory)\n sensor = add_sensor(1)\n sensor.add_child_sensor(1, gateway.const.Presentation.S_HUM)\n sensor.children[1].values[gateway.const.SetReq.V_HUM] = '20'\n@@ -173,7 +172,6 @@ def test_start_stop_gateway(\n assert mock_pub.call_count == 2\n assert mock_pub.mock_calls == calls\n gateway.stop()\n- assert mock_cancel_save.call_count == 1\n assert mock_save.call_count == 1\n \n \n@@ -185,7 +183,7 @@ def test_mqtt_load_persistence(gateway, add_sensor, mock_sub, tmpdir):\n \n persistence_file = tmpdir.join('file.json')\n gateway.persistence = Persistence(\n- gateway.sensors, persistence_file.strpath)\n+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)\n gateway.persistence.save_sensors()\n del gateway.sensors[1]\n assert 1 not in gateway.sensors\ndiff --git a/tests/test_mysensors.py b/tests/test_mysensors.py\nindex 403ce9d..3a6c9c8 100644\n--- a/tests/test_mysensors.py\n+++ b/tests/test_mysensors.py\n@@ -613,6 +613,28 @@ def test_gateway_low_protocol():\n assert gateway.protocol_version == '1.4'\n \n \n+@mock.patch('mysensors.persistence.Persistence.save_sensors')\n+@mock.patch('mysensors.threading.Timer')\n+def test_threading_persistence(mock_timer_class, mock_save_sensors):\n+ \"\"\"Test schedule persistence on threading gateway.\"\"\"\n+ mock_timer_1 = mock.MagicMock()\n+ mock_timer_2 = mock.MagicMock()\n+ mock_timer_class.side_effect = [mock_timer_1, mock_timer_2]\n+ gateway = ThreadingGateway(persistence=True)\n+ gateway.persistence.schedule_save_sensors()\n+ assert mock_save_sensors.call_count == 1\n+ assert mock_timer_class.call_count == 1\n+ assert mock_timer_1.start.call_count == 1\n+ gateway.persistence.schedule_save_sensors()\n+ assert mock_save_sensors.call_count == 2\n+ assert mock_timer_class.call_count == 2\n+ assert mock_timer_1.start.call_count == 1\n+ assert mock_timer_2.start.call_count == 1\n+ gateway.stop()\n+ assert mock_timer_2.cancel.call_count == 1\n+ assert mock_save_sensors.call_count == 3\n+\n+\n def test_update_fw():\n \"\"\"Test calling fw_update with bad path.\"\"\"\n gateway = ThreadingGateway()\ndiff --git a/tests/test_persistence.py b/tests/test_persistence.py\nindex fdf5464..c5d8896 100644\n--- a/tests/test_persistence.py\n+++ b/tests/test_persistence.py\n@@ -45,7 +45,7 @@ def test_persistence(gateway, add_sensor, filename, tmpdir):\n \n persistence_file = tmpdir.join(filename)\n gateway.persistence = Persistence(\n- gateway.sensors, persistence_file.strpath)\n+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)\n gateway.persistence.save_sensors()\n del gateway.sensors[1]\n assert 1 not in gateway.sensors\n@@ -75,7 +75,7 @@ def test_bad_file_name(gateway, add_sensor, tmpdir):\n add_sensor(1)\n persistence_file = 
tmpdir.join('file.bad')\n gateway.persistence = Persistence(\n- gateway.sensors, persistence_file.strpath)\n+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)\n with pytest.raises(Exception):\n gateway.persistence.save_sensors()\n \n@@ -85,7 +85,7 @@ def test_json_no_files(gateway, tmpdir):\n assert not gateway.sensors\n persistence_file = tmpdir.join('file.json')\n gateway.persistence = Persistence(\n- gateway.sensors, persistence_file.strpath)\n+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)\n gateway.persistence.safe_load_sensors()\n assert not gateway.sensors\n \n@@ -97,7 +97,7 @@ def test_empty_files(gateway, filename, tmpdir):\n assert not gateway.sensors\n persistence_file = tmpdir.join(filename)\n gateway.persistence = Persistence(\n- gateway.sensors, persistence_file.strpath)\n+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)\n persistence = gateway.persistence\n persistence_file.write('')\n with open(persistence.persistence_bak, 'w') as file_handle:\n@@ -112,7 +112,8 @@ def test_json_empty_file_good_bak(gateway, add_sensor, tmpdir):\n assert 1 in gateway.sensors\n persistence_file = tmpdir.join('file.json')\n orig_file_name = persistence_file.strpath\n- gateway.persistence = Persistence(gateway.sensors, orig_file_name)\n+ gateway.persistence = Persistence(\n+ gateway.sensors, mock.MagicMock(), orig_file_name)\n gateway.persistence.save_sensors()\n del gateway.sensors[1]\n assert 1 not in gateway.sensors\n@@ -160,7 +161,7 @@ def test_persistence_upgrade(\n assert 'description' not in sensor.children[0].__dict__\n persistence_file = tmpdir.join(filename)\n gateway.persistence = Persistence(\n- gateway.sensors, persistence_file.strpath)\n+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)\n gateway.persistence.save_sensors()\n del gateway.sensors[1]\n assert 1 not in gateway.sensors\n@@ -175,16 +176,21 @@ def test_persistence_upgrade(\n assert gateway.sensors[1].children[0].type == sensor.children[0].type\n \n \n-@mock.patch('mysensors.persistence.threading.Timer')\n @mock.patch('mysensors.persistence.Persistence.save_sensors')\n-def test_schedule_save_sensors(mock_save, mock_timer_class, gateway):\n+def test_schedule_save_sensors(mock_save, gateway):\n \"\"\"Test schedule save sensors.\"\"\"\n- mock_timer = mock.MagicMock()\n- mock_timer_class.return_value = mock_timer\n- gateway.persistence = Persistence(gateway.sensors)\n+ mock_schedule_save = mock.MagicMock()\n+ mock_schedule_factory = mock.MagicMock()\n+ mock_schedule_factory.return_value = mock_schedule_save\n+\n+ gateway.persistence = Persistence(gateway.sensors, mock_schedule_factory)\n+\n+ assert mock_schedule_factory.call_count == 1\n+ assert mock_schedule_factory.call_args == mock.call(mock_save)\n+\n gateway.persistence.schedule_save_sensors()\n- assert mock_save.call_count == 1\n- assert mock_timer.start.call_count == 1\n+\n+ assert mock_schedule_save.call_count == 1\n \n \n class MySensorsJSONEncoderTestUpgrade(MySensorsJSONEncoder):\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"merge_commit\",\n \"failed_lite_validators\": [\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 0,\n \"test_score\": 2\n },\n \"num_modified_files\": 2\n}"},"version":{"kind":"string","value":"0.14"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .\",\n \"log_parser\": 
\"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\",\n \"flake8\",\n \"pylint\",\n \"pydocstyle\"\n ],\n \"pre_install\": null,\n \"python\": \"3.6\",\n \"reqs_path\": [\n \"requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"astroid==2.11.7\nattrs==22.2.0\ncertifi==2021.5.30\ncrcmod==1.7\ndill==0.3.4\nflake8==5.0.4\nget-mac==0.9.2\nimportlib-metadata==4.2.0\niniconfig==1.1.1\nintelhex==2.3.0\nisort==5.10.1\nlazy-object-proxy==1.7.1\nmccabe==0.7.0\npackaging==21.3\nplatformdirs==2.4.0\npluggy==1.0.0\npy==1.11.0\npycodestyle==2.9.1\npydocstyle==6.3.0\npyflakes==2.5.0\npylint==2.13.9\n-e git+https://github.com/theolind/pymysensors.git@f373e86e5423c8a92bb5adeb7b03ae7b64850e04#egg=pymysensors\npyparsing==3.1.4\npyserial==3.5\npyserial-asyncio==0.6\npytest==7.0.1\nsnowballstemmer==2.2.0\ntomli==1.2.3\ntyped-ast==1.5.5\ntyping_extensions==4.1.1\nvoluptuous==0.11.1\nwrapt==1.16.0\nzipp==3.6.0\n"},"environment":{"kind":"string","value":"name: pymysensors\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - pip=21.2.2=py36h06a4308_0\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - astroid==2.11.7\n - attrs==22.2.0\n - crcmod==1.7\n - dill==0.3.4\n - flake8==5.0.4\n - get-mac==0.9.2\n - importlib-metadata==4.2.0\n - iniconfig==1.1.1\n - intelhex==2.3.0\n - isort==5.10.1\n - lazy-object-proxy==1.7.1\n - mccabe==0.7.0\n - packaging==21.3\n - platformdirs==2.4.0\n - pluggy==1.0.0\n - py==1.11.0\n - pycodestyle==2.9.1\n - pydocstyle==6.3.0\n - pyflakes==2.5.0\n - pylint==2.13.9\n - pyparsing==3.1.4\n - pyserial==3.5\n - pyserial-asyncio==0.6\n - pytest==7.0.1\n - snowballstemmer==2.2.0\n - tomli==1.2.3\n - typed-ast==1.5.5\n - typing-extensions==4.1.1\n - voluptuous==0.11.1\n - wrapt==1.16.0\n - zipp==3.6.0\nprefix: /opt/conda/envs/pymysensors\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/test_gateway_mqtt.py::test_start_stop_gateway","tests/test_gateway_mqtt.py::test_mqtt_load_persistence","tests/test_mysensors.py::test_threading_persistence","tests/test_persistence.py::test_persistence[file.pickle]","tests/test_persistence.py::test_persistence[file.json]","tests/test_persistence.py::test_bad_file_name","tests/test_persistence.py::test_json_no_files","tests/test_persistence.py::test_empty_files[file.pickle]","tests/test_persistence.py::test_empty_files[file.json]","tests/test_persistence.py::test_json_empty_file_good_bak","tests/test_persistence.py::test_persistence_upgrade[file.pickle]","tests/test_persistence.py::test_persistence_upgrade[file.json]","tests/test_persistence.py::test_schedule_save_sensors"],"string":"[\n \"tests/test_gateway_mqtt.py::test_start_stop_gateway\",\n \"tests/test_gateway_mqtt.py::test_mqtt_load_persistence\",\n 
\"tests/test_mysensors.py::test_threading_persistence\",\n \"tests/test_persistence.py::test_persistence[file.pickle]\",\n \"tests/test_persistence.py::test_persistence[file.json]\",\n \"tests/test_persistence.py::test_bad_file_name\",\n \"tests/test_persistence.py::test_json_no_files\",\n \"tests/test_persistence.py::test_empty_files[file.pickle]\",\n \"tests/test_persistence.py::test_empty_files[file.json]\",\n \"tests/test_persistence.py::test_json_empty_file_good_bak\",\n \"tests/test_persistence.py::test_persistence_upgrade[file.pickle]\",\n \"tests/test_persistence.py::test_persistence_upgrade[file.json]\",\n \"tests/test_persistence.py::test_schedule_save_sensors\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/test_gateway_mqtt.py::test_send","tests/test_gateway_mqtt.py::test_send_empty_string","tests/test_gateway_mqtt.py::test_send_error","tests/test_gateway_mqtt.py::test_recv","tests/test_gateway_mqtt.py::test_recv_wrong_prefix","tests/test_gateway_mqtt.py::test_presentation","tests/test_gateway_mqtt.py::test_presentation_no_sensor","tests/test_gateway_mqtt.py::test_subscribe_error","tests/test_gateway_mqtt.py::test_nested_prefix","tests/test_gateway_mqtt.py::test_get_gateway_id","tests/test_mysensors.py::test_logic_bad_message[1.4]","tests/test_mysensors.py::test_logic_bad_message[1.5]","tests/test_mysensors.py::test_logic_bad_message[2.0]","tests/test_mysensors.py::test_logic_bad_message[2.1]","tests/test_mysensors.py::test_logic_bad_message[2.2]","tests/test_mysensors.py::test_non_presented_sensor[1.4-None]","tests/test_mysensors.py::test_non_presented_sensor[1.5-None]","tests/test_mysensors.py::test_non_presented_sensor[2.0-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_non_presented_sensor[2.1-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_non_presented_sensor[2.2-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_present_to_non_sensor[1.4-None]","tests/test_mysensors.py::test_present_to_non_sensor[1.5-None]","tests/test_mysensors.py::test_present_to_non_sensor[2.0-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_present_to_non_sensor[2.1-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_present_to_non_sensor[2.2-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_internal_id_request[1.4]","tests/test_mysensors.py::test_internal_id_request[1.5]","tests/test_mysensors.py::test_internal_id_request[2.0]","tests/test_mysensors.py::test_internal_id_request[2.1]","tests/test_mysensors.py::test_internal_id_request[2.2]","tests/test_mysensors.py::test_id_request_with_node_zero[1.4]","tests/test_mysensors.py::test_id_request_with_node_zero[1.5]","tests/test_mysensors.py::test_id_request_with_node_zero[2.0]","tests/test_mysensors.py::test_id_request_with_node_zero[2.1]","tests/test_mysensors.py::test_id_request_with_node_zero[2.2]","tests/test_mysensors.py::test_presentation_arduino_node[1.4]","tests/test_mysensors.py::test_presentation_arduino_node[1.5]","tests/test_mysensors.py::test_presentation_arduino_node[2.0]","tests/test_mysensors.py::test_presentation_arduino_node[2.1]","tests/test_mysensors.py::test_presentation_arduino_node[2.2]","tests/test_mysensors.py::test_id_request_presentation[1.4]","tests/test_mysensors.py::test_id_request_presentation[1.5]","tests/test_mysensors.py::test_id_request_presentation[2.0]","tests/test_mysensors.py::test_id_request_presentation[2.1]","tests/test_mysensors.py::test_id_request_presentation[2.2]","tests/test_mysensors.py::test_internal_config[1.4]","tests/test_mysensors
.py::test_internal_config[1.5]","tests/test_mysensors.py::test_internal_config[2.0]","tests/test_mysensors.py::test_internal_config[2.1]","tests/test_mysensors.py::test_internal_config[2.2]","tests/test_mysensors.py::test_internal_time[1.4]","tests/test_mysensors.py::test_internal_time[1.5]","tests/test_mysensors.py::test_internal_time[2.0]","tests/test_mysensors.py::test_internal_time[2.1]","tests/test_mysensors.py::test_internal_time[2.2]","tests/test_mysensors.py::test_internal_sketch_name[1.4]","tests/test_mysensors.py::test_internal_sketch_name[1.5]","tests/test_mysensors.py::test_internal_sketch_name[2.0]","tests/test_mysensors.py::test_internal_sketch_name[2.1]","tests/test_mysensors.py::test_internal_sketch_name[2.2]","tests/test_mysensors.py::test_internal_sketch_version[1.4]","tests/test_mysensors.py::test_internal_sketch_version[1.5]","tests/test_mysensors.py::test_internal_sketch_version[2.0]","tests/test_mysensors.py::test_internal_sketch_version[2.1]","tests/test_mysensors.py::test_internal_sketch_version[2.2]","tests/test_mysensors.py::test_internal_log_message[1.4]","tests/test_mysensors.py::test_internal_log_message[1.5]","tests/test_mysensors.py::test_internal_log_message[2.0]","tests/test_mysensors.py::test_internal_log_message[2.1]","tests/test_mysensors.py::test_internal_log_message[2.2]","tests/test_mysensors.py::test_internal_gateway_ready[1.4-None]","tests/test_mysensors.py::test_internal_gateway_ready[1.5-None]","tests/test_mysensors.py::test_internal_gateway_ready[2.0-255;255;3;0;20;\\n]","tests/test_mysensors.py::test_internal_gateway_ready[2.1-255;255;3;0;20;\\n]","tests/test_mysensors.py::test_internal_gateway_ready[2.2-255;255;3;0;20;\\n]","tests/test_mysensors.py::test_present_light_level_sensor[1.4]","tests/test_mysensors.py::test_present_light_level_sensor[1.5]","tests/test_mysensors.py::test_present_light_level_sensor[2.0]","tests/test_mysensors.py::test_present_light_level_sensor[2.1]","tests/test_mysensors.py::test_present_light_level_sensor[2.2]","tests/test_mysensors.py::test_present_humidity_sensor[1.4]","tests/test_mysensors.py::test_present_humidity_sensor[1.5]","tests/test_mysensors.py::test_present_humidity_sensor[2.0]","tests/test_mysensors.py::test_present_humidity_sensor[2.1]","tests/test_mysensors.py::test_present_humidity_sensor[2.2]","tests/test_mysensors.py::test_present_same_child[1.4]","tests/test_mysensors.py::test_present_same_child[1.5]","tests/test_mysensors.py::test_present_same_child[2.0]","tests/test_mysensors.py::test_present_same_child[2.1]","tests/test_mysensors.py::test_present_same_child[2.2]","tests/test_mysensors.py::test_set_light_level[1.4]","tests/test_mysensors.py::test_set_light_level[1.5]","tests/test_mysensors.py::test_set_light_level[2.0]","tests/test_mysensors.py::test_set_light_level[2.1]","tests/test_mysensors.py::test_set_light_level[2.2]","tests/test_mysensors.py::test_set_humidity_level[1.4]","tests/test_mysensors.py::test_set_humidity_level[1.5]","tests/test_mysensors.py::test_set_humidity_level[2.0]","tests/test_mysensors.py::test_set_humidity_level[2.1]","tests/test_mysensors.py::test_set_humidity_level[2.2]","tests/test_mysensors.py::test_battery_level[1.4]","tests/test_mysensors.py::test_battery_level[1.5]","tests/test_mysensors.py::test_battery_level[2.0]","tests/test_mysensors.py::test_battery_level[2.1]","tests/test_mysensors.py::test_battery_level[2.2]","tests/test_mysensors.py::test_bad_battery_level[1.4]","tests/test_mysensors.py::test_bad_battery_level[1.5]","tests/test_mysensors.py::test_bad_battery
_level[2.0]","tests/test_mysensors.py::test_bad_battery_level[2.1]","tests/test_mysensors.py::test_bad_battery_level[2.2]","tests/test_mysensors.py::test_req[1.4]","tests/test_mysensors.py::test_req[1.5]","tests/test_mysensors.py::test_req[2.0]","tests/test_mysensors.py::test_req[2.1]","tests/test_mysensors.py::test_req[2.2]","tests/test_mysensors.py::test_req_zerovalue[1.4]","tests/test_mysensors.py::test_req_zerovalue[1.5]","tests/test_mysensors.py::test_req_zerovalue[2.0]","tests/test_mysensors.py::test_req_zerovalue[2.1]","tests/test_mysensors.py::test_req_zerovalue[2.2]","tests/test_mysensors.py::test_req_novalue[1.4]","tests/test_mysensors.py::test_req_novalue[1.5]","tests/test_mysensors.py::test_req_novalue[2.0]","tests/test_mysensors.py::test_req_novalue[2.1]","tests/test_mysensors.py::test_req_novalue[2.2]","tests/test_mysensors.py::test_req_notasensor[1.4]","tests/test_mysensors.py::test_req_notasensor[1.5]","tests/test_mysensors.py::test_req_notasensor[2.0]","tests/test_mysensors.py::test_req_notasensor[2.1]","tests/test_mysensors.py::test_req_notasensor[2.2]","tests/test_mysensors.py::test_callback[1.4]","tests/test_mysensors.py::test_callback[1.5]","tests/test_mysensors.py::test_callback[2.0]","tests/test_mysensors.py::test_callback[2.1]","tests/test_mysensors.py::test_callback[2.2]","tests/test_mysensors.py::test_callback_exception[1.4]","tests/test_mysensors.py::test_callback_exception[1.5]","tests/test_mysensors.py::test_callback_exception[2.0]","tests/test_mysensors.py::test_callback_exception[2.1]","tests/test_mysensors.py::test_callback_exception[2.2]","tests/test_mysensors.py::test_set_and_reboot[1.4]","tests/test_mysensors.py::test_set_and_reboot[1.5]","tests/test_mysensors.py::test_set_and_reboot[2.0]","tests/test_mysensors.py::test_set_and_reboot[2.1]","tests/test_mysensors.py::test_set_and_reboot[2.2]","tests/test_mysensors.py::test_set_child_value[1.4]","tests/test_mysensors.py::test_set_child_value[1.5]","tests/test_mysensors.py::test_set_child_value[2.0]","tests/test_mysensors.py::test_set_child_value[2.1]","tests/test_mysensors.py::test_set_child_value[2.2]","tests/test_mysensors.py::test_set_child_value_no_sensor[1.4-None]","tests/test_mysensors.py::test_set_child_value_no_sensor[1.5-None]","tests/test_mysensors.py::test_set_child_value_no_sensor[2.0-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_set_child_value_no_sensor[2.1-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_set_child_value_no_sensor[2.2-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_non_presented_child[1.4-None]","tests/test_mysensors.py::test_non_presented_child[1.5-None]","tests/test_mysensors.py::test_non_presented_child[2.0-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_non_presented_child[2.1-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_non_presented_child[2.2-1;255;3;0;19;\\n]","tests/test_mysensors.py::test_set_child_no_children[1.4]","tests/test_mysensors.py::test_set_child_no_children[1.5]","tests/test_mysensors.py::test_set_child_no_children[2.0]","tests/test_mysensors.py::test_set_child_no_children[2.1]","tests/test_mysensors.py::test_set_child_no_children[2.2]","tests/test_mysensors.py::test_set_child_value_bad_type[1.4]","tests/test_mysensors.py::test_set_child_value_bad_type[1.5]","tests/test_mysensors.py::test_set_child_value_bad_type[2.0]","tests/test_mysensors.py::test_set_child_value_bad_type[2.1]","tests/test_mysensors.py::test_set_child_value_bad_type[2.2]","tests/test_mysensors.py::test_set_child_value_bad_ack[1.4]","tests/test_mysensors.py::test_set_child_val
ue_bad_ack[1.5]","tests/test_mysensors.py::test_set_child_value_bad_ack[2.0]","tests/test_mysensors.py::test_set_child_value_bad_ack[2.1]","tests/test_mysensors.py::test_set_child_value_bad_ack[2.2]","tests/test_mysensors.py::test_set_child_value_value_type[1.4]","tests/test_mysensors.py::test_set_child_value_value_type[1.5]","tests/test_mysensors.py::test_set_child_value_value_type[2.0]","tests/test_mysensors.py::test_set_child_value_value_type[2.1]","tests/test_mysensors.py::test_set_child_value_value_type[2.2]","tests/test_mysensors.py::test_child_validate[1.4]","tests/test_mysensors.py::test_child_validate[1.5]","tests/test_mysensors.py::test_child_validate[2.0]","tests/test_mysensors.py::test_child_validate[2.1]","tests/test_mysensors.py::test_child_validate[2.2]","tests/test_mysensors.py::test_set_forecast[1.4]","tests/test_mysensors.py::test_set_forecast[1.5]","tests/test_mysensors.py::test_set_forecast[2.0]","tests/test_mysensors.py::test_set_forecast[2.1]","tests/test_mysensors.py::test_set_forecast[2.2]","tests/test_mysensors.py::test_set_bad_battery_attribute[1.4]","tests/test_mysensors.py::test_set_bad_battery_attribute[1.5]","tests/test_mysensors.py::test_set_bad_battery_attribute[2.0]","tests/test_mysensors.py::test_set_bad_battery_attribute[2.1]","tests/test_mysensors.py::test_set_bad_battery_attribute[2.2]","tests/test_mysensors.py::test_set_rgb[1.5]","tests/test_mysensors.py::test_set_rgb[2.0]","tests/test_mysensors.py::test_set_rgb[2.1]","tests/test_mysensors.py::test_set_rgb[2.2]","tests/test_mysensors.py::test_set_rgbw[1.5]","tests/test_mysensors.py::test_set_rgbw[2.0]","tests/test_mysensors.py::test_set_rgbw[2.1]","tests/test_mysensors.py::test_set_rgbw[2.2]","tests/test_mysensors.py::test_smartsleep[2.0-1;255;3;0;22;\n]","tests/test_mysensors.py::test_smartsleep[2.1-1;255;3;0;22;\n]","tests/test_mysensors.py::test_smartsleep[2.2-1;255;3;0;32;500\n]","tests/test_mysensors.py::test_smartsleep_from_unknown[2.0-1;255;3;0;22;\n]","tests/test_mysensors.py::test_smartsleep_from_unknown[2.1-1;255;3;0;22;\n]","tests/test_mysensors.py::test_smartsleep_from_unknown[2.2-1;255;3;0;32;500\n]","tests/test_mysensors.py::test_set_with_new_state[2.0-1;255;3;0;22;\n]","tests/test_mysensors.py::test_set_with_new_state[2.1-1;255;3;0;22;\n]","tests/test_mysensors.py::test_set_with_new_state[2.2-1;255;3;0;32;500\n]","tests/test_mysensors.py::test_discover_response_unknown[2.0]","tests/test_mysensors.py::test_discover_response_unknown[2.1]","tests/test_mysensors.py::test_discover_response_unknown[2.2]","tests/test_mysensors.py::test_discover_response_known[2.0]","tests/test_mysensors.py::test_discover_response_known[2.1]","tests/test_mysensors.py::test_discover_response_known[2.2]","tests/test_mysensors.py::test_set_position[2.0]","tests/test_mysensors.py::test_set_position[2.1]","tests/test_mysensors.py::test_set_position[2.2]","tests/test_mysensors.py::test_gateway_bad_protocol","tests/test_mysensors.py::test_gateway_low_protocol","tests/test_mysensors.py::test_update_fw","tests/test_mysensors.py::test_update_fw_bad_path"]},"PASS_TO_FAIL":{"kind":"list like","value":[]},"license_name":{"kind":"string","value":"MIT License"},"__index_level_0__":{"kind":"number","value":2590},"num_tokens_patch":{"kind":"number","value":847},"before_filepaths":{"kind":"list like","value":["mysensors/__init__.py","mysensors/persistence.py"]
\"mysensors/persistence.py\"\n]"}}},{"rowIdx":584,"cells":{"instance_id":{"kind":"string","value":"acorg__dark-matter-576"},"base_commit":{"kind":"string","value":"66f246ba9417430e3f00e94ca0abc88de59a92d4"},"created_at":{"kind":"string","value":"2018-05-27 14:07:28"},"environment_setup_commit":{"kind":"string","value":"66f246ba9417430e3f00e94ca0abc88de59a92d4"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/dark/__init__.py b/dark/__init__.py\nindex 0246a07..6a59296 100644\n--- a/dark/__init__.py\n+++ b/dark/__init__.py\n@@ -7,4 +7,4 @@ if sys.version_info < (2, 7):\n # will not be found by the version() function in ../setup.py\n #\n # Remember to update ../CHANGELOG.md describing what's new in each version.\n-__version__ = '3.0.5'\n+__version__ = '3.0.6'\ndiff --git a/dark/filter.py b/dark/filter.py\nindex 0665ffc..b0daa76 100644\n--- a/dark/filter.py\n+++ b/dark/filter.py\n@@ -279,6 +279,23 @@ def addFASTAFilteringCommandLineOptions(parser):\n help=('A file of (1-based) sequence numbers to retain. Numbers must '\n 'be one per line.'))\n \n+ parser.add_argument(\n+ '--idLambda', metavar='LAMBDA-FUNCTION',\n+ help=('A one-argument function taking and returning a read id. '\n+ 'E.g., --idLambda \"lambda id: id.split(\\'_\\')[0]\" or '\n+ '--idLambda \"lambda id: id[:10]\". If the function returns None, '\n+ 'the read will be filtered out.'))\n+\n+ parser.add_argument(\n+ '--readLambda', metavar='LAMBDA-FUNCTION',\n+ help=('A one-argument function taking and returning a read. '\n+ 'E.g., --readLambda \"lambda r: Read(r.id.split(\\'_\\')[0], '\n+ 'r.sequence.strip(\\'-\\')\". Make sure to also modify the quality '\n+ 'string if you change the length of a FASTQ sequence. If the '\n+ 'function returns None, the read will be filtered out. The '\n+ 'function will be passed to eval with the dark.reads classes '\n+ 'Read, DNARead, AARead, etc. all in scope.'))\n+\n # A mutually exclusive group for --keepSites, --keepSitesFile,\n # --removeSites, and --removeSitesFile.\n group = parser.add_mutually_exclusive_group()\n@@ -381,4 +398,5 @@ def parseFASTAFilteringCommandLineOptions(args, reads):\n randomSubset=args.randomSubset, trueLength=args.trueLength,\n sampleFraction=args.sampleFraction,\n sequenceNumbersFile=args.sequenceNumbersFile,\n+ idLambda=args.idLambda, readLambda=args.readLambda,\n keepSites=keepSites, removeSites=removeSites)\ndiff --git a/dark/reads.py b/dark/reads.py\nindex 42390e4..1074f78 100644\n--- a/dark/reads.py\n+++ b/dark/reads.py\n@@ -740,8 +740,9 @@ class ReadFilter(object):\n sequence identity.\n @param removeDuplicatesById: If C{True} remove duplicated reads based\n only on read id.\n- @param removeDescriptions: If C{True} remove the description part of read\n- ids (i.e., the part following the first whitespace).\n+ @param removeDescriptions: If C{True} remove the description (the part\n+ following the first whitespace) from read ids. The description is\n+ removed after applying the function specified by --idLambda (if any).\n @param modifier: If not C{None}, a function that is passed a read\n and which either returns a read or C{None}. If it returns a read,\n that read is passed through the filter. If it returns C{None},\n@@ -791,6 +792,14 @@ class ReadFilter(object):\n file containing (1-based) sequence numbers, in ascending order,\n one per line. Only those sequences matching the given numbers will\n be kept.\n+ @param idLambda: If not C{None}, a C{str} Python lambda function\n+ specification to use to modify read ids. 
The function is applied\n+ before removing the description (if --removeDescriptions is also\n+ specified).\n+ @param readLambda: If not C{None}, a C{str} Python lambda function\n+ specification to use to modify reads. The function will be passed,\n+ and must return, a single Read (or one of its subclasses). This\n+ function is called after the --idLambda function, if any.\n @param keepSites: A set of C{int} 0-based sites (i.e., indices) in\n sequences that should be kept. If C{None} (the default), all sites are\n kept.\n@@ -819,7 +828,8 @@ class ReadFilter(object):\n removeDuplicates=False, removeDuplicatesById=False,\n removeDescriptions=False, modifier=None, randomSubset=None,\n trueLength=None, sampleFraction=None,\n- sequenceNumbersFile=None, keepSites=None, removeSites=None):\n+ sequenceNumbersFile=None, idLambda=None, readLambda=None,\n+ keepSites=None, removeSites=None):\n \n if randomSubset is not None:\n if sampleFraction is not None:\n@@ -929,6 +939,9 @@ class ReadFilter(object):\n sampleFraction = None\n self.sampleFraction = sampleFraction\n \n+ self.idLambda = eval(idLambda) if idLambda else None\n+ self.readLambda = eval(readLambda) if readLambda else None\n+\n def filter(self, read):\n \"\"\"\n Check if a read passes the filter.\n@@ -1038,6 +1051,20 @@ class ReadFilter(object):\n elif self.removeSites is not None:\n read = read.newFromSites(self.removeSites, exclude=True)\n \n+ if self.idLambda:\n+ newId = self.idLambda(read.id)\n+ if newId is None:\n+ return False\n+ else:\n+ read.id = newId\n+\n+ if self.readLambda:\n+ newRead = self.readLambda(read)\n+ if newRead is None:\n+ return False\n+ else:\n+ read = newRead\n+\n if self.removeDescriptions:\n read.id = read.id.split()[0]\n \n"},"problem_statement":{"kind":"string","value":"Add ability to give an anonymous Python function for read id conversion when filtering FASTA"},"repo":{"kind":"string","value":"acorg/dark-matter"},"test_patch":{"kind":"string","value":"diff --git a/test/test_reads.py b/test/test_reads.py\nindex 4e51442..5d9cd3e 100644\n--- a/test/test_reads.py\n+++ b/test/test_reads.py\n@@ -3126,6 +3126,52 @@ class TestReadsFiltering(TestCase):\n six.assertRaisesRegex(self, ValueError, error, Reads().filter,\n keepSites={4}, removeSites={5})\n \n+ def testIdLambda(self):\n+ \"\"\"\n+ A passed idLambda function should produce the expected read ids.\n+ \"\"\"\n+ read = Read('id1', 'ATCGCC')\n+ reads = Reads(initialReads=[read])\n+ result = reads.filter(idLambda='lambda id: \"x-\" + id.upper()')\n+ self.assertEqual('x-ID1', list(result)[0].id)\n+\n+ def testIdLambdaReturningNone(self):\n+ \"\"\"\n+ A passed idLambda function should produce the expected read ids,\n+ including when it returns None.\n+ \"\"\"\n+ read1 = Read('id1', 'ATCGCC')\n+ read2 = Read('id2', 'GGATCG')\n+ reads = Reads(initialReads=[read1, read2])\n+ result = reads.filter(\n+ idLambda='lambda id: \"aa\" if id.find(\"1\") > -1 else None')\n+ (result,) = list(result)\n+ self.assertEqual('aa', result.id)\n+\n+ def testReadLambda(self):\n+ \"\"\"\n+ A passed readLambda function should produce the expected reads.\n+ \"\"\"\n+ read = Read('id1', 'ATCGCC')\n+ reads = Reads(initialReads=[read])\n+ result = reads.filter(readLambda='lambda r: Read(\"hey\", \"AAA\")')\n+ (result,) = list(result)\n+ self.assertEqual(Read('hey', 'AAA'), result)\n+\n+ def testReadLambdaReturningNone(self):\n+ \"\"\"\n+ A passed readLambda function should produce the expected reads,\n+ including when it returns None.\n+ \"\"\"\n+ read1 = Read('xid1', 'ATCGCC')\n+ read2 = 
Read('yid2', 'GGATCG')\n+ reads = Reads(initialReads=[read1, read2])\n+ result = reads.filter(\n+ readLambda=('lambda r: Read(r.id + \"-x\", r.sequence[:2]) '\n+ 'if r.id.startswith(\"x\") else None'))\n+ (result,) = list(result)\n+ self.assertEqual(Read('xid1-x', 'AT'), result)\n+\n \n class TestReadsInRAM(TestCase):\n \"\"\"\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_short_problem_statement\",\n \"has_many_modified_files\",\n \"has_many_hunks\",\n \"has_pytest_match_arg\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 3,\n \"test_score\": 3\n },\n \"num_modified_files\": 3\n}"},"version":{"kind":"string","value":"unknown"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.6\",\n \"reqs_path\": [\n \"requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"attrs==22.2.0\nbackcall==0.2.0\nbiopython==1.79\nbz2file==0.98\ncertifi==2021.5.30\ncffi==1.15.1\ncharset-normalizer==2.0.12\ncoverage==6.2\ncycler==0.11.0\n-e git+https://github.com/acorg/dark-matter.git@66f246ba9417430e3f00e94ca0abc88de59a92d4#egg=dark_matter\ndecorator==5.1.1\nexecnet==1.9.0\nidna==3.10\nimportlib-metadata==4.8.3\niniconfig==1.1.1\nipython==7.16.3\nipython-genutils==0.2.0\njedi==0.17.2\nkiwisolver==1.3.1\nmatplotlib==3.3.4\nnumpy==1.19.5\npackaging==21.3\nparso==0.7.1\npexpect==4.9.0\npickleshare==0.7.5\nPillow==8.4.0\npluggy==1.0.0\nprompt-toolkit==3.0.36\nptyprocess==0.7.0\npy==1.11.0\npycparser==2.21\npyfaidx==0.7.1\nPygments==2.14.0\npyparsing==3.1.4\npytest==7.0.1\npytest-asyncio==0.16.0\npytest-cov==4.0.0\npytest-mock==3.6.1\npytest-xdist==3.0.2\npython-dateutil==2.9.0.post0\npyzmq==25.1.2\nrequests==2.27.1\nsimplejson==3.20.1\nsix==1.17.0\ntomli==1.2.3\ntraitlets==4.3.3\ntyping_extensions==4.1.1\nurllib3==1.26.20\nwcwidth==0.2.13\nzipp==3.6.0\n"},"environment":{"kind":"string","value":"name: dark-matter\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - pip=21.2.2=py36h06a4308_0\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - attrs==22.2.0\n - backcall==0.2.0\n - biopython==1.79\n - bz2file==0.98\n - cffi==1.15.1\n - charset-normalizer==2.0.12\n - coverage==6.2\n - cycler==0.11.0\n - decorator==5.1.1\n - execnet==1.9.0\n - idna==3.10\n - importlib-metadata==4.8.3\n - iniconfig==1.1.1\n - ipython==7.16.3\n - ipython-genutils==0.2.0\n - jedi==0.17.2\n - 
kiwisolver==1.3.1\n - matplotlib==3.3.4\n - numpy==1.19.5\n - packaging==21.3\n - parso==0.7.1\n - pexpect==4.9.0\n - pickleshare==0.7.5\n - pillow==8.4.0\n - pluggy==1.0.0\n - prompt-toolkit==3.0.36\n - ptyprocess==0.7.0\n - py==1.11.0\n - pycparser==2.21\n - pyfaidx==0.7.1\n - pygments==2.14.0\n - pyparsing==3.1.4\n - pytest==7.0.1\n - pytest-asyncio==0.16.0\n - pytest-cov==4.0.0\n - pytest-mock==3.6.1\n - pytest-xdist==3.0.2\n - python-dateutil==2.9.0.post0\n - pyzmq==25.1.2\n - requests==2.27.1\n - simplejson==3.20.1\n - six==1.17.0\n - tomli==1.2.3\n - traitlets==4.3.3\n - typing-extensions==4.1.1\n - urllib3==1.26.20\n - wcwidth==0.2.13\n - zipp==3.6.0\nprefix: /opt/conda/envs/dark-matter\n"},"FAIL_TO_PASS":{"kind":"list like","value":["test/test_reads.py::TestReadsFiltering::testIdLambda","test/test_reads.py::TestReadsFiltering::testIdLambdaReturningNone","test/test_reads.py::TestReadsFiltering::testReadLambda","test/test_reads.py::TestReadsFiltering::testReadLambdaReturningNone"],"string":"[\n \"test/test_reads.py::TestReadsFiltering::testIdLambda\",\n \"test/test_reads.py::TestReadsFiltering::testIdLambdaReturningNone\",\n \"test/test_reads.py::TestReadsFiltering::testReadLambda\",\n \"test/test_reads.py::TestReadsFiltering::testReadLambdaReturningNone\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":["test/test_reads.py::TestReadsInRAM::testFastaFile"],"string":"[\n \"test/test_reads.py::TestReadsInRAM::testFastaFile\"\n]"},"PASS_TO_PASS":{"kind":"list like","value":["test/test_reads.py::TestRead::testCasePreservation","test/test_reads.py::TestRead::testCheckAlphabetAAReadMatchingReturnTrue","test/test_reads.py::TestRead::testCheckAlphabetAAReadNotMatchingRaise","test/test_reads.py::TestRead::testCheckAlphabetDNAReadMatchingReturnTrue","test/test_reads.py::TestRead::testCheckAlphabetDNAReadNotMatchingRaise","test/test_reads.py::TestRead::testCheckAlphabetwithReadMustBePermissive","test/test_reads.py::TestRead::testEquality","test/test_reads.py::TestRead::testEqualityWithDifferingIds","test/test_reads.py::TestRead::testEqualityWithDifferingQuality","test/test_reads.py::TestRead::testEqualityWithDifferingSequences","test/test_reads.py::TestRead::testEqualityWithNoQuality","test/test_reads.py::TestRead::testEqualityWithOneOmittedQuality","test/test_reads.py::TestRead::testExpectedAttributes","test/test_reads.py::TestRead::testFromDict","test/test_reads.py::TestRead::testFromDictNoQuality","test/test_reads.py::TestRead::testGetitemFullCopy","test/test_reads.py::TestRead::testGetitemId","test/test_reads.py::TestRead::testGetitemLength","test/test_reads.py::TestRead::testGetitemQuality","test/test_reads.py::TestRead::testGetitemReturnsNewRead","test/test_reads.py::TestRead::testGetitemReversed","test/test_reads.py::TestRead::testGetitemSequence","test/test_reads.py::TestRead::testGetitemSingleIndex","test/test_reads.py::TestRead::testGetitemWithStep","test/test_reads.py::TestRead::testHashDiffersIfIdDiffers","test/test_reads.py::TestRead::testHashDiffersIfQualityDiffers","test/test_reads.py::TestRead::testHashDiffersIfSequenceDiffers","test/test_reads.py::TestRead::testHashIdenticalNoQuality","test/test_reads.py::TestRead::testHashIdenticalWithQuality","test/test_reads.py::TestRead::testHashViaDict","test/test_reads.py::TestRead::testHashViaSet","test/test_reads.py::TestRead::testKeepSites","test/test_reads.py::TestRead::testKeepSitesAllSites","test/test_reads.py::TestRead::testKeepSitesNoSites","test/test_reads.py::TestRead::testKeepSitesOutOfRange","test/test_reads.py::TestRead::test
KeepSitesWithQuality","test/test_reads.py::TestRead::testLength","test/test_reads.py::TestRead::testLowComplexityFraction","test/test_reads.py::TestRead::testLowComplexityFractionEmptySequence","test/test_reads.py::TestRead::testLowComplexityFractionOne","test/test_reads.py::TestRead::testLowComplexityFractionZero","test/test_reads.py::TestRead::testNoQuality","test/test_reads.py::TestRead::testRemoveSites","test/test_reads.py::TestRead::testRemoveSitesAllSites","test/test_reads.py::TestRead::testRemoveSitesNoSites","test/test_reads.py::TestRead::testRemoveSitesOutOfRange","test/test_reads.py::TestRead::testRemoveSitesWithQuality","test/test_reads.py::TestRead::testToDict","test/test_reads.py::TestRead::testToDictNoQuality","test/test_reads.py::TestRead::testToFASTA","test/test_reads.py::TestRead::testToFASTAWithQuality","test/test_reads.py::TestRead::testToFASTQ","test/test_reads.py::TestRead::testToFASTQWithNoQuality","test/test_reads.py::TestRead::testToUnknownFormat","test/test_reads.py::TestRead::testUnequalLengths","test/test_reads.py::TestRead::testWalkHSPExactMatch","test/test_reads.py::TestRead::testWalkHSPExactMatchWithGap","test/test_reads.py::TestRead::testWalkHSPLeftAndRightOverhangingMatch","test/test_reads.py::TestRead::testWalkHSPLeftAndRightOverhangingMatchNoWhiskers","test/test_reads.py::TestRead::testWalkHSPLeftOverhangingMatch","test/test_reads.py::TestRead::testWalkHSPLeftOverhangingMatchNoWhiskers","test/test_reads.py::TestRead::testWalkHSPRightOverhangingMatch","test/test_reads.py::TestRead::testWalkHSPRightOverhangingMatchNoWhiskers","test/test_reads.py::TestDNARead::testGetitemReturnsNewDNARead","test/test_reads.py::TestDNARead::testReverseComplement","test/test_reads.py::TestDNARead::testReverseComplementAmbiguous","test/test_reads.py::TestDNARead::testReverseComplementReversesQuality","test/test_reads.py::TestDNARead::testTranslationOfMultipleStopCodons","test/test_reads.py::TestDNARead::testTranslationOfStartCodonATG","test/test_reads.py::TestDNARead::testTranslationOfStopCodonTAG","test/test_reads.py::TestDNARead::testTranslationOfStopCodonTGA","test/test_reads.py::TestDNARead::testTranslations","test/test_reads.py::TestDNARead::testTranslationsOfEmptySequence","test/test_reads.py::TestDNARead::testTranslationsOfOneBaseSequence","test/test_reads.py::TestDNARead::testTranslationsOfTwoBaseSequence","test/test_reads.py::TestRNARead::testGetitemReturnsNewRNARead","test/test_reads.py::TestRNARead::testReverseComplement","test/test_reads.py::TestRNARead::testReverseComplementAmbiguous","test/test_reads.py::TestRNARead::testTranslationOfStopCodonUAA","test/test_reads.py::TestAARead::testCloseCloseORF","test/test_reads.py::TestAARead::testCloseCloseThenCloseCloseORF","test/test_reads.py::TestAARead::testCloseCloseThenCloseCloseThenCloseCloseORF","test/test_reads.py::TestAARead::testCloseCloseThenCloseCloseThenCloseCloseORFWithJunk","test/test_reads.py::TestAARead::testCloseCloseThenCloseCloseThenCloseOpenORF","test/test_reads.py::TestAARead::testCloseCloseThenCloseCloseThenCloseOpenORFWithJunk","test/test_reads.py::TestAARead::testCloseCloseThenCloseOpenORF","test/test_reads.py::TestAARead::testCloseOpenORF","test/test_reads.py::TestAARead::testCloseOpenORFWithMultipleStarts","test/test_reads.py::TestAARead::testGetitemReturnsNewAARead","test/test_reads.py::TestAARead::testNoStartCodon_GithubIssue239","test/test_reads.py::TestAARead::testORFsEmptySequence","test/test_reads.py::TestAARead::testORFsEmptySequenceWithStart","test/test_reads.py::TestAARead::testORFsEmptySequ
enceWithStartStop","test/test_reads.py::TestAARead::testORFsWithJustStartsAndStops","test/test_reads.py::TestAARead::testORFsWithOneStopCodon","test/test_reads.py::TestAARead::testORFsWithTwoStopCodons","test/test_reads.py::TestAARead::testOpenCloseORF","test/test_reads.py::TestAARead::testOpenCloseORFWithMultipleStops","test/test_reads.py::TestAARead::testOpenCloseThenCloseCloseThenCloseOpenORF","test/test_reads.py::TestAARead::testOpenCloseThenCloseCloseThenCloseOpenORFWithJunk","test/test_reads.py::TestAARead::testOpenCloseThenCloseOpenORF","test/test_reads.py::TestAARead::testOpenOpenORF","test/test_reads.py::TestAARead::testPropertiesCorrectTranslation","test/test_reads.py::TestAARead::testPropertyDetailsCorrectTranslation","test/test_reads.py::TestAAReadWithX::testAlphabet","test/test_reads.py::TestAAReadWithX::testAlphabetChecking","test/test_reads.py::TestAAReadWithX::testGetitemReturnsNewAAReadWithX","test/test_reads.py::TestAAReadORF::testClosedClosedId","test/test_reads.py::TestAAReadORF::testClosedOpenId","test/test_reads.py::TestAAReadORF::testFromDict","test/test_reads.py::TestAAReadORF::testOpenClosedId","test/test_reads.py::TestAAReadORF::testOpenLeft","test/test_reads.py::TestAAReadORF::testOpenOpenId","test/test_reads.py::TestAAReadORF::testOpenRight","test/test_reads.py::TestAAReadORF::testSequence","test/test_reads.py::TestAAReadORF::testStart","test/test_reads.py::TestAAReadORF::testStartGreaterThanStop","test/test_reads.py::TestAAReadORF::testStartNegative","test/test_reads.py::TestAAReadORF::testStop","test/test_reads.py::TestAAReadORF::testStopGreaterThanOriginalSequenceLength","test/test_reads.py::TestAAReadORF::testToDict","test/test_reads.py::TestSSAARead::testCorrectAttributes","test/test_reads.py::TestSSAARead::testFromDict","test/test_reads.py::TestSSAARead::testGetitemFullCopy","test/test_reads.py::TestSSAARead::testGetitemId","test/test_reads.py::TestSSAARead::testGetitemLength","test/test_reads.py::TestSSAARead::testGetitemReturnsNewRead","test/test_reads.py::TestSSAARead::testGetitemReversed","test/test_reads.py::TestSSAARead::testGetitemSequence","test/test_reads.py::TestSSAARead::testGetitemSingleIndex","test/test_reads.py::TestSSAARead::testGetitemStructure","test/test_reads.py::TestSSAARead::testGetitemWithStep","test/test_reads.py::TestSSAARead::testHashDiffersIfIdDiffers","test/test_reads.py::TestSSAARead::testHashDiffersIfSequenceDiffers","test/test_reads.py::TestSSAARead::testHashDiffersIfStructureDiffers","test/test_reads.py::TestSSAARead::testHashViaDict","test/test_reads.py::TestSSAARead::testHashViaSet","test/test_reads.py::TestSSAARead::testKeepSites","test/test_reads.py::TestSSAARead::testKeepSitesAllSites","test/test_reads.py::TestSSAARead::testKeepSitesNoSites","test/test_reads.py::TestSSAARead::testKeepSitesOutOfRange","test/test_reads.py::TestSSAARead::testReads","test/test_reads.py::TestSSAARead::testRemoveSites","test/test_reads.py::TestSSAARead::testRemoveSitesAllSites","test/test_reads.py::TestSSAARead::testRemoveSitesNoSites","test/test_reads.py::TestSSAARead::testRemoveSitesOutOfRange","test/test_reads.py::TestSSAARead::testSequenceLengthMatchesStructureLength","test/test_reads.py::TestSSAARead::testToDict","test/test_reads.py::TestSSAARead::testToString","test/test_reads.py::TestSSAARead::testToStringWithExplicitFastaFormat","test/test_reads.py::TestSSAARead::testToStringWithExplicitFastaSSFormat","test/test_reads.py::TestSSAARead::testToStringWithStructureSuffix","test/test_reads.py::TestSSAARead::testToStringWithUnknownFormat","te
st/test_reads.py::TestSSAAReadWithX::testCorrectAttributes","test/test_reads.py::TestSSAAReadWithX::testFromDict","test/test_reads.py::TestSSAAReadWithX::testGetitemFullCopy","test/test_reads.py::TestSSAAReadWithX::testGetitemId","test/test_reads.py::TestSSAAReadWithX::testGetitemLength","test/test_reads.py::TestSSAAReadWithX::testGetitemReturnsNewRead","test/test_reads.py::TestSSAAReadWithX::testGetitemReversed","test/test_reads.py::TestSSAAReadWithX::testGetitemSequence","test/test_reads.py::TestSSAAReadWithX::testGetitemSingleIndex","test/test_reads.py::TestSSAAReadWithX::testGetitemStructure","test/test_reads.py::TestSSAAReadWithX::testGetitemWithStep","test/test_reads.py::TestSSAAReadWithX::testHashDiffersIfIdDiffers","test/test_reads.py::TestSSAAReadWithX::testHashDiffersIfSequenceDiffers","test/test_reads.py::TestSSAAReadWithX::testHashDiffersIfStructureDiffers","test/test_reads.py::TestSSAAReadWithX::testHashViaDict","test/test_reads.py::TestSSAAReadWithX::testHashViaSet","test/test_reads.py::TestSSAAReadWithX::testKeepSites","test/test_reads.py::TestSSAAReadWithX::testKeepSitesAllSites","test/test_reads.py::TestSSAAReadWithX::testKeepSitesNoSites","test/test_reads.py::TestSSAAReadWithX::testKeepSitesOutOfRange","test/test_reads.py::TestSSAAReadWithX::testReads","test/test_reads.py::TestSSAAReadWithX::testRemoveSites","test/test_reads.py::TestSSAAReadWithX::testRemoveSitesAllSites","test/test_reads.py::TestSSAAReadWithX::testRemoveSitesNoSites","test/test_reads.py::TestSSAAReadWithX::testRemoveSitesOutOfRange","test/test_reads.py::TestSSAAReadWithX::testSequenceContainingX","test/test_reads.py::TestSSAAReadWithX::testSequenceLengthMatchesStructureLength","test/test_reads.py::TestSSAAReadWithX::testToDict","test/test_reads.py::TestSSAAReadWithX::testToString","test/test_reads.py::TestSSAAReadWithX::testToStringWithExplicitFastaFormat","test/test_reads.py::TestSSAAReadWithX::testToStringWithExplicitFastaSSFormat","test/test_reads.py::TestSSAAReadWithX::testToStringWithStructureSuffix","test/test_reads.py::TestSSAAReadWithX::testToStringWithUnknownFormat","test/test_reads.py::TestTranslatedRead::testExpectedAttributes","test/test_reads.py::TestTranslatedRead::testExpectedFrame","test/test_reads.py::TestTranslatedRead::testFromDict","test/test_reads.py::TestTranslatedRead::testId","test/test_reads.py::TestTranslatedRead::testIdReverseComplemented","test/test_reads.py::TestTranslatedRead::testMaximumORFLength","test/test_reads.py::TestTranslatedRead::testMaximumORFLengthNoStops","test/test_reads.py::TestTranslatedRead::testOutOfRangeFrame","test/test_reads.py::TestTranslatedRead::testReverseComplemented","test/test_reads.py::TestTranslatedRead::testSequence","test/test_reads.py::TestTranslatedRead::testToDict","test/test_reads.py::TestReadClassNameToClass::testNames","test/test_reads.py::TestReads::testEmptyInitialReads","test/test_reads.py::TestReads::testInitialReads","test/test_reads.py::TestReads::testManuallyAddedReads","test/test_reads.py::TestReads::testManuallyAddedReadsLength","test/test_reads.py::TestReads::testNoReads","test/test_reads.py::TestReads::testNoReadsLength","test/test_reads.py::TestReads::testRepeatedIter","test/test_reads.py::TestReads::testSaveAsFASTA","test/test_reads.py::TestReads::testSaveAsFASTQ","test/test_reads.py::TestReads::testSaveAsFASTQFailsOnReadWithNoQuality","test/test_reads.py::TestReads::testSaveFASTAIsDefault","test/test_reads.py::TestReads::testSaveReturnsReadCount","test/test_reads.py::TestReads::testSaveToFileDescriptor","test/test_reads.py::T
estReads::testSaveWithUnknownFormat","test/test_reads.py::TestReads::testSaveWithUppercaseFormat","test/test_reads.py::TestReads::testSubclass","test/test_reads.py::TestReads::testSubclassLength","test/test_reads.py::TestReads::testSubclassWithAdditionalReads","test/test_reads.py::TestReads::testUnfilteredLengthAdditionalReads","test/test_reads.py::TestReads::testUnfilteredLengthAdditionalReadsAfterFiltering","test/test_reads.py::TestReads::testUnfilteredLengthBeforeIterating","test/test_reads.py::TestReads::testUnfilteredLengthInitialReads","test/test_reads.py::TestReads::testUnfilteredLengthInitialReadsAfterFiltering","test/test_reads.py::TestReads::testUnfilteredLengthInitialReadsIsReads","test/test_reads.py::TestReads::testUnfilteredLengthInitialReadsIsReadsWithAdditional","test/test_reads.py::TestReads::testUnfilteredLengthInitialSubclassThenFiltered","test/test_reads.py::TestReads::testUnfilteredLengthInitialSubclassWithAdditionalThenFiltered","test/test_reads.py::TestReads::testUnfilteredLengthInitialSubclassWithNoLen","test/test_reads.py::TestReads::testUnfilteredLengthNoReads","test/test_reads.py::TestReadsFiltering::testAddFiltersThenClearFilters","test/test_reads.py::TestReadsFiltering::testFilterBlacklist","test/test_reads.py::TestReadsFiltering::testFilterDoNotRemoveDescriptions","test/test_reads.py::TestReadsFiltering::testFilterDuplicates","test/test_reads.py::TestReadsFiltering::testFilterDuplicatesById","test/test_reads.py::TestReadsFiltering::testFilterHead","test/test_reads.py::TestReadsFiltering::testFilterHeadZero","test/test_reads.py::TestReadsFiltering::testFilterKeepSequences","test/test_reads.py::TestReadsFiltering::testFilterKeepSequencesNoSequences","test/test_reads.py::TestReadsFiltering::testFilterNegativeRegex","test/test_reads.py::TestReadsFiltering::testFilterNoArgs","test/test_reads.py::TestReadsFiltering::testFilterOnLengthEverythingMatches","test/test_reads.py::TestReadsFiltering::testFilterOnLengthNothingMatches","test/test_reads.py::TestReadsFiltering::testFilterOnMaxLength","test/test_reads.py::TestReadsFiltering::testFilterOnMinLength","test/test_reads.py::TestReadsFiltering::testFilterPositiveRegex","test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfFiveFromFiveReads","test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfFiveFromOneRead","test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfOneFromOneRead","test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfTwoFromFiveReads","test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfZeroReads","test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetSizeZeroNoReads","test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetSizeZeroTwoReads","test/test_reads.py::TestReadsFiltering::testFilterRemoveDescriptions","test/test_reads.py::TestReadsFiltering::testFilterRemoveGaps","test/test_reads.py::TestReadsFiltering::testFilterRemoveGapsWithQuality","test/test_reads.py::TestReadsFiltering::testFilterRemoveSequences","test/test_reads.py::TestReadsFiltering::testFilterRemoveSequencesNoSequences","test/test_reads.py::TestReadsFiltering::testFilterReturnsReadInstance","test/test_reads.py::TestReadsFiltering::testFilterTruncateTitles","test/test_reads.py::TestReadsFiltering::testFilterWhitelist","test/test_reads.py::TestReadsFiltering::testFilterWithMinLengthEqualToMaxLength","test/test_reads.py::TestReadsFiltering::testFilterWithModifierThatChangesIds","test/test_reads.py::TestReadsFiltering::testFilterWithModifierThatOmits","test/test_reads.py:
:TestReadsFiltering::testFilterWithModifierThatOmitsAndChangesIds","test/test_reads.py::TestReadsFiltering::testFilteredReadsInstanceHasExpectedLength","test/test_reads.py::TestReadsFiltering::testKeepSites","test/test_reads.py::TestReadsFiltering::testKeepSitesAllSites","test/test_reads.py::TestReadsFiltering::testKeepSitesNoSites","test/test_reads.py::TestReadsFiltering::testKeepSitesOutOfRange","test/test_reads.py::TestReadsFiltering::testKeepSitesWithQuality","test/test_reads.py::TestReadsFiltering::testLineNumberFile","test/test_reads.py::TestReadsFiltering::testLineNumberFileEmpty","test/test_reads.py::TestReadsFiltering::testLineNumberFileFirstLineTooSmall","test/test_reads.py::TestReadsFiltering::testLineNumberFileNonAscending","test/test_reads.py::TestReadsFiltering::testLineNumberFileRunOutOfSequences","test/test_reads.py::TestReadsFiltering::testRemoveAndKeepSites","test/test_reads.py::TestReadsFiltering::testRemoveSites","test/test_reads.py::TestReadsFiltering::testRemoveSitesAllSites","test/test_reads.py::TestReadsFiltering::testRemoveSitesNoSites","test/test_reads.py::TestReadsFiltering::testRemoveSitesOutOfRange","test/test_reads.py::TestReadsFiltering::testRemoveSitesWithQuality","test/test_reads.py::TestReadsFiltering::testSampleFractionAndNoTrueLengthRaisesValueError","test/test_reads.py::TestReadsFiltering::testSampleFractionAndRandomSubsetRaisesValueError","test/test_reads.py::TestReadsFiltering::testSampleFractionOne","test/test_reads.py::TestReadsFiltering::testSampleFractionPointOne","test/test_reads.py::TestReadsFiltering::testSampleFractionZero","test/test_reads.py::TestReadsInRAM::testAdd","test/test_reads.py::TestReadsInRAM::testFromReads","test/test_reads.py::TestReadsInRAM::testNoReads","test/test_reads.py::TestReadsInRAM::testOneReadIndex","test/test_reads.py::TestReadsInRAM::testOneReadLength","test/test_reads.py::TestReadsInRAM::testOneReadList","test/test_reads.py::TestReadsInRAM::testSetItem","test/test_reads.py::TestReadsInRAM::testTwoReadsIndex","test/test_reads.py::TestReadsInRAM::testTwoReadsLength","test/test_reads.py::TestReadsInRAM::testTwoReadsList","test/test_reads.py::TestSummarizePosition::testCorrectFrequencies","test/test_reads.py::TestSummarizePosition::testExcludeShortSequences","test/test_reads.py::TestSummarizePosition::testFrequenciesNoReads","test/test_reads.py::TestSummarizePosition::testIndexLargerThanSequenceLength","test/test_reads.py::TestSummarizePosition::testNumberOfExclusionsNoReads","test/test_reads.py::TestSitesMatching::testAllMatches","test/test_reads.py::TestSitesMatching::testIgnoreCase","test/test_reads.py::TestSitesMatching::testMatchCase","test/test_reads.py::TestSitesMatching::testMultipleReadsAll","test/test_reads.py::TestSitesMatching::testMultipleReadsAllWithDifferingLengths","test/test_reads.py::TestSitesMatching::testMultipleReadsAny","test/test_reads.py::TestSitesMatching::testMultipleReadsAnyWithDifferingLengths","test/test_reads.py::TestSitesMatching::testNoMatches","test/test_reads.py::TestSitesMatching::testPartialMatch"]}
\"test/test_reads.py::TestReads::testUnfilteredLengthInitialReadsIsReadsWithAdditional\",\n \"test/test_reads.py::TestReads::testUnfilteredLengthInitialSubclassThenFiltered\",\n \"test/test_reads.py::TestReads::testUnfilteredLengthInitialSubclassWithAdditionalThenFiltered\",\n \"test/test_reads.py::TestReads::testUnfilteredLengthInitialSubclassWithNoLen\",\n \"test/test_reads.py::TestReads::testUnfilteredLengthNoReads\",\n \"test/test_reads.py::TestReadsFiltering::testAddFiltersThenClearFilters\",\n \"test/test_reads.py::TestReadsFiltering::testFilterBlacklist\",\n \"test/test_reads.py::TestReadsFiltering::testFilterDoNotRemoveDescriptions\",\n \"test/test_reads.py::TestReadsFiltering::testFilterDuplicates\",\n \"test/test_reads.py::TestReadsFiltering::testFilterDuplicatesById\",\n \"test/test_reads.py::TestReadsFiltering::testFilterHead\",\n \"test/test_reads.py::TestReadsFiltering::testFilterHeadZero\",\n \"test/test_reads.py::TestReadsFiltering::testFilterKeepSequences\",\n \"test/test_reads.py::TestReadsFiltering::testFilterKeepSequencesNoSequences\",\n \"test/test_reads.py::TestReadsFiltering::testFilterNegativeRegex\",\n \"test/test_reads.py::TestReadsFiltering::testFilterNoArgs\",\n \"test/test_reads.py::TestReadsFiltering::testFilterOnLengthEverythingMatches\",\n \"test/test_reads.py::TestReadsFiltering::testFilterOnLengthNothingMatches\",\n \"test/test_reads.py::TestReadsFiltering::testFilterOnMaxLength\",\n \"test/test_reads.py::TestReadsFiltering::testFilterOnMinLength\",\n \"test/test_reads.py::TestReadsFiltering::testFilterPositiveRegex\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfFiveFromFiveReads\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfFiveFromOneRead\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfOneFromOneRead\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfTwoFromFiveReads\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfZeroReads\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetSizeZeroNoReads\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetSizeZeroTwoReads\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRemoveDescriptions\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRemoveGaps\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRemoveGapsWithQuality\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRemoveSequences\",\n \"test/test_reads.py::TestReadsFiltering::testFilterRemoveSequencesNoSequences\",\n \"test/test_reads.py::TestReadsFiltering::testFilterReturnsReadInstance\",\n \"test/test_reads.py::TestReadsFiltering::testFilterTruncateTitles\",\n \"test/test_reads.py::TestReadsFiltering::testFilterWhitelist\",\n \"test/test_reads.py::TestReadsFiltering::testFilterWithMinLengthEqualToMaxLength\",\n \"test/test_reads.py::TestReadsFiltering::testFilterWithModifierThatChangesIds\",\n \"test/test_reads.py::TestReadsFiltering::testFilterWithModifierThatOmits\",\n \"test/test_reads.py::TestReadsFiltering::testFilterWithModifierThatOmitsAndChangesIds\",\n \"test/test_reads.py::TestReadsFiltering::testFilteredReadsInstanceHasExpectedLength\",\n \"test/test_reads.py::TestReadsFiltering::testKeepSites\",\n \"test/test_reads.py::TestReadsFiltering::testKeepSitesAllSites\",\n \"test/test_reads.py::TestReadsFiltering::testKeepSitesNoSites\",\n \"test/test_reads.py::TestReadsFiltering::testKeepSitesOutOfRange\",\n \"test/test_reads.py::TestReadsFiltering::testKeepSitesWithQuality\",\n 
\"test/test_reads.py::TestReadsFiltering::testLineNumberFile\",\n \"test/test_reads.py::TestReadsFiltering::testLineNumberFileEmpty\",\n \"test/test_reads.py::TestReadsFiltering::testLineNumberFileFirstLineTooSmall\",\n \"test/test_reads.py::TestReadsFiltering::testLineNumberFileNonAscending\",\n \"test/test_reads.py::TestReadsFiltering::testLineNumberFileRunOutOfSequences\",\n \"test/test_reads.py::TestReadsFiltering::testRemoveAndKeepSites\",\n \"test/test_reads.py::TestReadsFiltering::testRemoveSites\",\n \"test/test_reads.py::TestReadsFiltering::testRemoveSitesAllSites\",\n \"test/test_reads.py::TestReadsFiltering::testRemoveSitesNoSites\",\n \"test/test_reads.py::TestReadsFiltering::testRemoveSitesOutOfRange\",\n \"test/test_reads.py::TestReadsFiltering::testRemoveSitesWithQuality\",\n \"test/test_reads.py::TestReadsFiltering::testSampleFractionAndNoTrueLengthRaisesValueError\",\n \"test/test_reads.py::TestReadsFiltering::testSampleFractionAndRandomSubsetRaisesValueError\",\n \"test/test_reads.py::TestReadsFiltering::testSampleFractionOne\",\n \"test/test_reads.py::TestReadsFiltering::testSampleFractionPointOne\",\n \"test/test_reads.py::TestReadsFiltering::testSampleFractionZero\",\n \"test/test_reads.py::TestReadsInRAM::testAdd\",\n \"test/test_reads.py::TestReadsInRAM::testFromReads\",\n \"test/test_reads.py::TestReadsInRAM::testNoReads\",\n \"test/test_reads.py::TestReadsInRAM::testOneReadIndex\",\n \"test/test_reads.py::TestReadsInRAM::testOneReadLength\",\n \"test/test_reads.py::TestReadsInRAM::testOneReadList\",\n \"test/test_reads.py::TestReadsInRAM::testSetItem\",\n \"test/test_reads.py::TestReadsInRAM::testTwoReadsIndex\",\n \"test/test_reads.py::TestReadsInRAM::testTwoReadsLength\",\n \"test/test_reads.py::TestReadsInRAM::testTwoReadsList\",\n \"test/test_reads.py::TestSummarizePosition::testCorrectFrequencies\",\n \"test/test_reads.py::TestSummarizePosition::testExcludeShortSequences\",\n \"test/test_reads.py::TestSummarizePosition::testFrequenciesNoReads\",\n \"test/test_reads.py::TestSummarizePosition::testIndexLargerThanSequenceLength\",\n \"test/test_reads.py::TestSummarizePosition::testNumberOfExclusionsNoReads\",\n \"test/test_reads.py::TestSitesMatching::testAllMatches\",\n \"test/test_reads.py::TestSitesMatching::testIgnoreCase\",\n \"test/test_reads.py::TestSitesMatching::testMatchCase\",\n \"test/test_reads.py::TestSitesMatching::testMultipleReadsAll\",\n \"test/test_reads.py::TestSitesMatching::testMultipleReadsAllWithDifferingLengths\",\n \"test/test_reads.py::TestSitesMatching::testMultipleReadsAny\",\n \"test/test_reads.py::TestSitesMatching::testMultipleReadsAnyWithDifferingLengths\",\n \"test/test_reads.py::TestSitesMatching::testNoMatches\",\n \"test/test_reads.py::TestSitesMatching::testPartialMatch\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"MIT License"},"__index_level_0__":{"kind":"number","value":2592,"string":"2,592"},"num_tokens_patch":{"kind":"number","value":1404,"string":"1,404"},"before_filepaths":{"kind":"list like","value":["dark/__init__.py","dark/filter.py","dark/reads.py"],"string":"[\n \"dark/__init__.py\",\n \"dark/filter.py\",\n \"dark/reads.py\"\n]"}}},{"rowIdx":585,"cells":{"instance_id":{"kind":"string","value":"conan-io__conan-2952"},"base_commit":{"kind":"string","value":"c3a6ed5dc7b5e27ac69191e36aa7592e47ce7759"},"created_at":{"kind":"string","value":"2018-05-29 
10:29:36"},"environment_setup_commit":{"kind":"string","value":"c3a6ed5dc7b5e27ac69191e36aa7592e47ce7759"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/conans/client/build/autotools_environment.py b/conans/client/build/autotools_environment.py\nindex 924161e9c..9bf4bd3e8 100644\n--- a/conans/client/build/autotools_environment.py\n+++ b/conans/client/build/autotools_environment.py\n@@ -14,6 +14,7 @@ from conans.client.tools.win import unix_path\n from conans.tools import (environment_append, args_to_string, cpu_count, cross_building,\n detected_architecture, get_gnu_triplet)\n from conans.errors import ConanException\n+from conans.util.files import get_abs_path\n \n \n class AutoToolsBuildEnvironment(object):\n@@ -131,7 +132,9 @@ class AutoToolsBuildEnvironment(object):\n triplet_args.append(\"--target=%s\" % (target or self.target))\n \n if pkg_config_paths:\n- pkg_env = {\"PKG_CONFIG_PATH\": os.pathsep.join(pkg_config_paths)}\n+ pkg_env = {\"PKG_CONFIG_PATH\":\n+ os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)\n+ for f in pkg_config_paths)}\n else:\n # If we are using pkg_config generator automate the pcs location, otherwise it could\n # read wrong files\ndiff --git a/conans/client/build/cmake.py b/conans/client/build/cmake.py\nindex 9964d0836..b5f8cb843 100644\n--- a/conans/client/build/cmake.py\n+++ b/conans/client/build/cmake.py\n@@ -12,7 +12,7 @@ from conans.errors import ConanException\n from conans.model.conan_file import ConanFile\n from conans.model.version import Version\n from conans.util.env_reader import get_env\n-from conans.util.files import mkdir\n+from conans.util.files import mkdir, get_abs_path\n from conans.tools import cpu_count, args_to_string\n from conans import tools\n from conans.util.log import logger\n@@ -28,7 +28,8 @@ def _get_env_cmake_system_name():\n class CMake(object):\n \n def __init__(self, conanfile, generator=None, cmake_system_name=True,\n- parallel=True, build_type=None, toolset=None, make_program=None, set_cmake_flags=False):\n+ parallel=True, build_type=None, toolset=None, make_program=None,\n+ set_cmake_flags=False):\n \"\"\"\n :param settings_or_conanfile: Conanfile instance (or settings for retro compatibility)\n :param generator: Generator name to use or none to autodetect\n@@ -370,7 +371,8 @@ class CMake(object):\n self._conanfile.run(command)\n \n def configure(self, args=None, defs=None, source_dir=None, build_dir=None,\n- source_folder=None, build_folder=None, cache_build_folder=None):\n+ source_folder=None, build_folder=None, cache_build_folder=None,\n+ pkg_config_paths=None):\n \n # TODO: Deprecate source_dir and build_dir in favor of xxx_folder\n if not self._conanfile.should_configure:\n@@ -387,12 +389,26 @@ class CMake(object):\n defs_to_string(defs),\n args_to_string([source_dir])\n ])\n- command = \"cd %s && cmake %s\" % (args_to_string([self.build_dir]), arg_list)\n- if platform.system() == \"Windows\" and self.generator == \"MinGW Makefiles\":\n- with tools.remove_from_path(\"sh\"):\n- self._run(command)\n+\n+\n+ if pkg_config_paths:\n+ pkg_env = {\"PKG_CONFIG_PATH\":\n+ os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)\n+ for f in pkg_config_paths)}\n else:\n- self._run(command)\n+ # If we are using pkg_config generator automate the pcs location, otherwise it could\n+ # read wrong files\n+ set_env = \"pkg_config\" in self._conanfile.generators \\\n+ and \"PKG_CONFIG_PATH\" not in os.environ\n+ pkg_env = {\"PKG_CONFIG_PATH\": self._conanfile.install_folder} 
if set_env else {}\n+\n+ with tools.environment_append(pkg_env):\n+ command = \"cd %s && cmake %s\" % (args_to_string([self.build_dir]), arg_list)\n+ if platform.system() == \"Windows\" and self.generator == \"MinGW Makefiles\":\n+ with tools.remove_from_path(\"sh\"):\n+ self._conanfile.run(command)\n+ else:\n+ self._conanfile.run(command)\n \n def build(self, args=None, build_dir=None, target=None):\n if not self._conanfile.should_build:\ndiff --git a/conans/client/build/meson.py b/conans/client/build/meson.py\nindex 1545a59d7..b8a7ff4b3 100644\n--- a/conans/client/build/meson.py\n+++ b/conans/client/build/meson.py\n@@ -4,7 +4,7 @@ from conans import tools\n from conans.client import join_arguments, defs_to_string\n from conans.errors import ConanException\n from conans.tools import args_to_string\n-from conans.util.files import mkdir\n+from conans.util.files import mkdir, get_abs_path\n \n \n class Meson(object):\n@@ -53,14 +53,6 @@ class Meson(object):\n def build_folder(self, value):\n self.build_dir = value\n \n- @staticmethod\n- def _get_dir(folder, origin):\n- if folder:\n- if os.path.isabs(folder):\n- return folder\n- return os.path.join(origin, folder)\n- return origin\n-\n def _get_dirs(self, source_folder, build_folder, source_dir, build_dir, cache_build_folder):\n if (source_folder or build_folder) and (source_dir or build_dir):\n raise ConanException(\"Use 'build_folder'/'source_folder'\")\n@@ -69,11 +61,11 @@ class Meson(object):\n build_ret = build_dir or self.build_dir or self._conanfile.build_folder\n source_ret = source_dir or self._conanfile.source_folder\n else:\n- build_ret = self._get_dir(build_folder, self._conanfile.build_folder)\n- source_ret = self._get_dir(source_folder, self._conanfile.source_folder)\n+ build_ret = get_abs_path(build_folder, self._conanfile.build_folder)\n+ source_ret = get_abs_path(source_folder, self._conanfile.source_folder)\n \n if self._conanfile.in_local_cache and cache_build_folder:\n- build_ret = self._get_dir(cache_build_folder, self._conanfile.build_folder)\n+ build_ret = get_abs_path(cache_build_folder, self._conanfile.build_folder)\n \n return source_ret, build_ret\n \n@@ -90,7 +82,7 @@ class Meson(object):\n cache_build_folder)\n \n if pkg_config_paths:\n- pc_paths = os.pathsep.join(self._get_dir(f, self._conanfile.install_folder)\n+ pc_paths = os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)\n for f in pkg_config_paths)\n else:\n pc_paths = self._conanfile.install_folder\ndiff --git a/conans/util/files.py b/conans/util/files.py\nindex d8492cd72..8c6a859a1 100644\n--- a/conans/util/files.py\n+++ b/conans/util/files.py\n@@ -181,6 +181,14 @@ def relative_dirs(path):\n return ret\n \n \n+def get_abs_path(folder, origin):\n+ if folder:\n+ if os.path.isabs(folder):\n+ return folder\n+ return os.path.join(origin, folder)\n+ return origin\n+\n+\n def _change_permissions(func, path, exc_info):\n if not os.access(path, os.W_OK):\n os.chmod(path, stat.S_IWUSR)\n"},"problem_statement":{"kind":"string","value":"CMake build wrapper should set PKG_CONFIG_PATH\n\r\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\nconan version 1.0.4 or master\r\n\r\nA lot of cmake scripts use both `find_package` (`FindFoo.cmake`-based) and 
`pkg_check_modules` (`pkg-config`-based). CMake build wrapper should automatically provide `PKG_CONFIG_PATH` env var set to build directory or to recipe-provided paths. Exact same behavior is seen in `AutoToolsBuildEnvironment` or `Meson`. `CMake` should not be an exception."},"repo":{"kind":"string","value":"conan-io/conan"},"test_patch":{"kind":"string","value":"diff --git a/conans/test/build_helpers/autotools_configure_test.py b/conans/test/build_helpers/autotools_configure_test.py\nindex fed69a0a3..9f0fcd983 100644\n--- a/conans/test/build_helpers/autotools_configure_test.py\n+++ b/conans/test/build_helpers/autotools_configure_test.py\n@@ -1,16 +1,18 @@\n+import os\n import platform\n import unittest\n+from collections import namedtuple\n \n-from conans.client.build.autotools_environment import AutoToolsBuildEnvironment\n from conans import tools\n+from conans.client.build.autotools_environment import AutoToolsBuildEnvironment\n from conans.client.tools.oss import cpu_count\n+from conans.model.ref import ConanFileReference\n+from conans.model.settings import Settings\n from conans.paths import CONANFILE\n-from conans.test.utils.conanfile import MockConanfile, MockSettings, MockOptions\n+from conans.test.build_helpers.cmake_test import ConanFileMock\n from conans.test.util.tools_test import RunnerMock\n+from conans.test.utils.conanfile import MockConanfile, MockSettings, MockOptions\n from conans.test.utils.tools import TestClient\n-from conans.test.build_helpers.cmake_test import ConanFileMock\n-from conans.model.settings import Settings\n-from collections import namedtuple\n \n \n class AutoToolsConfigureTest(unittest.TestCase):\n@@ -416,9 +418,12 @@ class HelloConan(ConanFile):\n self.assertIn(\"PKG_CONFIG_PATH=%s\" % client.client_cache.conan_folder, client.out)\n \n client.save({CONANFILE: conanfile % (\"'pkg_config'\",\n- \"pkg_config_paths=['/tmp/hello', '/tmp/foo']\")})\n+ \"pkg_config_paths=['/tmp/hello', 'foo']\")})\n client.run(\"create . 
conan/testing\")\n- self.assertIn(\"PKG_CONFIG_PATH=/tmp/hello:/tmp/foo\", client.out)\n+ ref = ConanFileReference.loads(\"Hello/1.2.1@conan/testing\")\n+ builds_folder = client.client_cache.builds(ref)\n+ bf = os.path.join(builds_folder, os.listdir(builds_folder)[0])\n+ self.assertIn(\"PKG_CONFIG_PATH=/tmp/hello:%s/foo\" % bf, client.out)\n \n def cross_build_command_test(self):\n runner = RunnerMock()\ndiff --git a/conans/test/build_helpers/cmake_test.py b/conans/test/build_helpers/cmake_test.py\nindex 812c53444..09aab1631 100644\n--- a/conans/test/build_helpers/cmake_test.py\n+++ b/conans/test/build_helpers/cmake_test.py\n@@ -688,6 +688,38 @@ build_type: [ Release]\n cmake.configure()\n self.assertNotIn(self.tempdir, conanfile.path)\n \n+ def test_pkg_config_path(self):\n+ conanfile = ConanFileMock()\n+ conanfile.generators = [\"pkg_config\"]\n+ conanfile.install_folder = \"/my_install/folder/\"\n+ settings = Settings.loads(default_settings_yml)\n+ settings.os = \"Windows\"\n+ settings.compiler = \"Visual Studio\"\n+ settings.compiler.version = \"12\"\n+ settings.arch = \"x86\"\n+ conanfile.settings = settings\n+ cmake = CMake(conanfile)\n+ cmake.configure()\n+ self.assertEquals(conanfile.captured_env[\"PKG_CONFIG_PATH\"], \"/my_install/folder/\")\n+\n+ conanfile.generators = []\n+ cmake = CMake(conanfile)\n+ cmake.configure()\n+ self.assertNotIn(\"PKG_CONFIG_PATH\", conanfile.captured_env)\n+\n+ cmake = CMake(conanfile)\n+ cmake.configure(pkg_config_paths=[\"reldir1\", \"/abspath2/to/other\"])\n+ self.assertEquals(conanfile.captured_env[\"PKG_CONFIG_PATH\"],\n+ os.path.pathsep.join([\"/my_install/folder/reldir1\",\n+ \"/abspath2/to/other\"]))\n+\n+ # If there is already a PKG_CONFIG_PATH do not set it\n+ conanfile.generators = [\"pkg_config\"]\n+ cmake = CMake(conanfile)\n+ with tools.environment_append({\"PKG_CONFIG_PATH\": \"do_not_mess_with_this\"}):\n+ cmake.configure()\n+ self.assertEquals(conanfile.captured_env[\"PKG_CONFIG_PATH\"], \"do_not_mess_with_this\")\n+\n def test_shared(self):\n settings = Settings.loads(default_settings_yml)\n settings.os = \"Windows\"\n@@ -843,7 +875,10 @@ class ConanFileMock(ConanFile):\n self.should_configure = True\n self.should_build = True\n self.should_install = True\n+ self.generators = []\n+ self.captured_env = {}\n \n def run(self, command):\n self.command = command\n self.path = os.environ[\"PATH\"]\n+ self.captured_env = {key: value for key, value in os.environ.items()}\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"merge_commit\",\n \"failed_lite_validators\": [\n \"has_hyperlinks\",\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 0,\n \"test_score\": 0\n },\n \"num_modified_files\": 4\n}"},"version":{"kind":"string","value":"1.3"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"nose\",\n \"pytest\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.6\",\n \"reqs_path\": [\n \"conans/requirements.txt\",\n \"conans/requirements_osx.txt\",\n \"conans/requirements_server.txt\",\n \"conans/requirements_dev.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W 
ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"asn1crypto==1.5.1\nastroid==1.6.6\nattrs==22.2.0\nbeautifulsoup4==4.12.3\nbottle==0.12.25\ncertifi==2021.5.30\ncffi==1.15.1\ncharset-normalizer==2.0.12\ncodecov==2.1.13\ncolorama==0.3.9\n-e git+https://github.com/conan-io/conan.git@c3a6ed5dc7b5e27ac69191e36aa7592e47ce7759#egg=conan\ncoverage==4.2\ncryptography==2.1.4\ndeprecation==2.0.7\ndistro==1.1.0\nfasteners==0.19\nfuture==0.16.0\nidna==3.10\nimportlib-metadata==4.8.3\niniconfig==1.1.1\nisort==5.10.1\nlazy-object-proxy==1.7.1\nmccabe==0.7.0\nmock==1.3.0\nndg-httpsclient==0.4.4\nnode-semver==0.2.0\nnose==1.3.7\npackaging==21.3\nparameterized==0.8.1\npatch==1.16\npbr==6.1.1\npluggy==1.0.0\npluginbase==0.7\npy==1.11.0\npyasn==1.5.0b7\npyasn1==0.5.1\npycparser==2.21\nPygments==2.14.0\nPyJWT==1.7.1\npylint==1.8.4\npyOpenSSL==17.5.0\npyparsing==3.1.4\npytest==7.0.1\nPyYAML==3.12\nrequests==2.27.1\nsix==1.17.0\nsoupsieve==2.3.2.post1\ntomli==1.2.3\ntyping_extensions==4.1.1\nurllib3==1.26.20\nwaitress==2.0.0\nWebOb==1.8.9\nWebTest==2.0.35\nwrapt==1.16.0\nzipp==3.6.0\n"},"environment":{"kind":"string","value":"name: conan\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - pip=21.2.2=py36h06a4308_0\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - asn1crypto==1.5.1\n - astroid==1.6.6\n - attrs==22.2.0\n - beautifulsoup4==4.12.3\n - bottle==0.12.25\n - cffi==1.15.1\n - charset-normalizer==2.0.12\n - codecov==2.1.13\n - colorama==0.3.9\n - coverage==4.2\n - cryptography==2.1.4\n - deprecation==2.0.7\n - distro==1.1.0\n - fasteners==0.19\n - future==0.16.0\n - idna==3.10\n - importlib-metadata==4.8.3\n - iniconfig==1.1.1\n - isort==5.10.1\n - lazy-object-proxy==1.7.1\n - mccabe==0.7.0\n - mock==1.3.0\n - ndg-httpsclient==0.4.4\n - node-semver==0.2.0\n - nose==1.3.7\n - packaging==21.3\n - parameterized==0.8.1\n - patch==1.16\n - pbr==6.1.1\n - pluggy==1.0.0\n - pluginbase==0.7\n - py==1.11.0\n - pyasn==1.5.0b7\n - pyasn1==0.5.1\n - pycparser==2.21\n - pygments==2.14.0\n - pyjwt==1.7.1\n - pylint==1.8.4\n - pyopenssl==17.5.0\n - pyparsing==3.1.4\n - pytest==7.0.1\n - pyyaml==3.12\n - requests==2.27.1\n - six==1.17.0\n - soupsieve==2.3.2.post1\n - tomli==1.2.3\n - typing-extensions==4.1.1\n - urllib3==1.26.20\n - waitress==2.0.0\n - webob==1.8.9\n - webtest==2.0.35\n - wrapt==1.16.0\n - zipp==3.6.0\nprefix: /opt/conda/envs/conan\n"},"FAIL_TO_PASS":{"kind":"list like","value":["conans/test/build_helpers/cmake_test.py::CMakeTest::test_pkg_config_path"],"string":"[\n \"conans/test/build_helpers/cmake_test.py::CMakeTest::test_pkg_config_path\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":["conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_pkg_config_paths"],"string":"[\n \"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_pkg_config_paths\"\n]"},"PASS_TO_PASS":{"kind":"list 
like","value":["conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_cppstd","conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_make_targets_install","conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_mocked_methods","conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_previous_env","conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_variables","conans/test/build_helpers/cmake_test.py::CMakeTest::test_clean_sh_path","conans/test/build_helpers/cmake_test.py::CMakeTest::test_cmake_system_version_android","conans/test/build_helpers/cmake_test.py::CMakeTest::test_cores_ancient_visual","conans/test/build_helpers/cmake_test.py::CMakeTest::test_deprecated_behaviour","conans/test/build_helpers/cmake_test.py::CMakeTest::test_missing_settings","conans/test/build_helpers/cmake_test.py::CMakeTest::test_run_tests","conans/test/build_helpers/cmake_test.py::CMakeTest::test_shared","conans/test/build_helpers/cmake_test.py::CMakeTest::test_sysroot","conans/test/build_helpers/cmake_test.py::CMakeTest::test_verbose"],"string":"[\n \"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_cppstd\",\n \"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_make_targets_install\",\n \"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_mocked_methods\",\n \"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_previous_env\",\n \"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_variables\",\n \"conans/test/build_helpers/cmake_test.py::CMakeTest::test_clean_sh_path\",\n \"conans/test/build_helpers/cmake_test.py::CMakeTest::test_cmake_system_version_android\",\n \"conans/test/build_helpers/cmake_test.py::CMakeTest::test_cores_ancient_visual\",\n \"conans/test/build_helpers/cmake_test.py::CMakeTest::test_deprecated_behaviour\",\n \"conans/test/build_helpers/cmake_test.py::CMakeTest::test_missing_settings\",\n \"conans/test/build_helpers/cmake_test.py::CMakeTest::test_run_tests\",\n \"conans/test/build_helpers/cmake_test.py::CMakeTest::test_shared\",\n \"conans/test/build_helpers/cmake_test.py::CMakeTest::test_sysroot\",\n \"conans/test/build_helpers/cmake_test.py::CMakeTest::test_verbose\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"MIT License"},"__index_level_0__":{"kind":"number","value":2594,"string":"2,594"},"num_tokens_patch":{"kind":"number","value":1786,"string":"1,786"},"before_filepaths":{"kind":"list like","value":["conans/client/build/autotools_environment.py","conans/client/build/cmake.py","conans/client/build/meson.py","conans/util/files.py"],"string":"[\n \"conans/client/build/autotools_environment.py\",\n \"conans/client/build/cmake.py\",\n \"conans/client/build/meson.py\",\n \"conans/util/files.py\"\n]"}}},{"rowIdx":586,"cells":{"instance_id":{"kind":"string","value":"ingresso-group__pyticketswitch-70"},"base_commit":{"kind":"string","value":"a22c4a3679174b1798acda89e59559930eb1f1a3"},"created_at":{"kind":"string","value":"2018-05-29 18:32:26"},"environment_setup_commit":{"kind":"string","value":"a22c4a3679174b1798acda89e59559930eb1f1a3"},"hints_text":{"kind":"string","value":"codecov[bot]: # [Codecov](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=h1) Report\n> Merging 
[#70](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=desc) into [master](https://codecov.io/gh/ingresso-group/pyticketswitch/commit/a22c4a3679174b1798acda89e59559930eb1f1a3?src=pr&el=desc) will **increase** coverage by `0.02%`.\n> The diff coverage is `100%`.\n\n[](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=tree)\n\n```diff\n@@ Coverage Diff @@\n## master #70 +/- ##\n==========================================\n+ Coverage 99.48% 99.51% +0.02% \n==========================================\n Files 37 37 \n Lines 1764 2046 +282 \n==========================================\n+ Hits 1755 2036 +281 \n- Misses 9 10 +1\n```\n\n\n| [Impacted Files](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=tree) | Coverage Δ | |\n|---|---|---|\n| [pyticketswitch/reservation.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvcmVzZXJ2YXRpb24ucHk=) | `100% <100%> (ø)` | :arrow_up: |\n| [pyticketswitch/exceptions.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvZXhjZXB0aW9ucy5weQ==) | `100% <100%> (ø)` | :arrow_up: |\n| [pyticketswitch/trolley.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvdHJvbGxleS5weQ==) | `94.73% <100%> (+0.09%)` | :arrow_up: |\n| [pyticketswitch/client.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvY2xpZW50LnB5) | `99.31% <100%> (+0.13%)` | :arrow_up: |\n| [pyticketswitch/availability.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvYXZhaWxhYmlsaXR5LnB5) | `100% <0%> (ø)` | :arrow_up: |\n| [pyticketswitch/mixins.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvbWl4aW5zLnB5) | `100% <0%> (ø)` | :arrow_up: |\n\n------\n\n[Continue to review full report at Codecov](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=continue).\n> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)\n> `Δ = absolute (impact)`, `ø = not affected`, `? = missing data`\n> Powered by [Codecov](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=footer). Last update [a22c4a3...3a2e91d](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=lastupdated). 
Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).\n"},"patch":{"kind":"string","value":"diff --git a/pyticketswitch/client.py b/pyticketswitch/client.py\nindex 8b00be5..7726b43 100644\n--- a/pyticketswitch/client.py\n+++ b/pyticketswitch/client.py\n@@ -1081,7 +1081,8 @@ class Client(object):\n def get_trolley(self, token=None, number_of_seats=None, discounts=None,\n seats=None, send_codes=None, ticket_type_code=None,\n performance_id=None, price_band_code=None,\n- item_numbers_to_remove=None, **kwargs):\n+ item_numbers_to_remove=None,\n+ raise_on_unavailable_order=False, **kwargs):\n \n \"\"\"Retrieve the contents of a trolley from the API.\n \n@@ -1097,14 +1098,17 @@ class Client(object):\n seats (list): list of seat IDs.\n send_codes (dict): send codes indexed on backend source\n code.\n- ticket_type_code: (string): code of ticket type to add to\n+ ticket_type_code (string): code of ticket type to add to\n the trolley.\n- performance_id: (string): id of the performance to add to\n+ performance_id (string): id of the performance to add to\n the trolley.\n- price_band_code: (string): code of price band to add to\n+ price_band_code (string): code of price band to add to\n the trolley.\n- item_numbers_to_remove: (list): list of item numbers to\n+ item_numbers_to_remove (list): list of item numbers to\n remove from trolley.\n+ raise_on_unavailable_order (bool): When set to ``True`` this method\n+ will raise an exception when the API was not able to add an\n+ order to the trolley as it was unavailable.\n **kwargs: arbitary additional raw keyword arguments to add the\n parameters.\n \n@@ -1116,6 +1120,9 @@ class Client(object):\n Raises:\n InvalidParametersError: when there is an issue with the provided\n parameters.\n+ OrderUnavailableError: when ``raise_on_unavailable_order`` is set\n+ to ``True`` and the requested addition to a trolley was\n+ unavailable.\n \n .. _`/f13/trolley.v1`: http://docs.ingresso.co.uk/#trolley\n \n@@ -1133,6 +1140,11 @@ class Client(object):\n trolley = Trolley.from_api_data(response)\n meta = CurrencyMeta.from_api_data(response)\n \n+ if raise_on_unavailable_order:\n+ if trolley and trolley.input_contained_unavailable_order:\n+ raise exceptions.OrderUnavailableError(\n+ \"inputs contained unavailable order\")\n+\n return trolley, meta\n \n def get_upsells(self, token=None, number_of_seats=None, discounts=None,\n@@ -1278,7 +1290,8 @@ class Client(object):\n def make_reservation(self, token=None, number_of_seats=None, discounts=None,\n seats=None, send_codes=None, ticket_type_code=None,\n performance_id=None, price_band_code=None,\n- item_numbers_to_remove=None, **kwargs):\n+ item_numbers_to_remove=None,\n+ raise_on_unavailable_order=False, **kwargs):\n \n \"\"\"Attempt to reserve all the items in the given trolley\n \n@@ -1314,6 +1327,9 @@ class Client(object):\n the trolley\n item_numbers_to_remove: (list): list of item numbers to\n remove from trolley.\n+ raise_on_unavailable_order (bool): When set to ``True`` this method\n+ will raise an exception when the API was not able to add an\n+ order to the trolley as it was unavailable.\n **kwargs: arbitary additional raw keyword arguments to add the\n parameters.\n \n@@ -1325,6 +1341,9 @@ class Client(object):\n Raises:\n InvalidParametersError: when there is an issue with the provided\n parameters.\n+ OrderUnavailableError: when ``raise_on_unavailable_order`` is set\n+ to ``True`` and the requested addition to a trolley was\n+ unavailable.\n \n .. 
_`/f13/reserve.v1`: http://docs.ingresso.co.uk/#reserve\n \n@@ -1342,15 +1361,22 @@ class Client(object):\n reservation = Reservation.from_api_data(response)\n meta = CurrencyMeta.from_api_data(response)\n \n+ if raise_on_unavailable_order:\n+ if reservation and reservation.input_contained_unavailable_order:\n+ raise exceptions.OrderUnavailableError(\n+ \"inputs contained unavailable order\")\n+\n return reservation, meta\n \n- def release_reservation(self, transaction_uuid):\n+ def release_reservation(self, transaction_uuid, **kwargs):\n \"\"\"Release an existing reservation.\n \n Wraps `/f13/release.v1`_\n \n Args:\n transaction_uuid (str): the identifier of the reservaiton.\n+ **kwargs: arbitary additional raw keyword arguments to add the\n+ parameters.\n \n Returns:\n bool: :obj:`True` if the reservation was successfully released\n@@ -1361,7 +1387,8 @@ class Client(object):\n \"\"\"\n \n params = {'transaction_uuid': transaction_uuid}\n- response = self.make_request('release.v1', params, method=POST)\n+ kwargs.update(params)\n+ response = self.make_request('release.v1', kwargs, method=POST)\n \n return response.get('released_ok', False)\n \ndiff --git a/pyticketswitch/exceptions.py b/pyticketswitch/exceptions.py\nindex f88f636..3aef367 100644\n--- a/pyticketswitch/exceptions.py\n+++ b/pyticketswitch/exceptions.py\n@@ -51,3 +51,7 @@ class BackendThrottleError(BackendError):\n \n class CallbackGoneError(APIError):\n pass\n+\n+\n+class OrderUnavailableError(PyticketswitchError):\n+ pass\ndiff --git a/pyticketswitch/reservation.py b/pyticketswitch/reservation.py\nindex b12d2bb..a75087e 100644\n--- a/pyticketswitch/reservation.py\n+++ b/pyticketswitch/reservation.py\n@@ -46,9 +46,12 @@ class Reservation(Status):\n \n \"\"\"\n \n- def __init__(self, unreserved_orders=None, *args, **kwargs):\n+ def __init__(self, unreserved_orders=None,\n+ input_contained_unavailable_order=False, *args, **kwargs):\n+\n super(Reservation, self).__init__(*args, **kwargs)\n self.unreserved_orders = unreserved_orders\n+ self.input_contained_unavailable_order = input_contained_unavailable_order\n \n @classmethod\n def from_api_data(cls, data):\n@@ -75,7 +78,9 @@ class Reservation(Status):\n for order in raw_unreserved_orders\n ]\n \n- inst.unreserved_orders=unreserved_orders\n+ inst.unreserved_orders = unreserved_orders\n+ inst.input_contained_unavailable_order = data.get(\n+ 'input_contained_unavailable_order', False)\n \n return inst\n \ndiff --git a/pyticketswitch/trolley.py b/pyticketswitch/trolley.py\nindex 0a78e2e..df54c75 100644\n--- a/pyticketswitch/trolley.py\n+++ b/pyticketswitch/trolley.py\n@@ -25,11 +25,14 @@ class Trolley(JSONMixin, object):\n order_count (int): the number of orders in the trolley.\n purchase_result (:class:`PurchaseResult `):\n the result of the purchase attempt when available.\n-\n+ input_contained_unavailable_order (bool): indicates that the call used\n+ to create or modify this trolley object included at least one order\n+ that was not available.\n \"\"\"\n def __init__(self, token=None, transaction_uuid=None, transaction_id=None,\n bundles=None, discarded_orders=None, minutes_left=None,\n- order_count=None, purchase_result=None):\n+ order_count=None, purchase_result=None,\n+ input_contained_unavailable_order=False):\n self.token = token\n self.transaction_uuid = transaction_uuid\n self.transaction_id = transaction_id\n@@ -38,6 +41,7 @@ class Trolley(JSONMixin, object):\n self.minutes_left = minutes_left\n self.order_count = order_count\n self.purchase_result = purchase_result\n+ 
self.input_contained_unavailable_order = input_contained_unavailable_order\n \n @classmethod\n def from_api_data(cls, data):\n@@ -82,6 +86,8 @@\n 'transaction_uuid': raw_contents.get('transaction_uuid'),\n 'transaction_id': raw_contents.get('transaction_id'),\n 'order_count': data.get('trolley_order_count'),\n+ 'input_contained_unavailable_order': data.get(\n+ 'input_contained_unavailable_order', False),\n }\n \n minutes = data.get('minutes_left_on_reserve')\n"},"problem_statement":{"kind":"string","value":"missing \"input_contained_unavailable_order\" flag in trolley/reservation response\nCurrently when you attempt to add something to a trolley that is not available (sold out, performance in the past, max tickets per order exceeded, etc.) it just appears as an empty trolley without any indication that something has gone wrong.\r\n\r\nThe API returns the `input_contained_unavailable_order` flag in its response from `trolley.v1` and `reserve.v1`, and this should be added to the trolley object. I would suggest we should look at raising an exception as well."},"repo":{"kind":"string","value":"ingresso-group/pyticketswitch"},"test_patch":{"kind":"string","value":"diff --git a/tests/test_client.py b/tests/test_client.py\nindex 6dfc234..c339059 100644\n--- a/tests/test_client.py\n+++ b/tests/test_client.py\n@@ -1320,6 +1320,34 @@ class TestClient:\n assert 'gbp' in meta.currencies\n assert meta.default_currency_code == 'gbp'\n \n+ def test_get_trolley_with_unavailable_order(self, client, monkeypatch):\n+ \"\"\"\n+ This test is to check that an unavailable order doesn't raise\n+ any exceptions unless `raise_on_unavailable_order` is set to true\n+ \"\"\"\n+ response = {\n+ 'trolley_contents': {},\n+ 'trolley_token': 'DEF456',\n+ 'currency_code': 'gbp',\n+ 'input_contained_unavailable_order': True,\n+ 'currency_details': {\n+ 'gbp': {\n+ 'currency_code': 'gbp',\n+ }\n+ }\n+ }\n+\n+ mock_make_request = Mock(return_value=response)\n+ monkeypatch.setattr(client, 'make_request', mock_make_request)\n+\n+ # this should not raise any exceptions\n+ client.get_trolley()\n+\n+ # but this should\n+ with pytest.raises(exceptions.OrderUnavailableError):\n+ client.get_trolley(raise_on_unavailable_order=True)\n+\n+\n def 
test_from_api_data_with_unavailable_orders(self):\n+ data = {\n+ \"input_contained_unavailable_order\": True,\n+ \"unreserved_orders\": [],\n+ }\n+\n+ reservation = Reservation.from_api_data(data)\n+\n+ assert reservation.input_contained_unavailable_order is True\ndiff --git a/tests/test_trolley.py b/tests/test_trolley.py\nindex fb9b9df..0370757 100644\n--- a/tests/test_trolley.py\n+++ b/tests/test_trolley.py\n@@ -68,6 +68,23 @@ class TestTrolley:\n assert trolley.discarded_orders[0].item == 3\n assert trolley.discarded_orders[1].item == 6\n \n+ def test_from_api_data_with_empty_trolley(self):\n+ data = {\n+ \"discarded_orders\": [],\n+ \"input_contained_unavailable_order\": True,\n+ \"trolley_token\": \"abc123\",\n+ \"trolley_token_contents\": {\n+ \"trolley_bundle_count\": 0,\n+ \"trolley_order_count\": 0\n+ }\n+ }\n+\n+ trolley = Trolley.from_api_data(data)\n+\n+ assert trolley.token == 'abc123'\n+ assert trolley.input_contained_unavailable_order is True\n+\n+\n def test_get_events(self):\n \n event_one = Event(id_='abc123')\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"merge_commit\",\n \"failed_lite_validators\": [\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 1,\n \"test_score\": 0\n },\n \"num_modified_files\": 4\n}"},"version":{"kind":"string","value":"2.3"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"flake8\",\n \"pylint\",\n \"pytest\",\n \"behave\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.6\",\n \"reqs_path\": [\n \"requirements/test.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"astroid==2.11.7\nattrs==22.2.0\nbehave==1.2.6\ncertifi==2021.5.30\ncoverage==6.2\ndill==0.3.4\ndistlib==0.3.9\nfilelock==3.4.1\nflake8==5.0.4\nidna==3.10\nimportlib-metadata==1.7.0\nimportlib-resources==5.4.0\niniconfig==1.1.1\nisort==5.10.1\nlazy-object-proxy==1.7.1\nmccabe==0.7.0\nmock==5.2.0\nmultidict==5.2.0\npackaging==21.3\nparse==1.20.2\nparse-type==0.6.0\nplatformdirs==2.4.0\npluggy==0.13.1\npy==1.11.0\npycodestyle==2.9.1\npyflakes==2.5.0\nPyHamcrest==2.1.0\npylint==2.13.9\npyparsing==3.1.4\npytest==7.0.1\npytest-cov==4.0.0\npython-dateutil==2.9.0.post0\n-e git+https://github.com/ingresso-group/pyticketswitch.git@a22c4a3679174b1798acda89e59559930eb1f1a3#egg=pyticketswitch\nPyYAML==6.0.1\nrequests==2.9.1\nrequests-mock==1.11.0\nsix==1.11.0\ntoml==0.10.2\ntomli==1.2.3\ntox==3.14.3\ntyped-ast==1.5.5\ntyping_extensions==4.1.1\nvcrpy==4.1.1\nvirtualenv==20.16.2\nwrapt==1.16.0\nyarl==1.7.2\nzipp==3.6.0\n"},"environment":{"kind":"string","value":"name: pyticketswitch\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - pip=21.2.2=py36h06a4308_0\n 
- python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - astroid==2.11.7\n - attrs==22.2.0\n - behave==1.2.6\n - coverage==6.2\n - dill==0.3.4\n - distlib==0.3.9\n - filelock==3.4.1\n - flake8==5.0.4\n - idna==3.10\n - importlib-metadata==1.7.0\n - importlib-resources==5.4.0\n - iniconfig==1.1.1\n - isort==5.10.1\n - lazy-object-proxy==1.7.1\n - mccabe==0.7.0\n - mock==5.2.0\n - multidict==5.2.0\n - packaging==21.3\n - parse==1.20.2\n - parse-type==0.6.0\n - platformdirs==2.4.0\n - pluggy==0.13.1\n - py==1.11.0\n - pycodestyle==2.9.1\n - pyflakes==2.5.0\n - pyhamcrest==2.1.0\n - pylint==2.13.9\n - pyparsing==3.1.4\n - pytest==7.0.1\n - pytest-cov==4.0.0\n - python-dateutil==2.9.0.post0\n - pyyaml==6.0.1\n - requests==2.9.1\n - requests-mock==1.11.0\n - six==1.11.0\n - toml==0.10.2\n - tomli==1.2.3\n - tox==3.14.3\n - typed-ast==1.5.5\n - typing-extensions==4.1.1\n - vcrpy==4.1.1\n - virtualenv==20.16.2\n - wrapt==1.16.0\n - yarl==1.7.2\n - zipp==3.6.0\nprefix: /opt/conda/envs/pyticketswitch\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/test_client.py::TestClient::test_get_trolley_with_unavailable_order","tests/test_client.py::TestClient::test_make_reservation_with_unavailable_order","tests/test_reservation.py::TestReservation::test_from_api_data_with_unavailable_orders","tests/test_trolley.py::TestTrolley::test_from_api_data_with_empty_trolley"],"string":"[\n \"tests/test_client.py::TestClient::test_get_trolley_with_unavailable_order\",\n \"tests/test_client.py::TestClient::test_make_reservation_with_unavailable_order\",\n \"tests/test_reservation.py::TestReservation::test_from_api_data_with_unavailable_orders\",\n \"tests/test_trolley.py::TestTrolley::test_from_api_data_with_empty_trolley\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list 
like","value":["tests/test_client.py::TestClient::test_get_url","tests/test_client.py::TestClient::test_make_request","tests/test_client.py::TestClient::test_make_request_with_timeout","tests/test_client.py::TestClient::test_make_request_with_post","tests/test_client.py::TestClient::test_make_request_with_subuser","tests/test_client.py::TestClient::test_make_request_with_tracking_id","tests/test_client.py::TestClient::test_make_request_when_using_per_request_tracking_id","tests/test_client.py::TestClient::test_make_request_bad_response_with_auth_error","tests/test_client.py::TestClient::test_make_request_bad_response_with_error","tests/test_client.py::TestClient::test_make_request_bad_response_without_error","tests/test_client.py::TestClient::test_make_request_410_gone_response","tests/test_client.py::TestClient::test_make_request_no_contents_raises","tests/test_client.py::TestClient::test_add_optional_kwargs_extra_info","tests/test_client.py::TestClient::test_add_optional_kwargs_reviews","tests/test_client.py::TestClient::test_add_optional_kwargs_media","tests/test_client.py::TestClient::test_add_optional_kwargs_cost_range","tests/test_client.py::TestClient::test_add_optional_kwargs_best_value_offer","tests/test_client.py::TestClient::test_add_optional_kwargs_max_saving_offer","tests/test_client.py::TestClient::test_add_optional_kwargs_min_cost_offer","tests/test_client.py::TestClient::test_add_optional_kwargs_top_price_offer","tests/test_client.py::TestClient::test_add_optional_kwargs_no_singles_data","tests/test_client.py::TestClient::test_add_optional_kwargs_cost_range_details","tests/test_client.py::TestClient::test_add_optional_kwargs_avail_details","tests/test_client.py::TestClient::test_add_optional_kwargs_avail_details_with_perfs","tests/test_client.py::TestClient::test_add_optional_kwargs_source_info","tests/test_client.py::TestClient::test_list_events","tests/test_client.py::TestClient::test_list_events_with_keywords","tests/test_client.py::TestClient::test_list_events_with_start_date","tests/test_client.py::TestClient::test_list_events_with_end_date","tests/test_client.py::TestClient::test_list_events_with_start_and_end_date","tests/test_client.py::TestClient::test_list_events_country_code","tests/test_client.py::TestClient::test_list_events_city_code","tests/test_client.py::TestClient::test_list_events_geolocation","tests/test_client.py::TestClient::test_list_events_invalid_geolocation","tests/test_client.py::TestClient::test_list_events_include_dead","tests/test_client.py::TestClient::test_list_events_sort_order","tests/test_client.py::TestClient::test_list_events_pagination","tests/test_client.py::TestClient::test_list_events_no_results","tests/test_client.py::TestClient::test_list_events_misc_kwargs","tests/test_client.py::TestClient::test_get_events","tests/test_client.py::TestClient::test_get_events_event_list","tests/test_client.py::TestClient::test_get_events_no_results","tests/test_client.py::TestClient::test_get_events_misc_kwargs","tests/test_client.py::TestClient::test_get_events_with_upsell","tests/test_client.py::TestClient::test_get_events_with_addons","tests/test_client.py::TestClient::test_get_event","tests/test_client.py::TestClient::test_get_months","tests/test_client.py::TestClient::test_get_months_no_results","tests/test_client.py::TestClient::test_get_months_misc_kwargs","tests/test_client.py::TestClient::test_list_performances_no_results","tests/test_client.py::TestClient::test_list_performances","tests/test_client.py::TestClient::test_list_performances_co
st_range","tests/test_client.py::TestClient::test_list_performances_best_value_offer","tests/test_client.py::TestClient::test_list_performances_max_saving_offer","tests/test_client.py::TestClient::test_list_performances_min_cost_offer","tests/test_client.py::TestClient::test_list_performances_top_price_offer","tests/test_client.py::TestClient::test_list_performances_no_singles_data","tests/test_client.py::TestClient::test_list_performances_availability","tests/test_client.py::TestClient::test_list_performances_pagination","tests/test_client.py::TestClient::test_list_performances_with_start_date","tests/test_client.py::TestClient::test_list_performancess_with_end_date","tests/test_client.py::TestClient::test_list_performances_with_start_and_end_date","tests/test_client.py::TestClient::test_list_performances_misc_kwargs","tests/test_client.py::TestClient::test_get_performances","tests/test_client.py::TestClient::test_get_performances_no_performances","tests/test_client.py::TestClient::test_get_performances_misc_kwargs","tests/test_client.py::TestClient::test_get_performance","tests/test_client.py::TestClient::test_get_availability","tests/test_client.py::TestClient::test_get_availability_with_number_of_seats","tests/test_client.py::TestClient::test_get_availability_with_discounts","tests/test_client.py::TestClient::test_get_availability_with_example_seats","tests/test_client.py::TestClient::test_get_availability_with_seat_blocks","tests/test_client.py::TestClient::test_get_availability_with_user_commission","tests/test_client.py::TestClient::test_get_availability_no_availability","tests/test_client.py::TestClient::test_get_send_methods","tests/test_client.py::TestClient::test_get_send_methods_bad_data","tests/test_client.py::TestClient::test_get_discounts","tests/test_client.py::TestClient::test_get_discounts_bad_data","tests/test_client.py::TestClient::test_trolley_params_with_trolley_token","tests/test_client.py::TestClient::test_trolley_params_with_performance_id","tests/test_client.py::TestClient::test_trolley_params_with_number_of_seats","tests/test_client.py::TestClient::test_trolley_params_with_ticket_type_code","tests/test_client.py::TestClient::test_trolley_params_with_price_band_code","tests/test_client.py::TestClient::test_trolley_params_with_item_numbers_to_remove","tests/test_client.py::TestClient::test_trolley_params_with_item_numbers_to_remove_with_no_token","tests/test_client.py::TestClient::test_trolley_params_with_seats","tests/test_client.py::TestClient::test_trolley_params_with_discounts","tests/test_client.py::TestClient::test_trolley_params_with_send_codes","tests/test_client.py::TestClient::test_trolley_params_with_invalid_send_codes","tests/test_client.py::TestClient::test_get_trolley","tests/test_client.py::TestClient::test_get_upsells","tests/test_client.py::TestClient::test_get_addons","tests/test_client.py::TestClient::test_make_reservation","tests/test_client.py::TestClient::test_get_status","tests/test_client.py::TestClient::test_get_status_with_trans","tests/test_client.py::TestClient::test_test","tests/test_client.py::TestClient::test_release_reservation","tests/test_client.py::TestClient::test_make_purchase_card_details","tests/test_client.py::TestClient::test_make_purchase_redirection","tests/test_client.py::TestClient::test_make_purchase_credit","tests/test_client.py::TestClient::test_make_purchase_opting_out_of_confirmation_email","tests/test_client.py::TestClient::test_next_callout","tests/test_client.py::TestClient::test_next_callout_with_additional_callo
ut","tests/test_client.py::TestClient::test_auth_can_be_overridden_with_subclass","tests/test_client.py::TestClient::test_extra_params_can_be_overriden_by_subclass","tests/test_client.py::TestClient::test_get_auth_params_raises_deprecation_warning","tests/test_client.py::TestClient::test_make_request_using_decimal_parsing","tests/test_client.py::TestClient::test_make_request_using_float_parsing","tests/test_reservation.py::TestReservation::test_from_api_data","tests/test_trolley.py::TestTrolley::test_from_api_data_with_trolley_data","tests/test_trolley.py::TestTrolley::test_from_api_data_with_reservation_data","tests/test_trolley.py::TestTrolley::test_get_events","tests/test_trolley.py::TestTrolley::test_get_events_with_no_bundles","tests/test_trolley.py::TestTrolley::test_get_event_ids","tests/test_trolley.py::TestTrolley::test_get_bundle","tests/test_trolley.py::TestTrolley::test_get_bundle_when_none","tests/test_trolley.py::TestTrolley::test_get_bundle_when_no_match","tests/test_trolley.py::TestTrolley::test_get_item","tests/test_trolley.py::TestTrolley::test_get_orders"],"string":"[\n \"tests/test_client.py::TestClient::test_get_url\",\n \"tests/test_client.py::TestClient::test_make_request\",\n \"tests/test_client.py::TestClient::test_make_request_with_timeout\",\n \"tests/test_client.py::TestClient::test_make_request_with_post\",\n \"tests/test_client.py::TestClient::test_make_request_with_subuser\",\n \"tests/test_client.py::TestClient::test_make_request_with_tracking_id\",\n \"tests/test_client.py::TestClient::test_make_request_when_using_per_request_tracking_id\",\n \"tests/test_client.py::TestClient::test_make_request_bad_response_with_auth_error\",\n \"tests/test_client.py::TestClient::test_make_request_bad_response_with_error\",\n \"tests/test_client.py::TestClient::test_make_request_bad_response_without_error\",\n \"tests/test_client.py::TestClient::test_make_request_410_gone_response\",\n \"tests/test_client.py::TestClient::test_make_request_no_contents_raises\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_extra_info\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_reviews\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_media\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_cost_range\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_best_value_offer\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_max_saving_offer\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_min_cost_offer\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_top_price_offer\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_no_singles_data\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_cost_range_details\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_avail_details\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_avail_details_with_perfs\",\n \"tests/test_client.py::TestClient::test_add_optional_kwargs_source_info\",\n \"tests/test_client.py::TestClient::test_list_events\",\n \"tests/test_client.py::TestClient::test_list_events_with_keywords\",\n \"tests/test_client.py::TestClient::test_list_events_with_start_date\",\n \"tests/test_client.py::TestClient::test_list_events_with_end_date\",\n \"tests/test_client.py::TestClient::test_list_events_with_start_and_end_date\",\n \"tests/test_client.py::TestClient::test_list_events_country_code\",\n 
\"tests/test_client.py::TestClient::test_list_events_city_code\",\n \"tests/test_client.py::TestClient::test_list_events_geolocation\",\n \"tests/test_client.py::TestClient::test_list_events_invalid_geolocation\",\n \"tests/test_client.py::TestClient::test_list_events_include_dead\",\n \"tests/test_client.py::TestClient::test_list_events_sort_order\",\n \"tests/test_client.py::TestClient::test_list_events_pagination\",\n \"tests/test_client.py::TestClient::test_list_events_no_results\",\n \"tests/test_client.py::TestClient::test_list_events_misc_kwargs\",\n \"tests/test_client.py::TestClient::test_get_events\",\n \"tests/test_client.py::TestClient::test_get_events_event_list\",\n \"tests/test_client.py::TestClient::test_get_events_no_results\",\n \"tests/test_client.py::TestClient::test_get_events_misc_kwargs\",\n \"tests/test_client.py::TestClient::test_get_events_with_upsell\",\n \"tests/test_client.py::TestClient::test_get_events_with_addons\",\n \"tests/test_client.py::TestClient::test_get_event\",\n \"tests/test_client.py::TestClient::test_get_months\",\n \"tests/test_client.py::TestClient::test_get_months_no_results\",\n \"tests/test_client.py::TestClient::test_get_months_misc_kwargs\",\n \"tests/test_client.py::TestClient::test_list_performances_no_results\",\n \"tests/test_client.py::TestClient::test_list_performances\",\n \"tests/test_client.py::TestClient::test_list_performances_cost_range\",\n \"tests/test_client.py::TestClient::test_list_performances_best_value_offer\",\n \"tests/test_client.py::TestClient::test_list_performances_max_saving_offer\",\n \"tests/test_client.py::TestClient::test_list_performances_min_cost_offer\",\n \"tests/test_client.py::TestClient::test_list_performances_top_price_offer\",\n \"tests/test_client.py::TestClient::test_list_performances_no_singles_data\",\n \"tests/test_client.py::TestClient::test_list_performances_availability\",\n \"tests/test_client.py::TestClient::test_list_performances_pagination\",\n \"tests/test_client.py::TestClient::test_list_performances_with_start_date\",\n \"tests/test_client.py::TestClient::test_list_performancess_with_end_date\",\n \"tests/test_client.py::TestClient::test_list_performances_with_start_and_end_date\",\n \"tests/test_client.py::TestClient::test_list_performances_misc_kwargs\",\n \"tests/test_client.py::TestClient::test_get_performances\",\n \"tests/test_client.py::TestClient::test_get_performances_no_performances\",\n \"tests/test_client.py::TestClient::test_get_performances_misc_kwargs\",\n \"tests/test_client.py::TestClient::test_get_performance\",\n \"tests/test_client.py::TestClient::test_get_availability\",\n \"tests/test_client.py::TestClient::test_get_availability_with_number_of_seats\",\n \"tests/test_client.py::TestClient::test_get_availability_with_discounts\",\n \"tests/test_client.py::TestClient::test_get_availability_with_example_seats\",\n \"tests/test_client.py::TestClient::test_get_availability_with_seat_blocks\",\n \"tests/test_client.py::TestClient::test_get_availability_with_user_commission\",\n \"tests/test_client.py::TestClient::test_get_availability_no_availability\",\n \"tests/test_client.py::TestClient::test_get_send_methods\",\n \"tests/test_client.py::TestClient::test_get_send_methods_bad_data\",\n \"tests/test_client.py::TestClient::test_get_discounts\",\n \"tests/test_client.py::TestClient::test_get_discounts_bad_data\",\n \"tests/test_client.py::TestClient::test_trolley_params_with_trolley_token\",\n 
\"tests/test_client.py::TestClient::test_trolley_params_with_performance_id\",\n \"tests/test_client.py::TestClient::test_trolley_params_with_number_of_seats\",\n \"tests/test_client.py::TestClient::test_trolley_params_with_ticket_type_code\",\n \"tests/test_client.py::TestClient::test_trolley_params_with_price_band_code\",\n \"tests/test_client.py::TestClient::test_trolley_params_with_item_numbers_to_remove\",\n \"tests/test_client.py::TestClient::test_trolley_params_with_item_numbers_to_remove_with_no_token\",\n \"tests/test_client.py::TestClient::test_trolley_params_with_seats\",\n \"tests/test_client.py::TestClient::test_trolley_params_with_discounts\",\n \"tests/test_client.py::TestClient::test_trolley_params_with_send_codes\",\n \"tests/test_client.py::TestClient::test_trolley_params_with_invalid_send_codes\",\n \"tests/test_client.py::TestClient::test_get_trolley\",\n \"tests/test_client.py::TestClient::test_get_upsells\",\n \"tests/test_client.py::TestClient::test_get_addons\",\n \"tests/test_client.py::TestClient::test_make_reservation\",\n \"tests/test_client.py::TestClient::test_get_status\",\n \"tests/test_client.py::TestClient::test_get_status_with_trans\",\n \"tests/test_client.py::TestClient::test_test\",\n \"tests/test_client.py::TestClient::test_release_reservation\",\n \"tests/test_client.py::TestClient::test_make_purchase_card_details\",\n \"tests/test_client.py::TestClient::test_make_purchase_redirection\",\n \"tests/test_client.py::TestClient::test_make_purchase_credit\",\n \"tests/test_client.py::TestClient::test_make_purchase_opting_out_of_confirmation_email\",\n \"tests/test_client.py::TestClient::test_next_callout\",\n \"tests/test_client.py::TestClient::test_next_callout_with_additional_callout\",\n \"tests/test_client.py::TestClient::test_auth_can_be_overridden_with_subclass\",\n \"tests/test_client.py::TestClient::test_extra_params_can_be_overriden_by_subclass\",\n \"tests/test_client.py::TestClient::test_get_auth_params_raises_deprecation_warning\",\n \"tests/test_client.py::TestClient::test_make_request_using_decimal_parsing\",\n \"tests/test_client.py::TestClient::test_make_request_using_float_parsing\",\n \"tests/test_reservation.py::TestReservation::test_from_api_data\",\n \"tests/test_trolley.py::TestTrolley::test_from_api_data_with_trolley_data\",\n \"tests/test_trolley.py::TestTrolley::test_from_api_data_with_reservation_data\",\n \"tests/test_trolley.py::TestTrolley::test_get_events\",\n \"tests/test_trolley.py::TestTrolley::test_get_events_with_no_bundles\",\n \"tests/test_trolley.py::TestTrolley::test_get_event_ids\",\n \"tests/test_trolley.py::TestTrolley::test_get_bundle\",\n \"tests/test_trolley.py::TestTrolley::test_get_bundle_when_none\",\n \"tests/test_trolley.py::TestTrolley::test_get_bundle_when_no_match\",\n \"tests/test_trolley.py::TestTrolley::test_get_item\",\n \"tests/test_trolley.py::TestTrolley::test_get_orders\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"MIT License"},"__index_level_0__":{"kind":"number","value":2597,"string":"2,597"},"num_tokens_patch":{"kind":"number","value":2027,"string":"2,027"},"before_filepaths":{"kind":"list like","value":["pyticketswitch/client.py","pyticketswitch/exceptions.py","pyticketswitch/reservation.py","pyticketswitch/trolley.py"],"string":"[\n \"pyticketswitch/client.py\",\n \"pyticketswitch/exceptions.py\",\n \"pyticketswitch/reservation.py\",\n 
\"pyticketswitch/trolley.py\"\n]"}}},{"rowIdx":587,"cells":{"instance_id":{"kind":"string","value":"capitalone__datacompy-18"},"base_commit":{"kind":"string","value":"370f7efbe1a5206c525a6da40410442a4ce8d51c"},"created_at":{"kind":"string","value":"2018-05-30 04:59:48"},"environment_setup_commit":{"kind":"string","value":"246aad8c381f7591512f6ecef9debf6341261578"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/datacompy/core.py b/datacompy/core.py\nindex 7fc296e..e03d75e 100644\n--- a/datacompy/core.py\n+++ b/datacompy/core.py\n@@ -59,6 +59,8 @@ class Compare(object):\n more easily track the dataframes.\n df2_name : str, optional\n A string name for the second dataframe\n+ ignore_spaces : bool, optional\n+ Flag to strip whitespace (including newlines) from string columns\n \n Attributes\n ----------\n@@ -70,7 +72,7 @@ class Compare(object):\n \n def __init__(\n self, df1, df2, join_columns=None, on_index=False, abs_tol=0,\n- rel_tol=0, df1_name='df1', df2_name='df2'):\n+ rel_tol=0, df1_name='df1', df2_name='df2', ignore_spaces=False):\n \n if on_index and join_columns is not None:\n raise Exception('Only provide on_index or join_columns')\n@@ -93,7 +95,7 @@ class Compare(object):\n self.rel_tol = rel_tol\n self.df1_unq_rows = self.df2_unq_rows = self.intersect_rows = None\n self.column_stats = []\n- self._compare()\n+ self._compare(ignore_spaces)\n \n @property\n def df1(self):\n@@ -143,7 +145,7 @@ class Compare(object):\n if len(dataframe.drop_duplicates(subset=self.join_columns)) < len(dataframe):\n self._any_dupes = True\n \n- def _compare(self):\n+ def _compare(self, ignore_spaces):\n \"\"\"Actually run the comparison. This tries to run df1.equals(df2)\n first so that if they're truly equal we can tell.\n \n@@ -167,8 +169,8 @@ class Compare(object):\n LOG.info('Number of columns in df2 and not in df1: {}'.format(\n len(self.df2_unq_columns())))\n LOG.debug('Merging dataframes')\n- self._dataframe_merge()\n- self._intersect_compare()\n+ self._dataframe_merge(ignore_spaces)\n+ self._intersect_compare(ignore_spaces)\n if self.matches():\n LOG.info('df1 matches df2')\n else:\n@@ -186,7 +188,7 @@ class Compare(object):\n \"\"\"Get columns that are shared between the two dataframes\"\"\"\n return set(self.df1.columns) & set(self.df2.columns)\n \n- def _dataframe_merge(self):\n+ def _dataframe_merge(self, ignore_spaces):\n \"\"\"Merge df1 to df2 on the join columns, to get df1 - df2, df2 - df1\n and df1 & df2\n \n@@ -262,7 +264,7 @@ class Compare(object):\n 'Number of rows in df1 and df2 (not necessarily equal): {}'.format(\n len(self.intersect_rows)))\n \n- def _intersect_compare(self):\n+ def _intersect_compare(self, ignore_spaces):\n \"\"\"Run the comparison on the intersect dataframe\n \n This loops through all columns that are shared between df1 and df2, and\n@@ -285,7 +287,8 @@ class Compare(object):\n self.intersect_rows[col_1],\n self.intersect_rows[col_2],\n self.rel_tol,\n- self.abs_tol)\n+ self.abs_tol,\n+ ignore_spaces)\n match_cnt = self.intersect_rows[col_match].sum()\n \n try:\n@@ -570,7 +573,7 @@ def render(filename, *fields):\n return file_open.read().format(*fields)\n \n \n-def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0):\n+def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0, ignore_spaces=False):\n \"\"\"Compares two columns from a dataframe, returning a True/False series,\n with the same index as column 1.\n \n@@ -592,6 +595,8 @@ def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0):\n Relative tolerance\n abs_tol : float, 
optional\n Absolute tolerance\n+ ignore_spaces : bool, optional\n+ Flag to strip whitespace (including newlines) from string columns\n \n Returns\n -------\n@@ -616,6 +621,12 @@ def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0):\n equal_nan=True))\n except (ValueError, TypeError):\n try:\n+ if ignore_spaces:\n+ if col_1.dtype.kind == 'O':\n+ col_1 = col_1.str.strip()\n+ if col_2.dtype.kind == 'O':\n+ col_2 = col_2.str.strip()\n+ \n if set([col_1.dtype.kind, col_2.dtype.kind]) == set(['M','O']):\n compare = compare_string_and_date_columns(col_1, col_2)\n else:\n"},"problem_statement":{"kind":"string","value":"Would be useful to have a parameter to strip spaces for comparison\nAs probably expected, the following code will return a mismatch since 'B'<>'B ':\r\n```\r\nimport pandas as pd\r\nimport datacompy\r\n\r\ndf1 = pd.DataFrame([\r\n {'id': 1234, 'column_value': 'A'},\r\n {'id': 2345, 'column_value': 'B'}])\r\n\r\ndf2 = pd.DataFrame([\r\n {'id': 1234, 'column_value': 'A'},\r\n {'id': 2345, 'column_value': 'B '}])\r\n\r\ncompare = datacompy.Compare(\r\n df1,\r\n df2,\r\n join_columns='id',\r\n abs_tol=0,\r\n rel_tol=0,\r\n )\r\ncompare.matches(ignore_extra_columns=False)\r\n# False\r\n\r\n# This method prints out a human-readable report summarizing and sampling differences\r\nprint(compare.report())\r\n```\r\nWhat I propose is an optional parameter to ignore differences where the only difference is leading or trailing spaces. In this example it is obvious that there is a trailing space. However, when we are dealing with extracts from different databases/source files, without real control over the ETL of these, sometimes we can't prevent these discrepancies. We may wish to ignore these types of mismatches to identify 'worse' mismatches more effectively.\r\n\r\nAnother candidate could be ignoring case sensitivity differences.\r\nOf course these could both be easily handled with preprocessing the dataframes, but still could be some convenient enhancements!"},"repo":{"kind":"string","value":"capitalone/datacompy"},"test_patch":{"kind":"string","value":"diff --git a/tests/test_core.py b/tests/test_core.py\nindex d236427..f3e8437 100644\n--- a/tests/test_core.py\n+++ b/tests/test_core.py\n@@ -85,6 +85,28 @@ something||False\n assert_series_equal(expect_out, actual_out, check_names=False)\n \n \n+def test_string_columns_equal_with_ignore_spaces():\n+ data = '''a|b|expected\n+Hi|Hi|True\n+Yo|Yo|True\n+Hey|Hey |True\n+résumé|resume|False\n+résumé|résumé|True\n+💩|💩|True\n+💩|🤔|False\n+ | |True\n+ | |True\n+datacompy|DataComPy|False\n+something||False\n+|something|False\n+||True'''\n+ df = pd.read_csv(six.StringIO(data), sep='|')\n+ actual_out = datacompy.columns_equal(\n+ df.a, df.b, rel_tol=0.2, ignore_spaces=True)\n+ expect_out = df['expected']\n+ assert_series_equal(expect_out, actual_out, check_names=False)\n+\n+\n def test_date_columns_equal():\n data = '''a|b|expected\n 2017-01-01|2017-01-01|True\n@@ -110,6 +132,34 @@ def test_date_columns_equal():\n assert_series_equal(expect_out, actual_out_rev, check_names=False)\n \n \n+def test_date_columns_equal_with_ignore_spaces():\n+ data = '''a|b|expected\n+2017-01-01|2017-01-01 |True\n+2017-01-02 |2017-01-02|True\n+2017-10-01 |2017-10-10 |False\n+2017-01-01||False\n+|2017-01-01|False\n+||True'''\n+ df = pd.read_csv(six.StringIO(data), sep='|')\n+ #First compare just the strings\n+ actual_out = datacompy.columns_equal(\n+ df.a, df.b, rel_tol=0.2, ignore_spaces=True)\n+ expect_out = df['expected']\n+ assert_series_equal(expect_out, actual_out, 
check_names=False)\n+\n+ #Then compare converted to datetime objects\n+ df['a'] = pd.to_datetime(df['a'])\n+ df['b'] = pd.to_datetime(df['b'])\n+ actual_out = datacompy.columns_equal(\n+ df.a, df.b, rel_tol=0.2, ignore_spaces=True)\n+ expect_out = df['expected']\n+ assert_series_equal(expect_out, actual_out, check_names=False)\n+ #and reverse\n+ actual_out_rev = datacompy.columns_equal(\n+ df.b, df.a, rel_tol=0.2, ignore_spaces=True)\n+ assert_series_equal(expect_out, actual_out_rev, check_names=False)\n+\n+\n \n def test_date_columns_unequal():\n \"\"\"I want datetime fields to match with dates stored as strings\n@@ -250,6 +300,20 @@ def test_mixed_column():\n assert_series_equal(expect_out, actual_out, check_names=False)\n \n \n+def test_mixed_column_with_ignore_spaces():\n+ df = pd.DataFrame([\n+ {'a': 'hi', 'b': 'hi ', 'expected': True},\n+ {'a': 1, 'b': 1, 'expected': True},\n+ {'a': np.inf, 'b': np.inf, 'expected': True},\n+ {'a': Decimal('1'), 'b': Decimal('1'), 'expected': True},\n+ {'a': 1, 'b': '1 ', 'expected': False},\n+ {'a': 1, 'b': 'yo ', 'expected': False}\n+ ])\n+ actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)\n+ expect_out = df['expected']\n+ assert_series_equal(expect_out, actual_out, check_names=False)\n+\n+\n def test_compare_df_setter_bad():\n df = pd.DataFrame([{'a': 1, 'A': 2}, {'a': 2, 'A': 2}])\n with raises(TypeError, message='df1 must be a pandas DataFrame'):\n@@ -565,4 +629,52 @@ def test_dupes_from_real_data():\n assert compare_unq.matches()\n #Just render the report to make sure it renders.\n t = compare_acct.report()\n- r = compare_unq.report()\n\\ No newline at end of file\n+ r = compare_unq.report()\n+\n+\n+def test_strings_with_joins_with_ignore_spaces():\n+ df1 = pd.DataFrame([{'a': 'hi', 'b': ' A'}, {'a': 'bye', 'b': 'A'}])\n+ df2 = pd.DataFrame([{'a': 'hi', 'b': 'A'}, {'a': 'bye', 'b': 'A '}])\n+ compare = datacompy.Compare(df1, df2, 'a', ignore_spaces=False)\n+ assert not compare.matches()\n+ assert compare.all_columns_match()\n+ assert compare.all_rows_overlap()\n+ assert not compare.intersect_rows_match()\n+\n+ compare = datacompy.Compare(df1, df2, 'a', ignore_spaces=True)\n+ assert compare.matches()\n+ assert compare.all_columns_match()\n+ assert compare.all_rows_overlap()\n+ assert compare.intersect_rows_match()\n+\n+\n+def test_decimal_with_joins_with_ignore_spaces():\n+ df1 = pd.DataFrame([{'a': 1, 'b': ' A'}, {'a': 2, 'b': 'A'}])\n+ df2 = pd.DataFrame([{'a': 1, 'b': 'A'}, {'a': 2, 'b': 'A '}])\n+ compare = datacompy.Compare(df1, df2, 'a', ignore_spaces=False)\n+ assert not compare.matches()\n+ assert compare.all_columns_match()\n+ assert compare.all_rows_overlap()\n+ assert not compare.intersect_rows_match()\n+\n+ compare = datacompy.Compare(df1, df2, 'a', ignore_spaces=True)\n+ assert compare.matches()\n+ assert compare.all_columns_match()\n+ assert compare.all_rows_overlap()\n+ assert compare.intersect_rows_match()\n+\n+\n+def test_index_with_joins_with_ignore_spaces():\n+ df1 = pd.DataFrame([{'a': 1, 'b': ' A'}, {'a': 2, 'b': 'A'}])\n+ df2 = pd.DataFrame([{'a': 1, 'b': 'A'}, {'a': 2, 'b': 'A '}])\n+ compare = datacompy.Compare(df1, df2, on_index=True, ignore_spaces=False)\n+ assert not compare.matches()\n+ assert compare.all_columns_match()\n+ assert compare.all_rows_overlap()\n+ assert not compare.intersect_rows_match()\n+\n+ compare = datacompy.Compare(df1, df2, 'a', ignore_spaces=True)\n+ assert compare.matches()\n+ assert compare.all_columns_match()\n+ assert compare.all_rows_overlap()\n+ assert 
compare.intersect_rows_match()\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 1,\n \"test_score\": 0\n },\n \"num_modified_files\": 1\n}"},"version":{"kind":"string","value":"0.5"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest>=3.0.6\"\n ],\n \"pre_install\": null,\n \"python\": \"3.6\",\n \"reqs_path\": [\n \"requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"attrs==22.2.0\ncertifi==2021.5.30\n-e git+https://github.com/capitalone/datacompy.git@370f7efbe1a5206c525a6da40410442a4ce8d51c#egg=datacompy\nimportlib-metadata==4.8.3\niniconfig==1.1.1\nnumpy==1.19.5\npackaging==21.3\npandas==1.1.5\npluggy==1.0.0\npy==1.11.0\npyparsing==3.1.4\npytest==7.0.1\npython-dateutil==2.9.0.post0\npytz==2025.2\nsix==1.17.0\ntomli==1.2.3\ntyping_extensions==4.1.1\nzipp==3.6.0\n"},"environment":{"kind":"string","value":"name: datacompy\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - pip=21.2.2=py36h06a4308_0\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - attrs==22.2.0\n - importlib-metadata==4.8.3\n - iniconfig==1.1.1\n - numpy==1.19.5\n - packaging==21.3\n - pandas==1.1.5\n - pluggy==1.0.0\n - py==1.11.0\n - pyparsing==3.1.4\n - pytest==7.0.1\n - python-dateutil==2.9.0.post0\n - pytz==2025.2\n - six==1.17.0\n - tomli==1.2.3\n - typing-extensions==4.1.1\n - zipp==3.6.0\nprefix: /opt/conda/envs/datacompy\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/test_core.py::test_string_columns_equal_with_ignore_spaces","tests/test_core.py::test_date_columns_equal_with_ignore_spaces","tests/test_core.py::test_mixed_column_with_ignore_spaces","tests/test_core.py::test_strings_with_joins_with_ignore_spaces","tests/test_core.py::test_decimal_with_joins_with_ignore_spaces","tests/test_core.py::test_index_with_joins_with_ignore_spaces"],"string":"[\n \"tests/test_core.py::test_string_columns_equal_with_ignore_spaces\",\n \"tests/test_core.py::test_date_columns_equal_with_ignore_spaces\",\n \"tests/test_core.py::test_mixed_column_with_ignore_spaces\",\n \"tests/test_core.py::test_strings_with_joins_with_ignore_spaces\",\n \"tests/test_core.py::test_decimal_with_joins_with_ignore_spaces\",\n \"tests/test_core.py::test_index_with_joins_with_ignore_spaces\"\n]"},"FAIL_TO_FAIL":{"kind":"list 
like","value":["tests/test_core.py::test_compare_df_setter_bad","tests/test_core.py::test_compare_df_setter_bad_index","tests/test_core.py::test_compare_on_index_and_join_columns","tests/test_core.py::test_simple_dupes_index"],"string":"[\n \"tests/test_core.py::test_compare_df_setter_bad\",\n \"tests/test_core.py::test_compare_df_setter_bad_index\",\n \"tests/test_core.py::test_compare_on_index_and_join_columns\",\n \"tests/test_core.py::test_simple_dupes_index\"\n]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/test_core.py::test_numeric_columns_equal_abs","tests/test_core.py::test_numeric_columns_equal_rel","tests/test_core.py::test_string_columns_equal","tests/test_core.py::test_date_columns_equal","tests/test_core.py::test_date_columns_unequal","tests/test_core.py::test_bad_date_columns","tests/test_core.py::test_rounded_date_columns","tests/test_core.py::test_decimal_float_columns_equal","tests/test_core.py::test_decimal_float_columns_equal_rel","tests/test_core.py::test_decimal_columns_equal","tests/test_core.py::test_decimal_columns_equal_rel","tests/test_core.py::test_infinity_and_beyond","tests/test_core.py::test_mixed_column","tests/test_core.py::test_compare_df_setter_good","tests/test_core.py::test_compare_df_setter_different_cases","tests/test_core.py::test_compare_df_setter_good_index","tests/test_core.py::test_columns_overlap","tests/test_core.py::test_columns_no_overlap","tests/test_core.py::test_10k_rows","tests/test_core.py::test_subset","tests/test_core.py::test_not_subset","tests/test_core.py::test_large_subset","tests/test_core.py::test_string_joiner","tests/test_core.py::test_decimal_with_joins","tests/test_core.py::test_decimal_with_nulls","tests/test_core.py::test_strings_with_joins","tests/test_core.py::test_index_joining","tests/test_core.py::test_index_joining_strings_i_guess","tests/test_core.py::test_index_joining_non_overlapping","tests/test_core.py::test_temp_column_name","tests/test_core.py::test_temp_column_name_one_has","tests/test_core.py::test_temp_column_name_both_have","tests/test_core.py::test_temp_column_name_one_already","tests/test_core.py::test_simple_dupes_one_field","tests/test_core.py::test_simple_dupes_two_fields","tests/test_core.py::test_simple_dupes_one_field_two_vals","tests/test_core.py::test_simple_dupes_one_field_three_to_two_vals","tests/test_core.py::test_dupes_from_real_data"],"string":"[\n \"tests/test_core.py::test_numeric_columns_equal_abs\",\n \"tests/test_core.py::test_numeric_columns_equal_rel\",\n \"tests/test_core.py::test_string_columns_equal\",\n \"tests/test_core.py::test_date_columns_equal\",\n \"tests/test_core.py::test_date_columns_unequal\",\n \"tests/test_core.py::test_bad_date_columns\",\n \"tests/test_core.py::test_rounded_date_columns\",\n \"tests/test_core.py::test_decimal_float_columns_equal\",\n \"tests/test_core.py::test_decimal_float_columns_equal_rel\",\n \"tests/test_core.py::test_decimal_columns_equal\",\n \"tests/test_core.py::test_decimal_columns_equal_rel\",\n \"tests/test_core.py::test_infinity_and_beyond\",\n \"tests/test_core.py::test_mixed_column\",\n \"tests/test_core.py::test_compare_df_setter_good\",\n \"tests/test_core.py::test_compare_df_setter_different_cases\",\n \"tests/test_core.py::test_compare_df_setter_good_index\",\n \"tests/test_core.py::test_columns_overlap\",\n \"tests/test_core.py::test_columns_no_overlap\",\n \"tests/test_core.py::test_10k_rows\",\n \"tests/test_core.py::test_subset\",\n \"tests/test_core.py::test_not_subset\",\n \"tests/test_core.py::test_large_subset\",\n 
\"tests/test_core.py::test_string_joiner\",\n \"tests/test_core.py::test_decimal_with_joins\",\n \"tests/test_core.py::test_decimal_with_nulls\",\n \"tests/test_core.py::test_strings_with_joins\",\n \"tests/test_core.py::test_index_joining\",\n \"tests/test_core.py::test_index_joining_strings_i_guess\",\n \"tests/test_core.py::test_index_joining_non_overlapping\",\n \"tests/test_core.py::test_temp_column_name\",\n \"tests/test_core.py::test_temp_column_name_one_has\",\n \"tests/test_core.py::test_temp_column_name_both_have\",\n \"tests/test_core.py::test_temp_column_name_one_already\",\n \"tests/test_core.py::test_simple_dupes_one_field\",\n \"tests/test_core.py::test_simple_dupes_two_fields\",\n \"tests/test_core.py::test_simple_dupes_one_field_two_vals\",\n \"tests/test_core.py::test_simple_dupes_one_field_three_to_two_vals\",\n \"tests/test_core.py::test_dupes_from_real_data\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"Apache License 2.0"},"__index_level_0__":{"kind":"number","value":2598,"string":"2,598"},"num_tokens_patch":{"kind":"number","value":1126,"string":"1,126"},"before_filepaths":{"kind":"list like","value":["datacompy/core.py"],"string":"[\n \"datacompy/core.py\"\n]"}}},{"rowIdx":588,"cells":{"instance_id":{"kind":"string","value":"peterbe__hashin-65"},"base_commit":{"kind":"string","value":"bbe0b6c379e25fbd8d3e702473e8e29677ccd9c0"},"created_at":{"kind":"string","value":"2018-05-30 21:13:24"},"environment_setup_commit":{"kind":"string","value":"bbe0b6c379e25fbd8d3e702473e8e29677ccd9c0"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/hashin.py b/hashin.py\nindex c1bb79b..1590560 100755\n--- a/hashin.py\n+++ b/hashin.py\n@@ -58,6 +58,11 @@ parser.add_argument(\n help='Verbose output',\n action='store_true',\n )\n+parser.add_argument(\n+ '--include-prereleases',\n+ help='Include pre-releases (off by default)',\n+ action='store_true',\n+)\n parser.add_argument(\n '-p', '--python-version',\n help='Python version to add wheels for. 
May be used multiple times.',\n@@ -83,6 +88,10 @@ class PackageError(Exception):\n pass\n \n \n+class NoVersionsError(Exception):\n+ \"\"\"When there are no valid versions found.\"\"\"\n+\n+\n def _verbose(*args):\n print('* ' + ' '.join(args))\n \n@@ -127,6 +136,7 @@ def run_single_package(\n algorithm,\n python_versions=None,\n verbose=False,\n+ include_prereleases=False,\n ):\n restriction = None\n if ';' in spec:\n@@ -143,7 +153,8 @@ def run_single_package(\n version=version,\n verbose=verbose,\n python_versions=python_versions,\n- algorithm=algorithm\n+ algorithm=algorithm,\n+ include_prereleases=include_prereleases,\n )\n package = data['package']\n \n@@ -202,7 +213,7 @@ def amend_requirements_content(requirements, package, new_lines):\n return requirements\n \n \n-def get_latest_version(data):\n+def get_latest_version(data, include_prereleases):\n \"\"\"\n Return the version string of what we think is the latest version.\n In the data blob from PyPI there is the info->version key which\n@@ -214,11 +225,22 @@ def get_latest_version(data):\n # This feels kinda strange but it has worked for years\n return data['info']['version']\n all_versions = []\n+ count_prereleases = 0\n for version in data['releases']:\n v = parse(version)\n- if not v.is_prerelease:\n+ if not v.is_prerelease or include_prereleases:\n all_versions.append((v, version))\n+ else:\n+ count_prereleases += 1\n all_versions.sort(reverse=True)\n+ if not all_versions:\n+ msg = \"Not a single valid version found.\"\n+ if not include_prereleases and count_prereleases:\n+ msg += (\n+ \" But, found {0} pre-releases. Consider running again \"\n+ \"with the --include-prereleases flag.\"\n+ )\n+ raise NoVersionsError(msg)\n # return the highest non-pre-release version\n return str(all_versions[0][1])\n \n@@ -378,6 +400,7 @@ def get_package_hashes(\n algorithm=DEFAULT_ALGORITHM,\n python_versions=(),\n verbose=False,\n+ include_prereleases=False,\n ):\n \"\"\"\n Gets the hashes for the given package.\n@@ -404,7 +427,7 @@ def get_package_hashes(\n \"\"\"\n data = get_package_data(package, verbose)\n if not version:\n- version = get_latest_version(data)\n+ version = get_latest_version(data, include_prereleases)\n assert version\n if verbose:\n _verbose('Latest version for {0} is {1}'.format(\n@@ -472,6 +495,7 @@ def main():\n args.algorithm,\n args.python_version,\n verbose=args.verbose,\n+ include_prereleases=args.include_prereleases,\n )\n except PackageError as exception:\n print(str(exception), file=sys.stderr)\n"},"problem_statement":{"kind":"string","value":"`hashin black` fails\n```\r\n▶ hashin black\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/hashin\", line 11, in \r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.6/site-packages/hashin.py\", line 474, in main\r\n verbose=args.verbose,\r\n File \"/usr/local/lib/python3.6/site-packages/hashin.py\", line 120, in run\r\n run_single_package(spec, *args, **kwargs)\r\n File \"/usr/local/lib/python3.6/site-packages/hashin.py\", line 146, in run_single_package\r\n algorithm=algorithm\r\n File \"/usr/local/lib/python3.6/site-packages/hashin.py\", line 407, in get_package_hashes\r\n version = get_latest_version(data)\r\n File \"/usr/local/lib/python3.6/site-packages/hashin.py\", line 223, in get_latest_version\r\n return str(all_versions[0][1])\r\nIndexError: list index out of range\r\n```\r\n"},"repo":{"kind":"string","value":"peterbe/hashin"},"test_patch":{"kind":"string","value":"diff --git a/tests/test_arg_parse.py b/tests/test_arg_parse.py\nindex 
7d00e74..a6b6236 100644\n--- a/tests/test_arg_parse.py\n+++ b/tests/test_arg_parse.py\n@@ -18,6 +18,7 @@ def test_everything():\n requirements_file='reqs.txt',\n verbose=True,\n version=False,\n+ include_prereleases=False,\n )\n assert args == (expected, [])\n \n@@ -37,6 +38,7 @@ def test_everything_long():\n requirements_file='reqs.txt',\n verbose=True,\n version=False,\n+ include_prereleases=False,\n )\n assert args == (expected, [])\n \n@@ -50,5 +52,6 @@ def test_minimal():\n requirements_file='requirements.txt',\n verbose=False,\n version=False,\n+ include_prereleases=False,\n )\n assert args == (expected, [])\ndiff --git a/tests/test_cli.py b/tests/test_cli.py\nindex c0b566f..72cf2de 100644\n--- a/tests/test_cli.py\n+++ b/tests/test_cli.py\n@@ -73,7 +73,10 @@ class Tests(TestCase):\n \n @mock.patch('hashin.urlopen')\n def test_get_latest_version_simple(self, murlopen):\n- version = hashin.get_latest_version({'info': {'version': '0.3'}})\n+ version = hashin.get_latest_version(\n+ {'info': {'version': '0.3'}},\n+ False\n+ )\n self.assertEqual(version, '0.3')\n \n @mock.patch('hashin.urlopen')\n@@ -91,9 +94,43 @@ class Tests(TestCase):\n '2.0b2': {},\n '2.0c3': {},\n }\n- })\n+ }, False)\n self.assertEqual(version, '0.999')\n \n+ @mock.patch('hashin.urlopen')\n+ def test_get_latest_version_only_pre_release(self, murlopen):\n+ self.assertRaises(\n+ hashin.NoVersionsError,\n+ hashin.get_latest_version,\n+ {\n+ 'info': {\n+ 'version': '0.3',\n+ },\n+ 'releases': {\n+ '1.1.0rc1': {},\n+ '1.1rc1': {},\n+ '1.0a1': {},\n+ '2.0b2': {},\n+ '2.0c3': {},\n+ }\n+ },\n+ False,\n+ )\n+\n+ version = hashin.get_latest_version({\n+ 'info': {\n+ 'version': '0.3',\n+ },\n+ 'releases': {\n+ '1.1.0rc1': {},\n+ '1.1rc1': {},\n+ '1.0a1': {},\n+ '2.0b2': {},\n+ '2.0c3': {},\n+ }\n+ }, True)\n+ self.assertEqual(version, '2.0c3')\n+\n @mock.patch('hashin.urlopen')\n def test_get_latest_version_non_pre_release_leading_zeros(self, murlopen):\n version = hashin.get_latest_version({\n@@ -105,7 +142,7 @@ class Tests(TestCase):\n '0.04.21': {},\n '0.04.09': {},\n }\n- })\n+ }, False)\n self.assertEqual(version, '0.04.21')\n \n @mock.patch('hashin.urlopen')\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"merge_commit\",\n \"failed_lite_validators\": [\n \"has_many_hunks\",\n \"has_pytest_match_arg\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 2,\n \"test_score\": 2\n },\n \"num_modified_files\": 1\n}"},"version":{"kind":"string","value":"unknown"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\",\n \"mock\"\n ],\n \"pre_install\": null,\n \"python\": \"3.9\",\n \"reqs_path\": null,\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"exceptiongroup==1.2.2\n-e git+https://github.com/peterbe/hashin.git@bbe0b6c379e25fbd8d3e702473e8e29677ccd9c0#egg=hashin\niniconfig==2.1.0\nmock==5.2.0\npackaging==24.2\npip-api==0.0.34\npluggy==1.5.0\npytest==8.3.5\ntomli==2.2.1\n"},"environment":{"kind":"string","value":"name: hashin\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - 
ca-certificates=2025.2.25=h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - pip=25.0=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - exceptiongroup==1.2.2\n - iniconfig==2.1.0\n - mock==5.2.0\n - packaging==24.2\n - pip-api==0.0.34\n - pluggy==1.5.0\n - pytest==8.3.5\n - tomli==2.2.1\nprefix: /opt/conda/envs/hashin\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/test_arg_parse.py::test_everything","tests/test_arg_parse.py::test_everything_long","tests/test_arg_parse.py::test_minimal","tests/test_cli.py::Tests::test_get_latest_version_non_pre_release","tests/test_cli.py::Tests::test_get_latest_version_non_pre_release_leading_zeros","tests/test_cli.py::Tests::test_get_latest_version_only_pre_release","tests/test_cli.py::Tests::test_get_latest_version_simple"],"string":"[\n \"tests/test_arg_parse.py::test_everything\",\n \"tests/test_arg_parse.py::test_everything_long\",\n \"tests/test_arg_parse.py::test_minimal\",\n \"tests/test_cli.py::Tests::test_get_latest_version_non_pre_release\",\n \"tests/test_cli.py::Tests::test_get_latest_version_non_pre_release_leading_zeros\",\n \"tests/test_cli.py::Tests::test_get_latest_version_only_pre_release\",\n \"tests/test_cli.py::Tests::test_get_latest_version_simple\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/test_cli.py::Tests::test_amend_requirements_content_new","tests/test_cli.py::Tests::test_amend_requirements_content_new_similar_name","tests/test_cli.py::Tests::test_amend_requirements_content_replacement","tests/test_cli.py::Tests::test_amend_requirements_content_replacement_2","tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others","tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others_2","tests/test_cli.py::Tests::test_amend_requirements_content_replacement_single_to_multi","tests/test_cli.py::Tests::test_expand_python_version","tests/test_cli.py::Tests::test_filter_releases","tests/test_cli.py::Tests::test_get_hashes_error","tests/test_cli.py::Tests::test_get_package_hashes","tests/test_cli.py::Tests::test_get_package_hashes_unknown_algorithm","tests/test_cli.py::Tests::test_get_package_hashes_without_version","tests/test_cli.py::Tests::test_main_packageerrors_stderr","tests/test_cli.py::Tests::test_main_version","tests/test_cli.py::Tests::test_non_200_ok_download","tests/test_cli.py::Tests::test_release_url_metadata_python","tests/test_cli.py::Tests::test_run","tests/test_cli.py::Tests::test_run_case_insensitive","tests/test_cli.py::Tests::test_run_contained_names","tests/test_cli.py::Tests::test_run_pep_0496","tests/test_cli.py::Tests::test_run_without_specific_version"],"string":"[\n \"tests/test_cli.py::Tests::test_amend_requirements_content_new\",\n \"tests/test_cli.py::Tests::test_amend_requirements_content_new_similar_name\",\n \"tests/test_cli.py::Tests::test_amend_requirements_content_replacement\",\n \"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_2\",\n \"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others\",\n 
\"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others_2\",\n \"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_single_to_multi\",\n \"tests/test_cli.py::Tests::test_expand_python_version\",\n \"tests/test_cli.py::Tests::test_filter_releases\",\n \"tests/test_cli.py::Tests::test_get_hashes_error\",\n \"tests/test_cli.py::Tests::test_get_package_hashes\",\n \"tests/test_cli.py::Tests::test_get_package_hashes_unknown_algorithm\",\n \"tests/test_cli.py::Tests::test_get_package_hashes_without_version\",\n \"tests/test_cli.py::Tests::test_main_packageerrors_stderr\",\n \"tests/test_cli.py::Tests::test_main_version\",\n \"tests/test_cli.py::Tests::test_non_200_ok_download\",\n \"tests/test_cli.py::Tests::test_release_url_metadata_python\",\n \"tests/test_cli.py::Tests::test_run\",\n \"tests/test_cli.py::Tests::test_run_case_insensitive\",\n \"tests/test_cli.py::Tests::test_run_contained_names\",\n \"tests/test_cli.py::Tests::test_run_pep_0496\",\n \"tests/test_cli.py::Tests::test_run_without_specific_version\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"MIT License"},"__index_level_0__":{"kind":"number","value":2603,"string":"2,603"},"num_tokens_patch":{"kind":"number","value":828,"string":"828"},"before_filepaths":{"kind":"list like","value":["hashin.py"],"string":"[\n \"hashin.py\"\n]"}}},{"rowIdx":589,"cells":{"instance_id":{"kind":"string","value":"HXLStandard__libhxl-python-174"},"base_commit":{"kind":"string","value":"1bc7e92a3844dd443f9e31f478357ea7b599c831"},"created_at":{"kind":"string","value":"2018-05-30 21:52:05"},"environment_setup_commit":{"kind":"string","value":"1bc7e92a3844dd443f9e31f478357ea7b599c831"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/hxl/validation.py b/hxl/validation.py\nindex 3eb1eba..922bfaf 100644\n--- a/hxl/validation.py\n+++ b/hxl/validation.py\n@@ -1508,11 +1508,6 @@ def validate(data, schema=None):\n \n issue_map = dict()\n \n- def make_rule_hash(rule):\n- \"\"\"Make a good-enough hash for a rule.\"\"\"\n- s = \"\\r\".join([str(rule.severity), str(rule.description), str(rule.tag_pattern)])\n- return base64.urlsafe_b64encode(hashlib.md5(s.encode('utf-8')).digest())[:8].decode('ascii')\n-\n def add_issue(issue):\n hash = make_rule_hash(issue.rule)\n issue_map.setdefault(hash, []).append(issue)\n@@ -1562,9 +1557,10 @@ def make_json_report(status, issue_map, schema_url=None, data_url=None):\n \n # add the issue objects\n for rule_id, locations in issue_map.items():\n- json_report['stats']['total'] += len(locations)\n- json_report['stats'][locations[0].rule.severity] += len(locations)\n- json_report['issues'].append(make_json_issue(rule_id, locations))\n+ json_issue = make_json_issue(rule_id, locations)\n+ json_report['stats']['total'] += len(json_issue['locations'])\n+ json_report['stats'][locations[0].rule.severity] += len(json_issue['locations'])\n+ json_report['issues'].append(json_issue)\n \n return json_report\n \n@@ -1581,6 +1577,15 @@ def make_json_issue(rule_id, locations):\n if not description:\n description = model.message\n \n+ # get all unique locations\n+ location_keys = set()\n+ json_locations = []\n+ for location in locations:\n+ location_key = (location.row.row_number, location.column.column_number, location.value, location.suggested_value,)\n+ if not location_key in location_keys:\n+ json_locations.append(make_json_location(location))\n+ location_keys.add(location_key)\n+\n # make the 
issue\n json_issue = {\n \"rule_id\": rule_id,\n@@ -1589,7 +1594,7 @@ def make_json_issue(rule_id, locations):\n \"severity\": model.rule.severity,\n \"location_count\": len(locations),\n \"scope\": model.scope,\n- \"locations\": [make_json_location(location) for location in locations]\n+ \"locations\": json_locations\n }\n \n return json_issue\n@@ -1622,4 +1627,10 @@ def make_json_location(location):\n \n return json_location\n \n+\n+def make_rule_hash(rule):\n+ \"\"\"Make a good-enough hash for a rule.\"\"\"\n+ s = \"\\r\".join([str(rule.severity), str(rule.description), str(rule.tag_pattern)])\n+ return base64.urlsafe_b64encode(hashlib.md5(s.encode('utf-8')).digest())[:8].decode('ascii')\n+\n # end\n"},"problem_statement":{"kind":"string","value":"Double counting of errors p-code adm name combination consistency errors\nWhen I put the below into data check, I get 2 of every cell eg. F3,F3,F4,F4,F5,F5...\r\nhttps://data.humdata.org/dataset/77c97850-4004-4285-94db-0b390a962d6e/resource/d6c0dbac-683d-42d7-82b4-a6379bd4f48e/download/mrt_population_statistics_ons_rgph_2013_2017.xlsx"},"repo":{"kind":"string","value":"HXLStandard/libhxl-python"},"test_patch":{"kind":"string","value":"diff --git a/tests/test_validation.py b/tests/test_validation.py\nindex 43ab00b..7e8b4f3 100644\n--- a/tests/test_validation.py\n+++ b/tests/test_validation.py\n@@ -655,8 +655,8 @@ class TestValidateDataset(unittest.TestCase):\n def test_double_correlation(self):\n \"\"\"Test correlation when more than one column has same tagspec\"\"\"\n SCHEMA = [\n- ['#valid_tag', '#valid_correlation'],\n- ['#adm1+code', '#adm1+name']\n+ ['#valid_tag', '#description', '#valid_correlation', '#valid_value+list'],\n+ ['#adm1+code', 'xxxxx', '#adm1+name', 'X001|X002']\n ]\n DATASET = [\n ['#adm1+name', '#adm1+code', '#adm1+code'],\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_short_problem_statement\",\n \"has_hyperlinks\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 3,\n \"test_score\": 2\n },\n \"num_modified_files\": 1\n}"},"version":{"kind":"string","value":"4.7"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\"\n ],\n \"pre_install\": null,\n \"python\": \"3.9\",\n \"reqs_path\": [\n \"requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"certifi==2025.1.31\ncharset-normalizer==3.4.1\nexceptiongroup==1.2.2\nidna==3.10\niniconfig==2.1.0\n-e git+https://github.com/HXLStandard/libhxl-python.git@1bc7e92a3844dd443f9e31f478357ea7b599c831#egg=libhxl\npackaging==24.2\npluggy==1.5.0\npytest==8.3.5\npython-dateutil==2.9.0.post0\npython-io-wrapper==0.3.1\nrequests==2.32.3\nsix==1.17.0\ntomli==2.2.1\nUnidecode==1.3.8\nurllib3==2.3.0\nxlrd==2.0.1\n"},"environment":{"kind":"string","value":"name: libhxl-python\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n 
- libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - pip=25.0=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - certifi==2025.1.31\n - charset-normalizer==3.4.1\n - exceptiongroup==1.2.2\n - idna==3.10\n - iniconfig==2.1.0\n - packaging==24.2\n - pluggy==1.5.0\n - pytest==8.3.5\n - python-dateutil==2.9.0.post0\n - python-io-wrapper==0.3.1\n - requests==2.32.3\n - six==1.17.0\n - tomli==2.2.1\n - unidecode==1.3.8\n - urllib3==2.3.0\n - xlrd==2.0.1\nprefix: /opt/conda/envs/libhxl-python\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/test_validation.py::TestValidateDataset::test_double_correlation"],"string":"[\n \"tests/test_validation.py::TestValidateDataset::test_double_correlation\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":["tests/test_validation.py::TestValidateDataset::test_default_schema","tests/test_validation.py::TestLoad::test_load_default","tests/test_validation.py::TestJSONSchema::test_truthy","tests/test_validation.py::TestJSONReport::test_default","tests/test_validation.py::TestJSONReport::test_top_level"],"string":"[\n \"tests/test_validation.py::TestValidateDataset::test_default_schema\",\n \"tests/test_validation.py::TestLoad::test_load_default\",\n \"tests/test_validation.py::TestJSONSchema::test_truthy\",\n \"tests/test_validation.py::TestJSONReport::test_default\",\n \"tests/test_validation.py::TestJSONReport::test_top_level\"\n]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/test_validation.py::TestTests::test_consistent_datatypes","tests/test_validation.py::TestTests::test_correlation","tests/test_validation.py::TestTests::test_datatype","tests/test_validation.py::TestTests::test_enumeration","tests/test_validation.py::TestTests::test_enumeration_suggested_values","tests/test_validation.py::TestTests::test_outliers","tests/test_validation.py::TestTests::test_range","tests/test_validation.py::TestTests::test_regex","tests/test_validation.py::TestTests::test_required","tests/test_validation.py::TestTests::test_spelling_case_insensitive","tests/test_validation.py::TestTests::test_spelling_case_sensitive","tests/test_validation.py::TestTests::test_unique_row","tests/test_validation.py::TestTests::test_unique_value","tests/test_validation.py::TestTests::test_whitespace","tests/test_validation.py::TestRule::test_datatype","tests/test_validation.py::TestRule::test_range","tests/test_validation.py::TestRule::test_regex","tests/test_validation.py::TestRule::test_row_restrictions","tests/test_validation.py::TestRule::test_value_enumeration","tests/test_validation.py::TestRule::test_whitespace","tests/test_validation.py::TestValidateColumns::test_bad_value_url","tests/test_validation.py::TestValidateColumns::test_min_occurs","tests/test_validation.py::TestValidateColumns::test_required","tests/test_validation.py::TestValidateRow::test_date","tests/test_validation.py::TestValidateRow::test_email","tests/test_validation.py::TestValidateRow::test_minmax","tests/test_validation.py::TestValidateRow::test_number","tests/test_validation.py::TestValidateRow::test_url","tests/test_validation.py::TestValidateDataset::test_consistent_datatype","tests/test_validation.py::TestValidateDataset::test_correlation","tests/test_validation.py::TestValidateDataset::test_different_
indicator_datatypes","tests/test_validation.py::TestValidateDataset::test_outliers","tests/test_validation.py::TestValidateDataset::test_spellings","tests/test_validation.py::TestValidateDataset::test_spellings_multiple","tests/test_validation.py::TestValidateDataset::test_suggested_value_correlation_key","tests/test_validation.py::TestValidateDataset::test_unique_compound","tests/test_validation.py::TestValidateDataset::test_unique_single","tests/test_validation.py::TestLoad::test_load_bad","tests/test_validation.py::TestLoad::test_load_good","tests/test_validation.py::TestJSONReport::test_errors"],"string":"[\n \"tests/test_validation.py::TestTests::test_consistent_datatypes\",\n \"tests/test_validation.py::TestTests::test_correlation\",\n \"tests/test_validation.py::TestTests::test_datatype\",\n \"tests/test_validation.py::TestTests::test_enumeration\",\n \"tests/test_validation.py::TestTests::test_enumeration_suggested_values\",\n \"tests/test_validation.py::TestTests::test_outliers\",\n \"tests/test_validation.py::TestTests::test_range\",\n \"tests/test_validation.py::TestTests::test_regex\",\n \"tests/test_validation.py::TestTests::test_required\",\n \"tests/test_validation.py::TestTests::test_spelling_case_insensitive\",\n \"tests/test_validation.py::TestTests::test_spelling_case_sensitive\",\n \"tests/test_validation.py::TestTests::test_unique_row\",\n \"tests/test_validation.py::TestTests::test_unique_value\",\n \"tests/test_validation.py::TestTests::test_whitespace\",\n \"tests/test_validation.py::TestRule::test_datatype\",\n \"tests/test_validation.py::TestRule::test_range\",\n \"tests/test_validation.py::TestRule::test_regex\",\n \"tests/test_validation.py::TestRule::test_row_restrictions\",\n \"tests/test_validation.py::TestRule::test_value_enumeration\",\n \"tests/test_validation.py::TestRule::test_whitespace\",\n \"tests/test_validation.py::TestValidateColumns::test_bad_value_url\",\n \"tests/test_validation.py::TestValidateColumns::test_min_occurs\",\n \"tests/test_validation.py::TestValidateColumns::test_required\",\n \"tests/test_validation.py::TestValidateRow::test_date\",\n \"tests/test_validation.py::TestValidateRow::test_email\",\n \"tests/test_validation.py::TestValidateRow::test_minmax\",\n \"tests/test_validation.py::TestValidateRow::test_number\",\n \"tests/test_validation.py::TestValidateRow::test_url\",\n \"tests/test_validation.py::TestValidateDataset::test_consistent_datatype\",\n \"tests/test_validation.py::TestValidateDataset::test_correlation\",\n \"tests/test_validation.py::TestValidateDataset::test_different_indicator_datatypes\",\n \"tests/test_validation.py::TestValidateDataset::test_outliers\",\n \"tests/test_validation.py::TestValidateDataset::test_spellings\",\n \"tests/test_validation.py::TestValidateDataset::test_spellings_multiple\",\n \"tests/test_validation.py::TestValidateDataset::test_suggested_value_correlation_key\",\n \"tests/test_validation.py::TestValidateDataset::test_unique_compound\",\n \"tests/test_validation.py::TestValidateDataset::test_unique_single\",\n \"tests/test_validation.py::TestLoad::test_load_bad\",\n \"tests/test_validation.py::TestLoad::test_load_good\",\n \"tests/test_validation.py::TestJSONReport::test_errors\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"The Unlicense"},"__index_level_0__":{"kind":"number","value":2604,"string":"2,604"},"num_tokens_patch":{"kind":"number","value":678,"string":"678"},"before_filepaths":{"kind":"list 
like","value":["hxl/validation.py"],"string":"[\n \"hxl/validation.py\"\n]"}}},{"rowIdx":590,"cells":{"instance_id":{"kind":"string","value":"graphql-python__graphene-752"},"base_commit":{"kind":"string","value":"332214ba9c545b6d899e181a34666540f02848fe"},"created_at":{"kind":"string","value":"2018-06-01 01:53:33"},"environment_setup_commit":{"kind":"string","value":"f039af2810806ab42521426777b3a0d061b02802"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/graphene/types/inputobjecttype.py b/graphene/types/inputobjecttype.py\nindex dbfccc4..b84fc0f 100644\n--- a/graphene/types/inputobjecttype.py\n+++ b/graphene/types/inputobjecttype.py\n@@ -50,7 +50,10 @@ class InputObjectType(UnmountedType, BaseType):\n yank_fields_from_attrs(base.__dict__, _as=InputField)\n )\n \n- _meta.fields = fields\n+ if _meta.fields:\n+ _meta.fields.update(fields)\n+ else:\n+ _meta.fields = fields\n if container is None:\n container = type(cls.__name__, (InputObjectTypeContainer, cls), {})\n _meta.container = container\n"},"problem_statement":{"kind":"string","value":"InputObjectType.__init_sublcass_with_meta__ overwrites passed _meta.fields\nIn `InputObjectType.__init_subclass_with_meta__`, the`fields` of the `_meta` arg are overwritten, which can cause complications for subclassing.\r\n\r\n\r\n @classmethod\r\n def __init_subclass_with_meta__(cls, container=None, _meta=None, **options):\r\n if not _meta:\r\n _meta = InputObjectTypeOptions(cls)\r\n\r\n fields = OrderedDict()\r\n for base in reversed(cls.__mro__):\r\n fields.update(\r\n yank_fields_from_attrs(base.__dict__, _as=InputField)\r\n )\r\n\r\n _meta.fields = fields\r\n\r\n # should this be:\r\n # if _meta.fields:\r\n # _meta.fields.update(fields)\r\n # else:\r\n # _meta.fields = fields\r\n"},"repo":{"kind":"string","value":"graphql-python/graphene"},"test_patch":{"kind":"string","value":"diff --git a/graphene/tests/issues/test_720.py b/graphene/tests/issues/test_720.py\nnew file mode 100644\nindex 0000000..8cd99bd\n--- /dev/null\n+++ b/graphene/tests/issues/test_720.py\n@@ -0,0 +1,44 @@\n+# https://github.com/graphql-python/graphene/issues/720\n+# InputObjectTypes overwrite the \"fields\" attribute of the provided\n+# _meta object, so even if dynamic fields are provided with a standard\n+# InputObjectTypeOptions, they are ignored.\n+\n+import graphene\n+\n+\n+class MyInputClass(graphene.InputObjectType):\n+\n+ @classmethod\n+ def __init_subclass_with_meta__(\n+ cls, container=None, _meta=None, fields=None, **options):\n+ if _meta is None:\n+ _meta = graphene.types.inputobjecttype.InputObjectTypeOptions(cls)\n+ _meta.fields = fields\n+ super(MyInputClass, cls).__init_subclass_with_meta__(\n+ container=container, _meta=_meta, **options)\n+\n+\n+class MyInput(MyInputClass):\n+\n+ class Meta:\n+ fields = dict(x=graphene.Field(graphene.Int))\n+\n+\n+class Query(graphene.ObjectType):\n+ myField = graphene.Field(graphene.String, input=graphene.Argument(MyInput))\n+\n+ def resolve_myField(parent, info, input):\n+ return 'ok'\n+\n+\n+def test_issue():\n+ query_string = '''\n+ query myQuery {\n+ myField(input: {x: 1})\n+ }\n+ '''\n+\n+ schema = graphene.Schema(query=Query)\n+ result = schema.execute(query_string)\n+\n+ assert not result.errors\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [],\n \"has_test_patch\": true,\n \"is_lite\": true,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 0,\n \"test_score\": 1\n },\n \"num_modified_files\": 
1\n}"},"version":{"kind":"string","value":"2.1"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[test]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"pytest\",\n \"pip_packages\": [\n \"pytest\"\n ],\n \"pre_install\": null,\n \"python\": \"3.6\",\n \"reqs_path\": null,\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"aniso8601==3.0.2\nattrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work\ncertifi==2021.5.30\ncharset-normalizer==2.0.12\ncoverage==6.2\ncoveralls==3.3.1\ndocopt==0.6.2\nfastdiff==0.3.0\n-e git+https://github.com/graphql-python/graphene.git@332214ba9c545b6d899e181a34666540f02848fe#egg=graphene\ngraphql-core==2.3.2\ngraphql-relay==0.4.5\nidna==3.10\nimportlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work\niniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work\niso8601==1.1.0\nmock==5.2.0\nmore-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work\npackaging @ file:///tmp/build/80754af9/packaging_1637314298585/work\npluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work\npromise==2.3\npy @ file:///opt/conda/conda-bld/py_1644396412707/work\npy-cpuinfo==9.0.0\npyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work\npytest==6.2.4\npytest-benchmark==3.4.1\npytest-cov==4.0.0\npytest-mock==3.6.1\npytz==2025.2\nrequests==2.27.1\nRx==1.6.3\nsix==1.17.0\nsnapshottest==0.6.0\ntermcolor==1.1.0\ntoml @ file:///tmp/build/80754af9/toml_1616166611790/work\ntomli==1.2.3\ntyping_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work\nurllib3==1.26.20\nwasmer==1.1.0\nwasmer-compiler-cranelift==1.1.0\nzipp @ file:///tmp/build/80754af9/zipp_1633618647012/work\n"},"environment":{"kind":"string","value":"name: graphene\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - attrs=21.4.0=pyhd3eb1b0_0\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - importlib-metadata=4.8.1=py36h06a4308_0\n - importlib_metadata=4.8.1=hd3eb1b0_0\n - iniconfig=1.1.1=pyhd3eb1b0_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - more-itertools=8.12.0=pyhd3eb1b0_0\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - packaging=21.3=pyhd3eb1b0_0\n - pip=21.2.2=py36h06a4308_0\n - pluggy=0.13.1=py36h06a4308_0\n - py=1.11.0=pyhd3eb1b0_0\n - pyparsing=3.0.4=pyhd3eb1b0_0\n - pytest=6.2.4=py36h06a4308_2\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - toml=0.10.2=pyhd3eb1b0_0\n - typing_extensions=4.1.1=pyh06a4308_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zipp=3.6.0=pyhd3eb1b0_0\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - aniso8601==3.0.2\n - charset-normalizer==2.0.12\n - coverage==6.2\n - coveralls==3.3.1\n - docopt==0.6.2\n - fastdiff==0.3.0\n - graphql-core==2.3.2\n - graphql-relay==0.4.5\n - idna==3.10\n - iso8601==1.1.0\n - mock==5.2.0\n - promise==2.3\n - py-cpuinfo==9.0.0\n - pytest-benchmark==3.4.1\n - pytest-cov==4.0.0\n - pytest-mock==3.6.1\n - pytz==2025.2\n - requests==2.27.1\n - 
rx==1.6.3\n - six==1.17.0\n - snapshottest==0.6.0\n - termcolor==1.1.0\n - tomli==1.2.3\n - urllib3==1.26.20\n - wasmer==1.1.0\n - wasmer-compiler-cranelift==1.1.0\nprefix: /opt/conda/envs/graphene\n"},"FAIL_TO_PASS":{"kind":"list like","value":["graphene/tests/issues/test_720.py::test_issue"],"string":"[\n \"graphene/tests/issues/test_720.py::test_issue\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"MIT License"},"__index_level_0__":{"kind":"number","value":2612,"string":"2,612"},"num_tokens_patch":{"kind":"number","value":165,"string":"165"},"before_filepaths":{"kind":"list like","value":["graphene/types/inputobjecttype.py"],"string":"[\n \"graphene/types/inputobjecttype.py\"\n]"}}},{"rowIdx":591,"cells":{"instance_id":{"kind":"string","value":"Yelp__swagger_spec_validator-95"},"base_commit":{"kind":"string","value":"40e1cc926775777ff2d56e271fd61697c6235579"},"created_at":{"kind":"string","value":"2018-06-05 10:37:03"},"environment_setup_commit":{"kind":"string","value":"40e1cc926775777ff2d56e271fd61697c6235579"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/swagger_spec_validator/validator20.py b/swagger_spec_validator/validator20.py\nindex fe17ded..002eb44 100644\n--- a/swagger_spec_validator/validator20.py\n+++ b/swagger_spec_validator/validator20.py\n@@ -268,6 +268,15 @@ def validate_defaults_in_definition(definition_spec, deref):\n validate_property_default(property_spec, deref)\n \n \n+def validate_arrays_in_definition(definition_spec, def_name=None):\n+ if definition_spec.get('type') == 'array' and 'items' not in definition_spec:\n+ raise SwaggerValidationError(\n+ 'Definition of type array must define `items` property{}.'.format(\n+ '' if not def_name else ' (definition {})'.format(def_name),\n+ ),\n+ )\n+\n+\n def validate_definition(definition, deref, def_name=None):\n definition = deref(definition)\n \n@@ -286,6 +295,7 @@ def validate_definition(definition, deref, def_name=None):\n )\n \n validate_defaults_in_definition(definition, deref)\n+ validate_arrays_in_definition(definition, def_name=def_name)\n \n if 'discriminator' in definition:\n required_props, not_required_props = get_collapsed_properties_type_mappings(definition, deref)\n"},"problem_statement":{"kind":"string","value":"Spec validation will not fail if items is not present and type is array\nThe following specs are not valid according to [Swagger 2.0 Specs](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#parameter-object), editor.swagger.io and according to `swagger-tools` npm package.\r\n```yaml\r\nswagger: '2.0'\r\ninfo:\r\n title: Example\r\nproduces:\r\n- application/json\r\npaths:\r\n /test:\r\n get:\r\n responses:\r\n '200':\r\n description: HTTP200\r\n schema:\r\n type: array\r\n```\r\n\r\nError reported by editor.swagger.io\r\n\r\n\r\nError reported by npm\r\n```\r\nAPI Errors:\r\n\r\n #/paths/~1test/get/responses/200/schema: Missing required property: items\r\n\r\n1 error and 0 warnings\r\n```"},"repo":{"kind":"string","value":"Yelp/swagger_spec_validator"},"test_patch":{"kind":"string","value":"diff --git a/tests/validator20/validate_definitions_test.py b/tests/validator20/validate_definitions_test.py\nindex 0b61dc0..6c2b6aa 100644\n--- a/tests/validator20/validate_definitions_test.py\n+++ b/tests/validator20/validate_definitions_test.py\n@@ -95,3 +95,30 
@@ def test_api_check_default_fails(property_spec, validator, instance):\n validation_error = excinfo.value.args[1]\n assert validation_error.instance == instance\n assert validation_error.validator == validator\n+\n+\n+def test_type_array_with_items_succeed_validation():\n+ definitions = {\n+ 'definition_1': {\n+ 'type': 'array',\n+ 'items': {\n+ 'type': 'string',\n+ },\n+ },\n+ }\n+\n+ # Success if no exception are raised\n+ validate_definitions(definitions, lambda x: x)\n+\n+\n+def test_type_array_without_items_succeed_fails():\n+ definitions = {\n+ 'definition_1': {\n+ 'type': 'array',\n+ },\n+ }\n+\n+ with pytest.raises(SwaggerValidationError) as excinfo:\n+ validate_definitions(definitions, lambda x: x)\n+\n+ assert str(excinfo.value) == 'Definition of type array must define `items` property (definition definition_1).'\ndiff --git a/tests/validator20/validate_spec_test.py b/tests/validator20/validate_spec_test.py\nindex 5bc9e53..981255c 100644\n--- a/tests/validator20/validate_spec_test.py\n+++ b/tests/validator20/validate_spec_test.py\n@@ -341,3 +341,37 @@ def test_failure_because_references_in_operation_responses():\n validate_spec(invalid_spec)\n assert 'GET /endpoint does not have a valid responses section. ' \\\n 'That section cannot be just a reference to another object.' in str(excinfo.value)\n+\n+\n+def test_type_array_with_items_succeed_validation(minimal_swagger_dict):\n+ minimal_swagger_dict['definitions'] = {\n+ 'definition_1': {\n+ 'type': 'array',\n+ 'items': {\n+ 'type': 'string',\n+ },\n+ },\n+ }\n+\n+ # Success if no exception are raised\n+ validate_spec(minimal_swagger_dict)\n+\n+\n+@pytest.mark.parametrize(\n+ 'swagger_dict_override',\n+ (\n+ {\n+ 'definitions': {\n+ 'definition_1': {\n+ 'type': 'array',\n+ },\n+ },\n+ },\n+ )\n+)\n+def test_type_array_without_items_succeed_fails(minimal_swagger_dict, swagger_dict_override):\n+ minimal_swagger_dict.update(swagger_dict_override)\n+ with pytest.raises(SwaggerValidationError) as excinfo:\n+ validate_spec(minimal_swagger_dict)\n+\n+ assert str(excinfo.value) == 'Definition of type array must define `items` property (definition definition_1).'\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_hyperlinks\",\n \"has_media\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 3,\n \"test_score\": 2\n },\n \"num_modified_files\": 1\n}"},"version":{"kind":"string","value":"2.1"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.6\",\n \"reqs_path\": [\n \"requirements-dev.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"attrs==22.2.0\ncertifi==2021.5.30\nhttpretty==1.1.4\nimportlib-metadata==4.8.3\niniconfig==1.1.1\njsonschema==3.2.0\nmock==5.2.0\npackaging==21.3\npluggy==1.0.0\npy==1.11.0\npyparsing==3.1.4\npyrsistent==0.18.0\npytest==7.0.1\nPyYAML==6.0.1\nsix==1.17.0\n-e 
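
The swagger_spec_validator record above adds a check that any schema of `type: array` also defines `items`, matching the behavior of editor.swagger.io and the npm `swagger-tools` validator cited in the problem statement. A standalone sketch of the same check, with a simplified exception class standing in for the library's `SwaggerValidationError`:

```python
class SwaggerValidationError(Exception):
    """Simplified stand-in for swagger_spec_validator's exception type."""

def check_array_has_items(definition_spec, def_name=None):
    # Mirrors validate_arrays_in_definition from the patch above.
    if definition_spec.get('type') == 'array' and 'items' not in definition_spec:
        raise SwaggerValidationError(
            'Definition of type array must define `items` property{}.'.format(
                '' if not def_name else ' (definition {})'.format(def_name)))

check_array_has_items({'type': 'array', 'items': {'type': 'string'}})  # passes
try:
    check_array_has_items({'type': 'array'}, def_name='definition_1')
except SwaggerValidationError as exc:
    print(exc)  # Definition of type array must define `items` property ...
```
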
git+https://github.com/Yelp/swagger_spec_validator.git@40e1cc926775777ff2d56e271fd61697c6235579#egg=swagger_spec_validator\ntomli==1.2.3\ntyping_extensions==4.1.1\nzipp==3.6.0\n"},"environment":{"kind":"string","value":"name: swagger_spec_validator\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - pip=21.2.2=py36h06a4308_0\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - attrs==22.2.0\n - httpretty==1.1.4\n - importlib-metadata==4.8.3\n - iniconfig==1.1.1\n - jsonschema==3.2.0\n - mock==5.2.0\n - packaging==21.3\n - pluggy==1.0.0\n - py==1.11.0\n - pyparsing==3.1.4\n - pyrsistent==0.18.0\n - pytest==7.0.1\n - pyyaml==6.0.1\n - six==1.17.0\n - tomli==1.2.3\n - typing-extensions==4.1.1\n - zipp==3.6.0\nprefix: /opt/conda/envs/swagger_spec_validator\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/validator20/validate_definitions_test.py::test_type_array_without_items_succeed_fails"],"string":"[\n \"tests/validator20/validate_definitions_test.py::test_type_array_without_items_succeed_fails\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":["tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec0]","tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec1]","tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec2]","tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec3]","tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec4]","tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec5]","tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec6]","tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec7]","tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec8]","tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec9]","tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec10]","tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec0-type-wrong_type]","tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec1-type-wrong_type]","tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec2-type-wrong_type]","tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec3-type-wrong_type]","tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec4-type-wrong_type]","tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec5-type-wrong_type]","tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec6-type--1]","tests/validator20/validate_de
finitions_test.py::test_api_check_default_fails[property_spec7-minLength-short_string]","tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec8-type-not_a_number_or_boolean]","tests/validator20/validate_spec_test.py::test_success","tests/validator20/validate_spec_test.py::test_definitons_not_present_success","tests/validator20/validate_spec_test.py::test_empty_definitions_success","tests/validator20/validate_spec_test.py::test_api_parameters_as_refs","tests/validator20/validate_spec_test.py::test_fails_on_invalid_external_ref_in_dict","tests/validator20/validate_spec_test.py::test_fails_on_invalid_external_ref_in_list","tests/validator20/validate_spec_test.py::test_recursive_ref","tests/validator20/validate_spec_test.py::test_recursive_ref_failure","tests/validator20/validate_spec_test.py::test_complicated_refs","tests/validator20/validate_spec_test.py::test_specs_with_discriminator","tests/validator20/validate_spec_test.py::test_specs_with_discriminator_fail_because_not_required","tests/validator20/validate_spec_test.py::test_specs_with_discriminator_fail_because_not_string","tests/validator20/validate_spec_test.py::test_specs_with_discriminator_fail_because_not_in_properties","tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf","tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf_fail_because_not_required","tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf_fail_because_not_string","tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf_fail_because_not_in_properties","tests/validator20/validate_spec_test.py::test_read_yaml_specs","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec0]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec1]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec2]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec3]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec4]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec5]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec6]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec7]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec8]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec9]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec10]","tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec11]","tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec0-type-wrong_type]","tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec1-type-wrong_type]","tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec2-type-wrong_type]","tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec3-type-wrong_type]","tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec4-type-wrong_type]","tests/validator20/validate_
spec_test.py::test_failure_due_to_wrong_default_type[property_spec5-type-wrong_type]","tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec6-type--1]","tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec7-minLength-short_string]","tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec8-type-not_a_number_or_boolean]","tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec9-enum-not_valid]","tests/validator20/validate_spec_test.py::test_ref_without_str_argument","tests/validator20/validate_spec_test.py::test_failure_because_references_in_operation_responses","tests/validator20/validate_spec_test.py::test_type_array_with_items_succeed_validation","tests/validator20/validate_spec_test.py::test_type_array_without_items_succeed_fails[swagger_dict_override0]"],"string":"[\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec0]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec1]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec2]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec3]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec4]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec5]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec6]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec7]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec8]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec9]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec10]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec0-type-wrong_type]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec1-type-wrong_type]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec2-type-wrong_type]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec3-type-wrong_type]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec4-type-wrong_type]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec5-type-wrong_type]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec6-type--1]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec7-minLength-short_string]\",\n \"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec8-type-not_a_number_or_boolean]\",\n \"tests/validator20/validate_spec_test.py::test_success\",\n \"tests/validator20/validate_spec_test.py::test_definitons_not_present_success\",\n \"tests/validator20/validate_spec_test.py::test_empty_definitions_success\",\n \"tests/validator20/validate_spec_test.py::test_api_parameters_as_refs\",\n \"tests/validator20/validate_spec_test.py::test_fails_on_invalid_external_ref_in_dict\",\n 
\"tests/validator20/validate_spec_test.py::test_fails_on_invalid_external_ref_in_list\",\n \"tests/validator20/validate_spec_test.py::test_recursive_ref\",\n \"tests/validator20/validate_spec_test.py::test_recursive_ref_failure\",\n \"tests/validator20/validate_spec_test.py::test_complicated_refs\",\n \"tests/validator20/validate_spec_test.py::test_specs_with_discriminator\",\n \"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_fail_because_not_required\",\n \"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_fail_because_not_string\",\n \"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_fail_because_not_in_properties\",\n \"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf\",\n \"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf_fail_because_not_required\",\n \"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf_fail_because_not_string\",\n \"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf_fail_because_not_in_properties\",\n \"tests/validator20/validate_spec_test.py::test_read_yaml_specs\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec0]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec1]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec2]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec3]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec4]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec5]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec6]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec7]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec8]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec9]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec10]\",\n \"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec11]\",\n \"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec0-type-wrong_type]\",\n \"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec1-type-wrong_type]\",\n \"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec2-type-wrong_type]\",\n \"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec3-type-wrong_type]\",\n \"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec4-type-wrong_type]\",\n \"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec5-type-wrong_type]\",\n \"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec6-type--1]\",\n \"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec7-minLength-short_string]\",\n 
\"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec8-type-not_a_number_or_boolean]\",\n \"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec9-enum-not_valid]\",\n \"tests/validator20/validate_spec_test.py::test_ref_without_str_argument\",\n \"tests/validator20/validate_spec_test.py::test_failure_because_references_in_operation_responses\",\n \"tests/validator20/validate_spec_test.py::test_type_array_with_items_succeed_validation\",\n \"tests/validator20/validate_spec_test.py::test_type_array_without_items_succeed_fails[swagger_dict_override0]\"\n]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/validator20/validate_definitions_test.py::test_type_array_with_items_succeed_validation"],"string":"[\n \"tests/validator20/validate_definitions_test.py::test_type_array_with_items_succeed_validation\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"Apache License 2.0"},"__index_level_0__":{"kind":"number","value":2620,"string":"2,620"},"num_tokens_patch":{"kind":"number","value":294,"string":"294"},"before_filepaths":{"kind":"list like","value":["swagger_spec_validator/validator20.py"],"string":"[\n \"swagger_spec_validator/validator20.py\"\n]"}}},{"rowIdx":592,"cells":{"instance_id":{"kind":"string","value":"pika__pika-1066"},"base_commit":{"kind":"string","value":"17aed0fa20f55ed3bc080320414badbb27046e8d"},"created_at":{"kind":"string","value":"2018-06-06 22:49:26"},"environment_setup_commit":{"kind":"string","value":"4c904dea651caaf2a54b0fca0b9e908dec18a4f8"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/examples/consume.py b/examples/consume.py\nindex da95d9e..7344149 100644\n--- a/examples/consume.py\n+++ b/examples/consume.py\n@@ -1,17 +1,15 @@\n+import functools\n+import logging\n import pika\n \n-def on_message(channel, method_frame, header_frame, body):\n- channel.queue_declare(queue=body, auto_delete=True)\n+LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '\n+ '-35s %(lineno) -5d: %(message)s')\n+LOGGER = logging.getLogger(__name__)\n \n- if body.startswith(\"queue:\"):\n- queue = body.replace(\"queue:\", \"\")\n- key = body + \"_key\"\n- print(\"Declaring queue %s bound with key %s\" %(queue, key))\n- channel.queue_declare(queue=queue, auto_delete=True)\n- channel.queue_bind(queue=queue, exchange=\"test_exchange\", routing_key=key)\n- else:\n- print(\"Message body\", body)\n+logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)\n \n+def on_message(channel, method_frame, header_frame, body, userdata=None):\n+ LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body))\n channel.basic_ack(delivery_tag=method_frame.delivery_tag)\n \n credentials = pika.PlainCredentials('guest', 'guest')\n@@ -24,7 +22,8 @@ channel.queue_declare(queue=\"standard\", auto_delete=True)\n channel.queue_bind(queue=\"standard\", exchange=\"test_exchange\", routing_key=\"standard_key\")\n channel.basic_qos(prefetch_count=1)\n \n-channel.basic_consume(on_message, 'standard')\n+on_message_callback = functools.partial(on_message, userdata='on_message_userdata')\n+channel.basic_consume(on_message_callback, 'standard')\n \n try:\n channel.start_consuming()\ndiff --git a/pika/heartbeat.py b/pika/heartbeat.py\nindex c02d5df..8d3d20a 100644\n--- a/pika/heartbeat.py\n+++ b/pika/heartbeat.py\n@@ -23,13 +23,22 @@ class HeartbeatChecker(object):\n :param pika.connection.Connection: Connection object\n 
:param int interval: Heartbeat check interval. Note: heartbeats will\n be sent at interval / 2 frequency.\n+ :param int idle_count: The number of heartbeat intervals without data\n+ received that will close the current connection.\n \n \"\"\"\n self._connection = connection\n+\n # Note: see the following document:\n # https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout\n self._interval = float(interval / 2)\n- self._max_idle_count = idle_count\n+\n+ # Note: even though we're sending heartbeats in half the specified\n+ # interval, the broker will be sending them to us at the specified\n+ # interval. This means we'll be checking for an idle connection\n+ # twice as many times as the broker will send heartbeats to us,\n+ # so we need to double the max idle count here\n+ self._max_idle_count = idle_count * 2\n \n # Initialize counters\n self._bytes_received = 0\n@@ -82,9 +91,12 @@ class HeartbeatChecker(object):\n been idle too long.\n \n \"\"\"\n- LOGGER.debug('Received %i heartbeat frames, sent %i',\n+ LOGGER.debug('Received %i heartbeat frames, sent %i, '\n+ 'idle intervals %i, max idle count %i',\n self._heartbeat_frames_received,\n- self._heartbeat_frames_sent)\n+ self._heartbeat_frames_sent,\n+ self._idle_byte_intervals,\n+ self._max_idle_count)\n \n if self.connection_is_idle:\n return self._close_connection()\n"},"problem_statement":{"kind":"string","value":"HeartbeatChecker is confused about heartbeat timeouts\ncc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate.\r\n\r\n`HeartbeatChecker` constructor presently accepts an interval value and an `idle_count` which defaults to 2. `Connection` class instantiates `HeartbeatChecker` with `interval=hearbeat_timeout` and default `idle_count`.\r\n\r\nSo, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. So, `HearbeatChecker` will emit heartbeats to the broker only once every 600 seconds. And it will detect heartbeat timeout after 1200 seconds.\r\n\r\nSo, in the event that receipt of the heartbeat by the broker is slightly delayed (and in absence of any other AMQP frames from the client), the broker can erroneously conclude that connection with the client is lost and prematurely close the connection.\r\n\r\nThis is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity. And it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window.\r\n\r\nI see two problems here:\r\n\r\n1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as`HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2) or something like that (how often does RabbitMQ broker send heartbeats within one heartbeat timeout interval?)\r\n2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. 
It's constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.\r\n"},"repo":{"kind":"string","value":"pika/pika"},"test_patch":{"kind":"string","value":"diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py\nindex fa97338..f0431c2 100644\n--- a/tests/unit/heartbeat_tests.py\n+++ b/tests/unit/heartbeat_tests.py\n@@ -29,7 +29,7 @@ class HeartbeatTests(unittest.TestCase):\n self.assertEqual(self.obj._interval, self.HALF_INTERVAL)\n \n def test_default_initialization_max_idle_count(self):\n- self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT)\n+ self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT * 2)\n \n def test_constructor_assignment_connection(self):\n self.assertIs(self.obj._connection, self.mock_conn)\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 0,\n \"test_score\": 2\n },\n \"num_modified_files\": 2\n}"},"version":{"kind":"string","value":"0.12"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\",\n \"mock\",\n \"cryptography\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.9\",\n \"reqs_path\": [\n \"test-requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"certifi==2025.1.31\ncffi==1.17.1\ncharset-normalizer==3.4.1\ncodecov==2.1.13\ncoverage==7.8.0\ncryptography==44.0.2\nexceptiongroup==1.2.2\nidna==3.10\niniconfig==2.1.0\nmock==5.2.0\nnose==1.3.7\npackaging==24.2\n-e git+https://github.com/pika/pika.git@17aed0fa20f55ed3bc080320414badbb27046e8d#egg=pika\npluggy==1.5.0\npycparser==2.22\npytest==8.3.5\nrequests==2.32.3\ntomli==2.2.1\ntornado==6.4.2\nTwisted==15.3.0\nurllib3==2.3.0\nzope.interface==7.2\n"},"environment":{"kind":"string","value":"name: pika\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - pip=25.0=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - certifi==2025.1.31\n - cffi==1.17.1\n - charset-normalizer==3.4.1\n - codecov==2.1.13\n - coverage==7.8.0\n - cryptography==44.0.2\n - exceptiongroup==1.2.2\n - idna==3.10\n - iniconfig==2.1.0\n - mock==5.2.0\n - nose==1.3.7\n - packaging==24.2\n - pluggy==1.5.0\n - pycparser==2.22\n - pytest==8.3.5\n - requests==2.32.3\n - tomli==2.2.1\n - tornado==6.4.2\n - twisted==15.3.0\n - 
urllib3==2.3.0\n - zope-interface==7.2\nprefix: /opt/conda/envs/pika\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_max_idle_count"],"string":"[\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_max_idle_count\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_false","tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_true","tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection","tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close","tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false","tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_heartbeat_interval","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_setup_timer","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals","tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_interval","tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false","tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true","tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame","tests/unit/heartbeat_tests.py::HeartbeatTests::test_received","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_bytes","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_no_bytes","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_missed_bytes","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_not_closed","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_send_heartbeat_frame","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_start_timer","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_update_counters","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called","tests/unit/heartbeat_tests.py::HeartbeatTests::test_setup_timer_called","tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active","tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_not_active","tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received","tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent"],"string":"[\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_false\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_true\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false\",\n 
\"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_heartbeat_interval\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_setup_timer\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_interval\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_received\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_bytes\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_no_bytes\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_missed_bytes\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_not_closed\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_send_heartbeat_frame\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_start_timer\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_update_counters\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_setup_timer_called\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_not_active\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"BSD 3-Clause \"New\" or \"Revised\" License"},"__index_level_0__":{"kind":"number","value":2632,"string":"2,632"},"num_tokens_patch":{"kind":"number","value":864,"string":"864"},"before_filepaths":{"kind":"list like","value":["examples/consume.py","pika/heartbeat.py"],"string":"[\n \"examples/consume.py\",\n \"pika/heartbeat.py\"\n]"}}},{"rowIdx":593,"cells":{"instance_id":{"kind":"string","value":"oasis-open__cti-stix-validator-55"},"base_commit":{"kind":"string","value":"f3dcf83c352c99b5190e9697db7149ce3baf5961"},"created_at":{"kind":"string","value":"2018-06-08 12:26:02"},"environment_setup_commit":{"kind":"string","value":"120c27adf9db76511d01e696d234c35d45f2face"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/stix2validator/scripts/stix2_validator.py b/stix2validator/scripts/stix2_validator.py\nindex 15bd7b0..8dda167 100644\n--- a/stix2validator/scripts/stix2_validator.py\n+++ 
b/stix2validator/scripts/stix2_validator.py\n@@ -292,9 +292,6 @@ def main():\n options = ValidationOptions(args)\n \n try:\n- # Set the output level (e.g., quiet vs. verbose)\n- output.set_level(options.verbose)\n-\n if not options.no_cache:\n init_requests_cache(options.refresh_cache)\n \ndiff --git a/stix2validator/util.py b/stix2validator/util.py\nindex 4da0be5..327931f 100644\n--- a/stix2validator/util.py\n+++ b/stix2validator/util.py\n@@ -1,5 +1,7 @@\n from collections import Iterable\n \n+from .output import error, set_level, set_silent\n+\n \n class ValidationOptions(object):\n \"\"\"Collection of validation options which can be set via command line or\n@@ -72,6 +74,12 @@ class ValidationOptions(object):\n self.refresh_cache = refresh_cache\n self.clear_cache = clear_cache\n \n+ # Set the output level (e.g., quiet vs. verbose)\n+ if self.silent and self.verbose:\n+ error('Error: Output can either be silent or verbose, but not both.')\n+ set_level(self.verbose)\n+ set_silent(self.silent)\n+\n # Convert string of comma-separated checks to a list,\n # and convert check code numbers to names\n if self.disabled:\n"},"problem_statement":{"kind":"string","value":"handle options --verbose and --silent correctly\nRelated to #50 \r\n\r\nThe correct combination of these two should be as follows:\r\n\r\n|--verbose | --silent | desired behavior |\r\n| --- | --- | --- |\r\n|absent (default is False) | absent (default is False) | all messages except those printed by info |\r\n|absent (default is False) | present (True) | no messages printed\r\n| present (True) | absent (default is False) | all messages, including info are printed\r\n| present (True) | present (True) | error |\r\n\r\nCurrent behavior is:\r\n\r\n|--verbose | --silent | current behavior |\r\n| --- | --- | --- |\r\n|absent (default is False) | absent (default is False) | all messages except those printed by info |\r\n|absent (default is False) | present (ignored, so the default - False) | all messages except those printed by info |\r\n| present (True) | absent (default is False) | all messages, including info are printed\r\n| present (True) | present (ignored, so the default - False) | all messages, including info are printed |"},"repo":{"kind":"string","value":"oasis-open/cti-stix-validator"},"test_patch":{"kind":"string","value":"diff --git a/stix2validator/test/bundle_tests.py b/stix2validator/test/bundle_tests.py\nindex 8f417bd..52235ba 100644\n--- a/stix2validator/test/bundle_tests.py\n+++ b/stix2validator/test/bundle_tests.py\n@@ -1,6 +1,8 @@\n import copy\n import json\n \n+import pytest\n+\n from . 
import ValidatorTest\n \n VALID_BUNDLE = u\"\"\"\n@@ -51,3 +53,8 @@ class BundleTestCases(ValidatorTest):\n \n bundle['objects'][1]['modified'] = \"2017-06-22T14:09:00.123Z\"\n self.assertTrueWithOptions(bundle)\n+\n+ def test_silent_and_verbose(self):\n+ bundle = json.loads(VALID_BUNDLE)\n+ with pytest.raises(SystemExit):\n+ self.assertFalseWithOptions(bundle, silent=True, verbose=True)\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_issue_reference\",\n \"has_many_modified_files\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 0,\n \"test_score\": 1\n },\n \"num_modified_files\": 2\n}"},"version":{"kind":"string","value":"0.5"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\",\n \"pytest-cov\",\n \"coverage\"\n ],\n \"pre_install\": null,\n \"python\": \"3.6\",\n \"reqs_path\": [\n \"requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"alabaster==0.7.13\nantlr4-python3-runtime==4.9.3\nappdirs==1.4.4\nattrs==21.4.0\nBabel==2.11.0\nbump2version==1.0.1\nbumpversion==0.6.0\ncertifi==2021.5.30\ncfgv==3.3.1\ncharset-normalizer==2.0.12\ncolorama==0.4.5\ncoverage==6.2\ndistlib==0.3.9\ndocutils==0.18.1\nfilelock==3.4.1\nidentify==2.4.4\nidna==3.10\nimagesize==1.4.1\nimportlib-metadata==4.8.3\nimportlib-resources==5.2.3\niniconfig==1.1.1\nitsdangerous==2.0.1\nJinja2==3.0.3\njsonschema==2.5.1\nMarkupSafe==2.0.1\nnodeenv==1.6.0\npackaging==21.3\nplatformdirs==2.4.0\npluggy==1.0.0\npre-commit==2.17.0\npy==1.11.0\nPygments==2.14.0\npyparsing==3.1.4\npytest==7.0.1\npytest-cov==4.0.0\npython-dateutil==2.9.0.post0\npytz==2025.2\nPyYAML==6.0.1\nrequests==2.27.1\nrequests-cache==0.7.5\nsimplejson==3.20.1\nsix==1.17.0\nsnowballstemmer==2.2.0\nSphinx==5.3.0\nsphinx-prompt==1.5.0\nsphinxcontrib-applehelp==1.0.2\nsphinxcontrib-devhelp==1.0.2\nsphinxcontrib-htmlhelp==2.0.0\nsphinxcontrib-jsmath==1.0.1\nsphinxcontrib-qthelp==1.0.3\nsphinxcontrib-serializinghtml==1.1.5\nstix2-patterns==2.0.0\n-e git+https://github.com/oasis-open/cti-stix-validator.git@f3dcf83c352c99b5190e9697db7149ce3baf5961#egg=stix2_validator\ntoml==0.10.2\ntomli==1.2.3\ntox==3.28.0\ntyping_extensions==4.1.1\nurl-normalize==1.4.3\nurllib3==1.26.20\nvirtualenv==20.16.2\nzipp==3.6.0\n"},"environment":{"kind":"string","value":"name: cti-stix-validator\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - pip=21.2.2=py36h06a4308_0\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - alabaster==0.7.13\n - antlr4-python3-runtime==4.9.3\n - appdirs==1.4.4\n 
- attrs==21.4.0\n - babel==2.11.0\n - bump2version==1.0.1\n - bumpversion==0.6.0\n - cfgv==3.3.1\n - charset-normalizer==2.0.12\n - colorama==0.4.5\n - coverage==6.2\n - distlib==0.3.9\n - docutils==0.18.1\n - filelock==3.4.1\n - identify==2.4.4\n - idna==3.10\n - imagesize==1.4.1\n - importlib-metadata==4.8.3\n - importlib-resources==5.2.3\n - iniconfig==1.1.1\n - itsdangerous==2.0.1\n - jinja2==3.0.3\n - jsonschema==2.5.1\n - markupsafe==2.0.1\n - nodeenv==1.6.0\n - packaging==21.3\n - platformdirs==2.4.0\n - pluggy==1.0.0\n - pre-commit==2.17.0\n - py==1.11.0\n - pygments==2.14.0\n - pyparsing==3.1.4\n - pytest==7.0.1\n - pytest-cov==4.0.0\n - python-dateutil==2.9.0.post0\n - pytz==2025.2\n - pyyaml==6.0.1\n - requests==2.27.1\n - requests-cache==0.7.5\n - simplejson==3.20.1\n - six==1.17.0\n - snowballstemmer==2.2.0\n - sphinx==5.3.0\n - sphinx-prompt==1.5.0\n - sphinxcontrib-applehelp==1.0.2\n - sphinxcontrib-devhelp==1.0.2\n - sphinxcontrib-htmlhelp==2.0.0\n - sphinxcontrib-jsmath==1.0.1\n - sphinxcontrib-qthelp==1.0.3\n - sphinxcontrib-serializinghtml==1.1.5\n - stix2-patterns==2.0.0\n - toml==0.10.2\n - tomli==1.2.3\n - tox==3.28.0\n - typing-extensions==4.1.1\n - url-normalize==1.4.3\n - urllib3==1.26.20\n - virtualenv==20.16.2\n - zipp==3.6.0\nprefix: /opt/conda/envs/cti-stix-validator\n"},"FAIL_TO_PASS":{"kind":"list like","value":["stix2validator/test/bundle_tests.py::BundleTestCases::test_silent_and_verbose"],"string":"[\n \"stix2validator/test/bundle_tests.py::BundleTestCases::test_silent_and_verbose\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":["stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_duplicate_ids","stix2validator/test/bundle_tests.py::BundleTestCases::test_wellformed_bundle"],"string":"[\n \"stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_duplicate_ids\",\n \"stix2validator/test/bundle_tests.py::BundleTestCases::test_wellformed_bundle\"\n]"},"PASS_TO_PASS":{"kind":"list like","value":["stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_created","stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_object_categories","stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_version"],"string":"[\n \"stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_created\",\n \"stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_object_categories\",\n \"stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_version\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"BSD 3-Clause \"New\" or \"Revised\" License"},"__index_level_0__":{"kind":"number","value":2640,"string":"2,640"},"num_tokens_patch":{"kind":"number","value":375,"string":"375"},"before_filepaths":{"kind":"list like","value":["stix2validator/scripts/stix2_validator.py","stix2validator/util.py"],"string":"[\n \"stix2validator/scripts/stix2_validator.py\",\n \"stix2validator/util.py\"\n]"}}},{"rowIdx":594,"cells":{"instance_id":{"kind":"string","value":"attwad__python-osc-67"},"base_commit":{"kind":"string","value":"73777b367ac4327e9fd0b799366959e50266ebc2"},"created_at":{"kind":"string","value":"2018-06-08 20:16:51"},"environment_setup_commit":{"kind":"string","value":"73777b367ac4327e9fd0b799366959e50266ebc2"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/pythonosc/osc_message.py b/pythonosc/osc_message.py\nindex 911dbcf..b89a0d6 100644\n--- a/pythonosc/osc_message.py\n+++ b/pythonosc/osc_message.py\n@@ 
-41,6 +41,8 @@ class OscMessage(object):\n val, index = osc_types.get_int(self._dgram, index)\n elif param == \"f\": # Float.\n val, index = osc_types.get_float(self._dgram, index)\n+ elif param == \"d\": # Double.\n+ val, index = osc_types.get_double(self._dgram, index)\n elif param == \"s\": # String.\n val, index = osc_types.get_string(self._dgram, index)\n elif param == \"b\": # Blob.\ndiff --git a/pythonosc/osc_message_builder.py b/pythonosc/osc_message_builder.py\nindex 0f9bfba..28128fb 100644\n--- a/pythonosc/osc_message_builder.py\n+++ b/pythonosc/osc_message_builder.py\n@@ -12,6 +12,7 @@ class OscMessageBuilder(object):\n \"\"\"Builds arbitrary OscMessage instances.\"\"\"\n \n ARG_TYPE_FLOAT = \"f\"\n+ ARG_TYPE_DOUBLE = \"d\"\n ARG_TYPE_INT = \"i\"\n ARG_TYPE_STRING = \"s\"\n ARG_TYPE_BLOB = \"b\"\n@@ -24,8 +25,8 @@ class OscMessageBuilder(object):\n ARG_TYPE_ARRAY_STOP = \"]\"\n \n _SUPPORTED_ARG_TYPES = (\n- ARG_TYPE_FLOAT, ARG_TYPE_INT, ARG_TYPE_BLOB, ARG_TYPE_STRING, ARG_TYPE_RGBA,\n- ARG_TYPE_MIDI, ARG_TYPE_TRUE, ARG_TYPE_FALSE)\n+ ARG_TYPE_FLOAT, ARG_TYPE_DOUBLE, ARG_TYPE_INT, ARG_TYPE_BLOB, ARG_TYPE_STRING,\n+ ARG_TYPE_RGBA, ARG_TYPE_MIDI, ARG_TYPE_TRUE, ARG_TYPE_FALSE)\n \n def __init__(self, address=None):\n \"\"\"Initialize a new builder for a message.\n@@ -143,6 +144,8 @@ class OscMessageBuilder(object):\n dgram += osc_types.write_int(value)\n elif arg_type == self.ARG_TYPE_FLOAT:\n dgram += osc_types.write_float(value)\n+ elif arg_type == self.ARG_TYPE_DOUBLE:\n+ dgram += osc_types.write_double(value)\n elif arg_type == self.ARG_TYPE_BLOB:\n dgram += osc_types.write_blob(value)\n elif arg_type == self.ARG_TYPE_RGBA:\ndiff --git a/pythonosc/parsing/osc_types.py b/pythonosc/parsing/osc_types.py\nindex a91003b..5558399 100644\n--- a/pythonosc/parsing/osc_types.py\n+++ b/pythonosc/parsing/osc_types.py\n@@ -21,6 +21,7 @@ IMMEDIATELY = 0\n # Datagram length in bytes for types that have a fixed size.\n _INT_DGRAM_LEN = 4\n _FLOAT_DGRAM_LEN = 4\n+_DOUBLE_DGRAM_LEN = 8\n _DATE_DGRAM_LEN = _INT_DGRAM_LEN * 2\n # Strings and blob dgram length is always a multiple of 4 bytes.\n _STRING_DGRAM_PAD = 4\n@@ -199,6 +200,42 @@ def get_float(dgram, start_index):\n raise ParseError('Could not parse datagram %s' % e)\n \n \n+def write_double(val):\n+ \"\"\"Returns the datagram for the given double parameter value\n+\n+ Raises:\n+ - BuildError if the double could not be converted.\n+ \"\"\"\n+ try:\n+ return struct.pack('>d', val)\n+ except struct.error as e:\n+ raise BuildError('Wrong argument value passed: {}'.format(e))\n+\n+\n+def get_double(dgram, start_index):\n+ \"\"\"Get a 64-bit big-endian IEEE 754 floating point number from the datagram.\n+\n+ Args:\n+ dgram: A datagram packet.\n+ start_index: An index where the double starts in the datagram.\n+\n+ Returns:\n+ A tuple containing the double and the new end index.\n+\n+ Raises:\n+ ParseError if the datagram could not be parsed.\n+ \"\"\"\n+ try:\n+ if len(dgram[start_index:]) < _DOUBLE_DGRAM_LEN:\n+ raise ParseError('Datagram is too short')\n+ return (\n+ struct.unpack('>d',\n+ dgram[start_index:start_index + _DOUBLE_DGRAM_LEN])[0],\n+ start_index + _DOUBLE_DGRAM_LEN)\n+ except (struct.error, TypeError) as e:\n+ raise ParseError('Could not parse datagram {}'.format(e))\n+\n+\n def get_blob(dgram, start_index):\n \"\"\" Get a blob from the datagram.\n \n"},"problem_statement":{"kind":"string","value":"Add support for 64 bits double type\n`unhandled type: d` warnings are all that gets returned, no handlers even end up 
running."},"repo":{"kind":"string","value":"attwad/python-osc"},"test_patch":{"kind":"string","value":"diff --git a/pythonosc/test/parsing/test_osc_types.py b/pythonosc/test/parsing/test_osc_types.py\nindex 0863fd5..8734ad1 100644\n--- a/pythonosc/test/parsing/test_osc_types.py\n+++ b/pythonosc/test/parsing/test_osc_types.py\n@@ -232,6 +232,39 @@ class TestFloat(unittest.TestCase):\n self.assertEqual((0, 4), osc_types.get_float(dgram, 0))\n \n \n+class TestDouble(unittest.TestCase):\n+\n+ def test_get_double(self):\n+ cases = {\n+ b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00': (0.0, 8),\n+ b'?\\xf0\\x00\\x00\\x00\\x00\\x00\\x00': (1.0, 8),\n+ b'@\\x00\\x00\\x00\\x00\\x00\\x00\\x00': (2.0, 8),\n+ b'\\xbf\\xf0\\x00\\x00\\x00\\x00\\x00\\x00': (-1.0, 8),\n+ b'\\xc0\\x00\\x00\\x00\\x00\\x00\\x00\\x00': (-2.0, 8),\n+\n+ b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00GARBAGE\": (0.0, 8),\n+ }\n+\n+ for dgram, expected in cases.items():\n+ self.assertAlmostEqual(expected, osc_types.get_double(dgram, 0))\n+\n+ def test_get_double_raises_on_wrong_dgram(self):\n+ cases = [True]\n+\n+ for case in cases:\n+ self.assertRaises(osc_types.ParseError, osc_types.get_double, case, 0)\n+\n+ def test_get_double_raises_on_type_error(self):\n+ cases = [None]\n+\n+ for case in cases:\n+ self.assertRaises(osc_types.ParseError, osc_types.get_double, case, 0)\n+\n+ def test_datagram_too_short_pads(self):\n+ dgram = b'\\x00' * 2\n+ self.assertRaises(osc_types.ParseError, osc_types.get_double, dgram, 0)\n+\n+\n class TestBlob(unittest.TestCase):\n \n def test_get_blob(self):\ndiff --git a/pythonosc/test/test_osc_message_builder.py b/pythonosc/test/test_osc_message_builder.py\nindex c9720b4..d5bbe25 100644\n--- a/pythonosc/test/test_osc_message_builder.py\n+++ b/pythonosc/test/test_osc_message_builder.py\n@@ -43,14 +43,15 @@ class TestOscMessageBuilder(unittest.TestCase):\n builder.add_arg([1, [\"abc\"]], [builder.ARG_TYPE_INT, [builder.ARG_TYPE_STRING]])\n builder.add_arg(4278255360, builder.ARG_TYPE_RGBA)\n builder.add_arg((1, 145, 36, 125), builder.ARG_TYPE_MIDI)\n- self.assertEqual(len(\"fisTFb[i[s]]\")*2+2, len(builder.args))\n+ builder.add_arg(1e-9, builder.ARG_TYPE_DOUBLE)\n+ self.assertEqual(len(\"fisTFb[i[s]]\")*2+3, len(builder.args))\n self.assertEqual(\"/SYNC\", builder.address)\n builder.address = '/SEEK'\n msg = builder.build()\n self.assertEqual(\"/SEEK\", msg.address)\n self.assertSequenceEqual(\n [4.0, 2, \"value\", True, False, b\"\\x01\\x02\\x03\", [1, [\"abc\"]]] * 2 +\n- [4278255360, (1, 145, 36, 125)],\n+ [4278255360, (1, 145, 36, 125), 1e-9],\n msg.params)\n \n def test_long_list(self):\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_short_problem_statement\",\n \"has_many_modified_files\",\n \"has_many_hunks\",\n \"has_pytest_match_arg\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 2,\n \"test_score\": 0\n },\n \"num_modified_files\": 3\n}"},"version":{"kind":"string","value":"unknown"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"pytest\",\n \"pip_packages\": [\n \"pytest\"\n ],\n \"pre_install\": null,\n \"python\": \"3.9\",\n \"reqs_path\": null,\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W 
ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work\niniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work\npackaging @ file:///croot/packaging_1734472117206/work\npluggy @ file:///croot/pluggy_1733169602837/work\npytest @ file:///croot/pytest_1738938843180/work\n-e git+https://github.com/attwad/python-osc.git@73777b367ac4327e9fd0b799366959e50266ebc2#egg=python_osc\ntomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work\n"},"environment":{"kind":"string","value":"name: python-osc\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - exceptiongroup=1.2.0=py39h06a4308_0\n - iniconfig=1.1.1=pyhd3eb1b0_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - packaging=24.2=py39h06a4308_0\n - pip=25.0=py39h06a4308_0\n - pluggy=1.5.0=py39h06a4308_0\n - pytest=8.3.4=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tomli=2.0.1=py39h06a4308_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\nprefix: /opt/conda/envs/python-osc\n"},"FAIL_TO_PASS":{"kind":"list like","value":["pythonosc/test/parsing/test_osc_types.py::TestDouble::test_datagram_too_short_pads","pythonosc/test/parsing/test_osc_types.py::TestDouble::test_get_double","pythonosc/test/parsing/test_osc_types.py::TestDouble::test_get_double_raises_on_type_error","pythonosc/test/parsing/test_osc_types.py::TestDouble::test_get_double_raises_on_wrong_dgram","pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_all_param_types"],"string":"[\n \"pythonosc/test/parsing/test_osc_types.py::TestDouble::test_datagram_too_short_pads\",\n \"pythonosc/test/parsing/test_osc_types.py::TestDouble::test_get_double\",\n \"pythonosc/test/parsing/test_osc_types.py::TestDouble::test_get_double_raises_on_type_error\",\n \"pythonosc/test/parsing/test_osc_types.py::TestDouble::test_get_double_raises_on_wrong_dgram\",\n \"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_all_param_types\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list 
like","value":["pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string","pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string_raises_on_wrong_dgram","pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string_raises_on_wrong_start_index_negative","pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string_raises_when_datagram_too_short","pythonosc/test/parsing/test_osc_types.py::TestInteger::test_datagram_too_short","pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer","pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer_raises_on_type_error","pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer_raises_on_wrong_start_index","pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer_raises_on_wrong_start_index_negative","pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_datagram_too_short","pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba","pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba_raises_on_type_error","pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba_raises_on_wrong_start_index","pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba_raises_on_wrong_start_index_negative","pythonosc/test/parsing/test_osc_types.py::TestMidi::test_datagram_too_short","pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi","pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi_raises_on_type_error","pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi_raises_on_wrong_start_index","pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi_raises_on_wrong_start_index_negative","pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag","pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag_raises_on_type_error","pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag_raises_on_wrong_start_index","pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag_raises_on_wrong_start_index_negative","pythonosc/test/parsing/test_osc_types.py::TestDate::test_ttag_datagram_too_short","pythonosc/test/parsing/test_osc_types.py::TestFloat::test_datagram_too_short_pads","pythonosc/test/parsing/test_osc_types.py::TestFloat::test_get_float","pythonosc/test/parsing/test_osc_types.py::TestFloat::test_get_float_raises_on_type_error","pythonosc/test/parsing/test_osc_types.py::TestFloat::test_get_float_raises_on_wrong_dgram","pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob","pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob_raises_on_wrong_dgram","pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob_raises_on_wrong_start_index","pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob_raises_too_short_buffer","pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blog_raises_on_wrong_start_index_negative","pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_datagram_too_short","pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_immediately_dgram","pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_origin_of_time","pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_write_date","pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_blob","pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_blob_raises","pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_float","pythonosc/test/parsing/test_osc_types.py::
TestBuildMethods::test_float_raises","pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_int","pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_int_raises","pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_string","pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_string_raises","pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_add_arg_invalid_infered_type","pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_bool_encoding","pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_build_noarg_message","pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_build_wrong_type_raises","pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_just_address","pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_long_list","pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_no_address_raises","pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_wrong_param_raise"],"string":"[\n \"pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string\",\n \"pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string_raises_on_wrong_dgram\",\n \"pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string_raises_on_wrong_start_index_negative\",\n \"pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string_raises_when_datagram_too_short\",\n \"pythonosc/test/parsing/test_osc_types.py::TestInteger::test_datagram_too_short\",\n \"pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer\",\n \"pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer_raises_on_type_error\",\n \"pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer_raises_on_wrong_start_index\",\n \"pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer_raises_on_wrong_start_index_negative\",\n \"pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_datagram_too_short\",\n \"pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba\",\n \"pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba_raises_on_type_error\",\n \"pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba_raises_on_wrong_start_index\",\n \"pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba_raises_on_wrong_start_index_negative\",\n \"pythonosc/test/parsing/test_osc_types.py::TestMidi::test_datagram_too_short\",\n \"pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi\",\n \"pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi_raises_on_type_error\",\n \"pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi_raises_on_wrong_start_index\",\n \"pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi_raises_on_wrong_start_index_negative\",\n \"pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag\",\n \"pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag_raises_on_type_error\",\n \"pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag_raises_on_wrong_start_index\",\n \"pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag_raises_on_wrong_start_index_negative\",\n \"pythonosc/test/parsing/test_osc_types.py::TestDate::test_ttag_datagram_too_short\",\n \"pythonosc/test/parsing/test_osc_types.py::TestFloat::test_datagram_too_short_pads\",\n \"pythonosc/test/parsing/test_osc_types.py::TestFloat::test_get_float\",\n 
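The double support added by this record's patch reduces to one `struct` format. A minimal sketch of the round trip, assuming only the Python standard library; the function bodies mirror the patch's `write_double`/`get_double` with the error handling stripped, and the 1e-9 value comes from the record's tests:

```python
import struct

_DOUBLE_DGRAM_LEN = 8  # fixed size defined in the patch

def write_double(val):
    # OSC 'd' arguments are 64-bit big-endian IEEE 754 doubles.
    return struct.pack('>d', val)

def get_double(dgram, start_index):
    # Returns (value, new_end_index), mirroring the other get_* helpers.
    return (struct.unpack(
        '>d', dgram[start_index:start_index + _DOUBLE_DGRAM_LEN])[0],
        start_index + _DOUBLE_DGRAM_LEN)

dgram = write_double(1e-9)
value, index = get_double(dgram, 0)
assert (value, index) == (1e-9, 8)
```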
\"pythonosc/test/parsing/test_osc_types.py::TestFloat::test_get_float_raises_on_type_error\",\n \"pythonosc/test/parsing/test_osc_types.py::TestFloat::test_get_float_raises_on_wrong_dgram\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob_raises_on_wrong_dgram\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob_raises_on_wrong_start_index\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob_raises_too_short_buffer\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blog_raises_on_wrong_start_index_negative\",\n \"pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_datagram_too_short\",\n \"pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_immediately_dgram\",\n \"pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_origin_of_time\",\n \"pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_write_date\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_blob\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_blob_raises\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_float\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_float_raises\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_int\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_int_raises\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_string\",\n \"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_string_raises\",\n \"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_add_arg_invalid_infered_type\",\n \"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_bool_encoding\",\n \"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_build_noarg_message\",\n \"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_build_wrong_type_raises\",\n \"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_just_address\",\n \"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_long_list\",\n \"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_no_address_raises\",\n \"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_wrong_param_raise\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"The Unlicense"},"__index_level_0__":{"kind":"number","value":2642,"string":"2,642"},"num_tokens_patch":{"kind":"number","value":1065,"string":"1,065"},"before_filepaths":{"kind":"list like","value":["pythonosc/osc_message.py","pythonosc/osc_message_builder.py","pythonosc/parsing/osc_types.py"],"string":"[\n \"pythonosc/osc_message.py\",\n \"pythonosc/osc_message_builder.py\",\n \"pythonosc/parsing/osc_types.py\"\n]"}}},{"rowIdx":595,"cells":{"instance_id":{"kind":"string","value":"pika__pika-1071"},"base_commit":{"kind":"string","value":"107fb0fd7028250fda0d8f901b65c93a91d7cb82"},"created_at":{"kind":"string","value":"2018-06-09 18:20:40"},"environment_setup_commit":{"kind":"string","value":"4c904dea651caaf2a54b0fca0b9e908dec18a4f8"},"hints_text":{"kind":"string","value":"vitaly-krugl: @lukebakken, let's work out one of these parallel pull requests first, then create the second one after that. 
\nmichaelklishin: This [rabbitmq-users thread](https://groups.google.com/d/msg/rabbitmq-users/Fmfeqe5ocTY/0fxMMVsSAgAJ) is worth mentioning."},"patch":{"kind":"string","value":"diff --git a/examples/consume.py b/examples/consume.py\nindex 7344149..26e4620 100644\n--- a/examples/consume.py\n+++ b/examples/consume.py\n@@ -1,3 +1,4 @@\n+\"\"\"Basic message consumer example\"\"\"\n import functools\n import logging\n import pika\n@@ -8,26 +9,36 @@ LOGGER = logging.getLogger(__name__)\n \n logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)\n \n-def on_message(channel, method_frame, header_frame, body, userdata=None):\n- LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body))\n- channel.basic_ack(delivery_tag=method_frame.delivery_tag)\n-\n-credentials = pika.PlainCredentials('guest', 'guest')\n-parameters = pika.ConnectionParameters('localhost', credentials=credentials)\n-connection = pika.BlockingConnection(parameters)\n-\n-channel = connection.channel()\n-channel.exchange_declare(exchange=\"test_exchange\", exchange_type=\"direct\", passive=False, durable=True, auto_delete=False)\n-channel.queue_declare(queue=\"standard\", auto_delete=True)\n-channel.queue_bind(queue=\"standard\", exchange=\"test_exchange\", routing_key=\"standard_key\")\n-channel.basic_qos(prefetch_count=1)\n-\n-on_message_callback = functools.partial(on_message, userdata='on_message_userdata')\n-channel.basic_consume(on_message_callback, 'standard')\n-\n-try:\n- channel.start_consuming()\n-except KeyboardInterrupt:\n- channel.stop_consuming()\n-\n-connection.close()\n+def on_message(chan, method_frame, _header_frame, body, userdata=None):\n+ \"\"\"Called when a message is received. Log message and ack it.\"\"\"\n+ LOGGER.info('Userdata: %s Message body: %s', userdata, body)\n+ chan.basic_ack(delivery_tag=method_frame.delivery_tag)\n+\n+def main():\n+ \"\"\"Main method.\"\"\"\n+ credentials = pika.PlainCredentials('guest', 'guest')\n+ parameters = pika.ConnectionParameters('localhost', credentials=credentials)\n+ connection = pika.BlockingConnection(parameters)\n+\n+ channel = connection.channel()\n+ channel.exchange_declare(exchange=\"test_exchange\",\n+ exchange_type=\"direct\",\n+ passive=False,\n+ durable=True,\n+ auto_delete=False)\n+ channel.queue_declare(queue=\"standard\", auto_delete=True)\n+ channel.queue_bind(queue=\"standard\", exchange=\"test_exchange\", routing_key=\"standard_key\")\n+ channel.basic_qos(prefetch_count=1)\n+\n+ on_message_callback = functools.partial(on_message, userdata='on_message_userdata')\n+ channel.basic_consume(on_message_callback, 'standard')\n+\n+ try:\n+ channel.start_consuming()\n+ except KeyboardInterrupt:\n+ channel.stop_consuming()\n+\n+ connection.close()\n+\n+if __name__ == '__main__':\n+ main()\ndiff --git a/pika/connection.py b/pika/connection.py\nindex 0c4e2a7..bed9bdb 100644\n--- a/pika/connection.py\n+++ b/pika/connection.py\n@@ -1301,7 +1301,7 @@ class Connection(object):\n self._backpressure_multiplier = value\n \n #\n- # Connections state properties\n+ # Connection state properties\n #\n \n @property\ndiff --git a/pika/heartbeat.py b/pika/heartbeat.py\nindex 8d3d20a..7d4d7dd 100644\n--- a/pika/heartbeat.py\n+++ b/pika/heartbeat.py\n@@ -7,38 +7,67 @@ LOGGER = logging.getLogger(__name__)\n \n \n class HeartbeatChecker(object):\n- \"\"\"Checks to make sure that our heartbeat is received at the expected\n- intervals.\n+ \"\"\"Sends heartbeats to the broker. 
The provided timeout is used to\n+ determine if the connection is stale - no received heartbeats or\n+ other activity will close the connection. See the parameter list for more\n+ details.\n \n \"\"\"\n- DEFAULT_INTERVAL = 60\n- MAX_IDLE_COUNT = 2\n _CONNECTION_FORCED = 320\n- _STALE_CONNECTION = \"Too Many Missed Heartbeats, No reply in %i seconds\"\n+ _STALE_CONNECTION = \"No activity or too many missed heartbeats in the last %i seconds\"\n \n- def __init__(self, connection, interval=DEFAULT_INTERVAL, idle_count=MAX_IDLE_COUNT):\n- \"\"\"Create a heartbeat on connection sending a heartbeat frame every\n- interval seconds.\n+ def __init__(self, connection, timeout):\n+ \"\"\"Create an object that will check for activity on the provided\n+ connection as well as receive heartbeat frames from the broker. The\n+ timeout parameter defines a window within which this activity must\n+ happen. If not, the connection is considered dead and closed.\n+\n+ The value passed for timeout is also used to calculate an interval\n+ at which a heartbeat frame is sent to the broker. The interval is\n+ equal to the timeout value divided by two.\n \n :param pika.connection.Connection: Connection object\n- :param int interval: Heartbeat check interval. Note: heartbeats will\n- be sent at interval / 2 frequency.\n- :param int idle_count: The number of heartbeat intervals without data\n- received that will close the current connection.\n+ :param int timeout: Connection idle timeout. If no activity occurs on the\n+ connection nor heartbeat frames received during the\n+ timeout window the connection will be closed. The\n+ interval used to send heartbeats is calculated from\n+ this value by dividing it by two.\n \n \"\"\"\n+ if timeout < 1:\n+ raise ValueError('timeout must be >= 1, but got %r' % (timeout,))\n+\n self._connection = connection\n \n- # Note: see the following document:\n+ # Note: see the following documents:\n # https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout\n- self._interval = float(interval / 2)\n-\n- # Note: even though we're sending heartbeats in half the specified\n- # interval, the broker will be sending them to us at the specified\n- # interval. This means we'll be checking for an idle connection\n- # twice as many times as the broker will send heartbeats to us,\n- # so we need to double the max idle count here\n- self._max_idle_count = idle_count * 2\n+ # https://github.com/pika/pika/pull/1072\n+ # https://groups.google.com/d/topic/rabbitmq-users/Fmfeqe5ocTY/discussion\n+ # There is a certain amount of confusion around how client developers\n+ # interpret the spec. The spec talks about 2 missed heartbeats as a\n+ # *timeout*, plus that any activity on the connection counts for a\n+ # heartbeat. This is to avoid edge cases and not to depend on network\n+ # latency.\n+ self._timeout = timeout\n+\n+ self._send_interval = float(timeout) / 2\n+\n+ # Note: Pika will calculate the heartbeat / connectivity check interval\n+ # by adding 5 seconds to the negotiated timeout to leave a bit of room\n+ # for broker heartbeats that may be right at the edge of the timeout\n+ # window. This is different behavior from the RabbitMQ Java client and\n+ # the spec that suggests a check interval equivalent to two times the\n+ # heartbeat timeout value.
But, one advantage of adding a small amount\n+ # is that bad connections will be detected faster.\n+ # https://github.com/pika/pika/pull/1072#issuecomment-397850795\n+ # https://github.com/rabbitmq/rabbitmq-java-client/blob/b55bd20a1a236fc2d1ea9369b579770fa0237615/src/main/java/com/rabbitmq/client/impl/AMQConnection.java#L773-L780\n+ # https://github.com/ruby-amqp/bunny/blob/3259f3af2e659a49c38c2470aa565c8fb825213c/lib/bunny/session.rb#L1187-L1192\n+ self._check_interval = timeout + 5\n+\n+ LOGGER.debug('timeout: %f send_interval: %f check_interval: %f',\n+ self._timeout,\n+ self._send_interval,\n+ self._check_interval)\n \n # Initialize counters\n self._bytes_received = 0\n@@ -47,21 +76,10 @@ class HeartbeatChecker(object):\n self._heartbeat_frames_sent = 0\n self._idle_byte_intervals = 0\n \n- # The handle for the last timer\n- self._timer = None\n-\n- # Setup the timer to fire in _interval seconds\n- self._setup_timer()\n-\n- @property\n- def active(self):\n- \"\"\"Return True if the connection's heartbeat attribute is set to this\n- instance.\n-\n- :rtype True\n-\n- \"\"\"\n- return self._connection.heartbeat is self\n+ self._send_timer = None\n+ self._check_timer = None\n+ self._start_send_timer()\n+ self._start_check_timer()\n \n @property\n def bytes_received_on_connection(self):\n@@ -78,74 +96,78 @@ class HeartbeatChecker(object):\n to trip the max idle threshold.\n \n \"\"\"\n- return self._idle_byte_intervals >= self._max_idle_count\n+ return self._idle_byte_intervals > 0\n \n def received(self):\n \"\"\"Called when a heartbeat is received\"\"\"\n LOGGER.debug('Received heartbeat frame')\n self._heartbeat_frames_received += 1\n \n- def send_and_check(self):\n- \"\"\"Invoked by a timer to send a heartbeat when we need to, check to see\n+ def _send_heartbeat(self):\n+ \"\"\"Invoked by a timer to send a heartbeat when we need to.\n+\n+ \"\"\"\n+ LOGGER.debug('Sending heartbeat frame')\n+ self._send_heartbeat_frame()\n+ self._start_send_timer()\n+\n+ def _check_heartbeat(self):\n+ \"\"\"Invoked by a timer to check for broker heartbeats. 
Checks to see\n if we've missed any heartbeats and disconnect our connection if it's\n been idle too long.\n \n \"\"\"\n+ if self._has_received_data:\n+ self._idle_byte_intervals = 0\n+ else:\n+ # Connection has not received any data, increment the counter\n+ self._idle_byte_intervals += 1\n+\n LOGGER.debug('Received %i heartbeat frames, sent %i, '\n- 'idle intervals %i, max idle count %i',\n+ 'idle intervals %i',\n self._heartbeat_frames_received,\n self._heartbeat_frames_sent,\n- self._idle_byte_intervals,\n- self._max_idle_count)\n+ self._idle_byte_intervals)\n \n if self.connection_is_idle:\n- return self._close_connection()\n-\n- # Connection has not received any data, increment the counter\n- if not self._has_received_data:\n- self._idle_byte_intervals += 1\n- else:\n- self._idle_byte_intervals = 0\n+ self._close_connection()\n+ return\n \n- # Update the counters of bytes sent/received and the frames received\n- self._update_counters()\n-\n- # Send a heartbeat frame\n- self._send_heartbeat_frame()\n-\n- # Update the timer to fire again\n- self._start_timer()\n+ self._start_check_timer()\n \n def stop(self):\n \"\"\"Stop the heartbeat checker\"\"\"\n- if self._timer:\n- LOGGER.debug('Removing timeout for next heartbeat interval')\n- self._connection.remove_timeout(self._timer)\n- self._timer = None\n+ if self._send_timer:\n+ LOGGER.debug('Removing timer for next heartbeat send interval')\n+ self._connection.remove_timeout(self._send_timer) # pylint: disable=W0212\n+ self._send_timer = None\n+ if self._check_timer:\n+ LOGGER.debug('Removing timer for next heartbeat check interval')\n+ self._connection.remove_timeout(self._check_timer) # pylint: disable=W0212\n+ self._check_timer = None\n \n def _close_connection(self):\n \"\"\"Close the connection with the AMQP Connection-Forced value.\"\"\"\n LOGGER.info('Connection is idle, %i stale byte intervals',\n self._idle_byte_intervals)\n- duration = self._max_idle_count * self._interval\n- text = HeartbeatChecker._STALE_CONNECTION % duration\n+ text = HeartbeatChecker._STALE_CONNECTION % self._timeout\n \n # NOTE: this won't achieve the perceived effect of sending\n # Connection.Close to broker, because the frame will only get buffered\n # in memory before the next statement terminates the connection.\n self._connection.close(HeartbeatChecker._CONNECTION_FORCED, text)\n \n- self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED,\n+ self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED, # pylint: disable=W0212\n text)\n \n @property\n def _has_received_data(self):\n- \"\"\"Returns True if the connection has received data on the connection.\n+ \"\"\"Returns True if the connection has received data.\n \n :rtype: bool\n \n \"\"\"\n- return not self._bytes_received == self.bytes_received_on_connection\n+ return self._bytes_received != self.bytes_received_on_connection\n \n @staticmethod\n def _new_heartbeat_frame():\n@@ -161,25 +183,27 @@ class HeartbeatChecker(object):\n \n \"\"\"\n LOGGER.debug('Sending heartbeat frame')\n- self._connection._send_frame(self._new_heartbeat_frame())\n+ self._connection._send_frame( # pylint: disable=W0212\n+ self._new_heartbeat_frame())\n self._heartbeat_frames_sent += 1\n \n- def _setup_timer(self):\n- \"\"\"Use the connection objects delayed_call function which is\n- implemented by the Adapter for calling the check_heartbeats function\n- every interval seconds.\n-\n- \"\"\"\n- self._timer = self._connection.add_timeout(self._interval,\n- self.send_and_check)\n-\n- def 
_start_timer(self):\n- \"\"\"If the connection still has this object set for heartbeats, add a\n- new timer.\n+ def _start_send_timer(self):\n+ \"\"\"Start a new heartbeat send timer.\"\"\"\n+ self._send_timer = self._connection.add_timeout( # pylint: disable=W0212\n+ self._send_interval,\n+ self._send_heartbeat)\n+\n+ def _start_check_timer(self):\n+ \"\"\"Start a new heartbeat check timer.\"\"\"\n+ # Note: update counters now to get current values\n+ # at the start of the timeout window. Values will be\n+ # checked against the connection's byte count at the\n+ # end of the window\n+ self._update_counters()\n \n- \"\"\"\n- if self.active:\n- self._setup_timer()\n+ self._check_timer = self._connection.add_timeout( # pylint: disable=W0212\n+ self._check_interval,\n+ self._check_heartbeat)\n \n def _update_counters(self):\n \"\"\"Update the internal counters for bytes sent and received and the\n"},"problem_statement":{"kind":"string","value":"HeartbeatChecker is confused about heartbeat timeouts\ncc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate.\r\n\r\n`HeartbeatChecker` constructor presently accepts an interval value and an `idle_count` which defaults to 2. `Connection` class instantiates `HeartbeatChecker` with `interval=heartbeat_timeout` and default `idle_count`.\r\n\r\nSo, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. So, `HeartbeatChecker` will emit heartbeats to the broker only once every 600 seconds. And it will detect heartbeat timeout after 1200 seconds.\r\n\r\nSo, in the event that receipt of the heartbeat by the broker is slightly delayed (and in absence of any other AMQP frames from the client), the broker can erroneously conclude that connection with the client is lost and prematurely close the connection.\r\n\r\nThis is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity. And it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window.\r\n\r\nI see two problems here:\r\n\r\n1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as `HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2)` or something like that (how often does RabbitMQ broker send heartbeats within one heartbeat timeout interval?)\r\n2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. (A worked sketch of these intervals, before and after the fix, follows just below.)
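A worked sketch of the interval arithmetic in this record, using the hypothetical 600-second timeout from the issue text; the "after" values follow the patch above, and the "before" values restate the behavior the issue describes:

```python
timeout = 600  # negotiated heartbeat timeout from the issue's example

# Behavior the issue describes: one heartbeat frame per timeout window,
# and a stale connection only flagged after two full windows.
old_send_interval = timeout    # 600 s between heartbeat frames
old_stale_after = 2 * timeout  # 1200 s before the idle check trips

# Behavior after the patch in this record: send at half the timeout and
# run the connectivity check once per window plus a small grace period.
new_send_interval = float(timeout) / 2  # 300.0 s
new_check_interval = timeout + 5        # 605 s
```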
It's constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.\r\n"},"repo":{"kind":"string","value":"pika/pika"},"test_patch":{"kind":"string","value":"diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py\nindex f0431c2..71fa552 100644\n--- a/tests/unit/heartbeat_tests.py\n+++ b/tests/unit/heartbeat_tests.py\n@@ -8,11 +8,11 @@ import mock\n \n from pika import connection, frame, heartbeat\n \n-\n class HeartbeatTests(unittest.TestCase):\n \n INTERVAL = 60\n- HALF_INTERVAL = INTERVAL / 2\n+ SEND_INTERVAL = float(INTERVAL) / 2\n+ CHECK_INTERVAL = INTERVAL + 5\n \n def setUp(self):\n self.mock_conn = mock.Mock(spec=connection.Connection)\n@@ -25,23 +25,26 @@ class HeartbeatTests(unittest.TestCase):\n del self.obj\n del self.mock_conn\n \n- def test_default_initialization_interval(self):\n- self.assertEqual(self.obj._interval, self.HALF_INTERVAL)\n-\n- def test_default_initialization_max_idle_count(self):\n- self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT * 2)\n-\n def test_constructor_assignment_connection(self):\n self.assertIs(self.obj._connection, self.mock_conn)\n \n- def test_constructor_assignment_heartbeat_interval(self):\n- self.assertEqual(self.obj._interval, self.HALF_INTERVAL)\n+ def test_constructor_assignment_intervals(self):\n+ self.assertEqual(self.obj._send_interval, self.SEND_INTERVAL)\n+ self.assertEqual(self.obj._check_interval, self.CHECK_INTERVAL)\n \n def test_constructor_initial_bytes_received(self):\n- self.assertEqual(self.obj._bytes_received, 0)\n+ # Note: _bytes_received is initialized by calls\n+ # to _start_check_timer which calls _update_counters\n+ # which reads the initial values from the connection\n+ self.assertEqual(self.obj._bytes_received,\n+ self.mock_conn.bytes_received)\n \n def test_constructor_initial_bytes_sent(self):\n- self.assertEqual(self.obj._bytes_received, 0)\n+ # Note: _bytes_received is initialized by calls\n+ # to _start_check_timer which calls _update_counters\n+ # which reads the initial values from the connection\n+ self.assertEqual(self.obj._bytes_sent,\n+ self.mock_conn.bytes_sent)\n \n def test_constructor_initial_heartbeat_frames_received(self):\n self.assertEqual(self.obj._heartbeat_frames_received, 0)\n@@ -52,18 +55,15 @@ class HeartbeatTests(unittest.TestCase):\n def test_constructor_initial_idle_byte_intervals(self):\n self.assertEqual(self.obj._idle_byte_intervals, 0)\n \n- @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')\n- def test_constructor_called_setup_timer(self, timer):\n- heartbeat.HeartbeatChecker(self.mock_conn)\n+ @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')\n+ def test_constructor_called_start_send_timer(self, timer):\n+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)\n timer.assert_called_once_with()\n \n- def test_active_true(self):\n- self.mock_conn.heartbeat = self.obj\n- self.assertTrue(self.obj.active)\n-\n- def test_active_false(self):\n- self.mock_conn.heartbeat = mock.Mock()\n- self.assertFalse(self.obj.active)\n+ @mock.patch('pika.heartbeat.HeartbeatChecker._start_check_timer')\n+ def test_constructor_called_start_check_timer(self, timer):\n+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)\n+ timer.assert_called_once_with()\n \n def test_bytes_received_on_connection(self):\n self.mock_conn.bytes_received = 128\n@@ -81,54 +81,63 @@ class HeartbeatTests(unittest.TestCase):\n 
self.assertTrue(self.obj._heartbeat_frames_received, 1)\n \n @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')\n- def test_send_and_check_not_closed(self, close_connection):\n- obj = heartbeat.HeartbeatChecker(self.mock_conn)\n- obj.send_and_check()\n+ def test_send_heartbeat_not_closed(self, close_connection):\n+ obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)\n+ obj._send_heartbeat()\n close_connection.assert_not_called()\n \n @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')\n- def test_send_and_check_missed_bytes(self, close_connection):\n- obj = heartbeat.HeartbeatChecker(self.mock_conn)\n+ def test_check_heartbeat_not_closed(self, close_connection):\n+ obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)\n+ self.mock_conn.bytes_received = 128\n+ obj._check_heartbeat()\n+ close_connection.assert_not_called()\n+\n+ @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')\n+ def test_check_heartbeat_missed_bytes(self, close_connection):\n+ obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)\n obj._idle_byte_intervals = self.INTERVAL\n- obj.send_and_check()\n+ obj._check_heartbeat()\n close_connection.assert_called_once_with()\n \n- def test_send_and_check_increment_no_bytes(self):\n+ def test_check_heartbeat_increment_no_bytes(self):\n self.mock_conn.bytes_received = 100\n self.obj._bytes_received = 100\n- self.obj.send_and_check()\n+ self.obj._check_heartbeat()\n self.assertEqual(self.obj._idle_byte_intervals, 1)\n \n- def test_send_and_check_increment_bytes(self):\n+ def test_check_heartbeat_increment_bytes(self):\n self.mock_conn.bytes_received = 100\n self.obj._bytes_received = 128\n- self.obj.send_and_check()\n+ self.obj._check_heartbeat()\n self.assertEqual(self.obj._idle_byte_intervals, 0)\n \n @mock.patch('pika.heartbeat.HeartbeatChecker._update_counters')\n- def test_send_and_check_update_counters(self, update_counters):\n- obj = heartbeat.HeartbeatChecker(self.mock_conn)\n- obj.send_and_check()\n+ def test_check_heartbeat_update_counters(self, update_counters):\n+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)\n update_counters.assert_called_once_with()\n \n @mock.patch('pika.heartbeat.HeartbeatChecker._send_heartbeat_frame')\n- def test_send_and_check_send_heartbeat_frame(self, send_heartbeat_frame):\n- obj = heartbeat.HeartbeatChecker(self.mock_conn)\n- obj.send_and_check()\n+ def test_send_heartbeat_sends_heartbeat_frame(self, send_heartbeat_frame):\n+ obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)\n+ obj._send_heartbeat()\n send_heartbeat_frame.assert_called_once_with()\n \n- @mock.patch('pika.heartbeat.HeartbeatChecker._start_timer')\n- def test_send_and_check_start_timer(self, start_timer):\n- obj = heartbeat.HeartbeatChecker(self.mock_conn)\n- obj.send_and_check()\n- start_timer.assert_called_once_with()\n+ @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')\n+ def test_send_heartbeat_start_timer(self, start_send_timer):\n+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)\n+ start_send_timer.assert_called_once_with()\n+\n+ @mock.patch('pika.heartbeat.HeartbeatChecker._start_check_timer')\n+ def test_check_heartbeat_start_timer(self, start_check_timer):\n+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)\n+ start_check_timer.assert_called_once_with()\n \n def test_connection_close(self):\n self.obj._idle_byte_intervals = 3\n self.obj._idle_heartbeat_intervals = 4\n self.obj._close_connection()\n- reason = self.obj._STALE_CONNECTION % 
(\n- self.obj._max_idle_count * self.obj._interval)\n+ reason = self.obj._STALE_CONNECTION % self.obj._timeout\n self.mock_conn.close.assert_called_once_with(\n self.obj._CONNECTION_FORCED, reason)\n self.mock_conn._on_terminate.assert_called_once_with(\n@@ -157,20 +166,17 @@ class HeartbeatTests(unittest.TestCase):\n self.obj._send_heartbeat_frame()\n self.assertEqual(self.obj._heartbeat_frames_sent, 1)\n \n- def test_setup_timer_called(self):\n- self.mock_conn.add_timeout.assert_called_once_with(\n- self.HALF_INTERVAL, self.obj.send_and_check)\n-\n- @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')\n- def test_start_timer_not_active(self, setup_timer):\n- self.obj._start_timer()\n- setup_timer.assert_not_called()\n+ def test_start_send_timer_called(self):\n+ want = [mock.call(self.SEND_INTERVAL, self.obj._send_heartbeat),\n+ mock.call(self.CHECK_INTERVAL, self.obj._check_heartbeat)]\n+ got = self.mock_conn.add_timeout.call_args_list\n+ self.assertEqual(got, want)\n \n- @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')\n- def test_start_timer_active(self, setup_timer):\n+ @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')\n+ def test_start_timer_active(self, setup_send_timer):\n self.mock_conn.heartbeat = self.obj\n- self.obj._start_timer()\n- self.assertTrue(setup_timer.called)\n+ self.obj._start_send_timer()\n+ self.assertTrue(setup_send_timer.called)\n \n def test_update_counters_bytes_received(self):\n self.mock_conn.bytes_received = 256\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 2,\n \"test_score\": 2\n },\n \"num_modified_files\": 3\n}"},"version":{"kind":"string","value":"0.12"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\",\n \"mock\",\n \"coverage\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.9\",\n \"reqs_path\": [\n \"test-requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"certifi==2025.1.31\ncharset-normalizer==3.4.1\ncodecov==2.1.13\ncoverage==7.8.0\nexceptiongroup==1.2.2\nidna==3.10\niniconfig==2.1.0\nmock==5.2.0\nnose==1.3.7\npackaging==24.2\n-e git+https://github.com/pika/pika.git@107fb0fd7028250fda0d8f901b65c93a91d7cb82#egg=pika\npluggy==1.5.0\npytest==8.3.5\nrequests==2.32.3\ntomli==2.2.1\ntornado==6.4.2\nTwisted==15.3.0\nurllib3==2.3.0\nzope.interface==7.2\n"},"environment":{"kind":"string","value":"name: pika\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - pip=25.0=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - 
sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - certifi==2025.1.31\n - charset-normalizer==3.4.1\n - codecov==2.1.13\n - coverage==7.8.0\n - exceptiongroup==1.2.2\n - idna==3.10\n - iniconfig==2.1.0\n - mock==5.2.0\n - nose==1.3.7\n - packaging==24.2\n - pluggy==1.5.0\n - pytest==8.3.5\n - requests==2.32.3\n - tomli==2.2.1\n - tornado==6.4.2\n - twisted==15.3.0\n - urllib3==2.3.0\n - zope-interface==7.2\nprefix: /opt/conda/envs/pika\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_increment_bytes","tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_increment_no_bytes","tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_missed_bytes","tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_not_closed","tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_start_timer","tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_update_counters","tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_intervals","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_start_check_timer","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_start_send_timer","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_not_closed","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_sends_heartbeat_frame","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_start_timer","tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_send_timer_called","tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active"],"string":"[\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_increment_bytes\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_increment_no_bytes\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_missed_bytes\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_not_closed\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_start_timer\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_update_counters\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_intervals\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_start_check_timer\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_start_send_timer\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_not_closed\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_sends_heartbeat_frame\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_start_timer\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_send_timer_called\",\n 
\"tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection","tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false","tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent","tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals","tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false","tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true","tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame","tests/unit/heartbeat_tests.py::HeartbeatTests::test_received","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented","tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called","tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received","tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent"],"string":"[\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_received\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received\",\n \"tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"BSD 3-Clause \"New\" or \"Revised\" License"},"__index_level_0__":{"kind":"number","value":2645,"string":"2,645"},"num_tokens_patch":{"kind":"number","value":3454,"string":"3,454"},"before_filepaths":{"kind":"list like","value":["examples/consume.py","pika/connection.py","pika/heartbeat.py"],"string":"[\n \"examples/consume.py\",\n \"pika/connection.py\",\n \"pika/heartbeat.py\"\n]"}}},{"rowIdx":596,"cells":{"instance_id":{"kind":"string","value":"EdinburghGenomics__clarity_scripts-62"},"base_commit":{"kind":"string","value":"6fd2d0a0f7fc2f12213daa9f265b8c5a35e3e7ef"},"created_at":{"kind":"string","value":"2018-06-11 
10:59:25"},"environment_setup_commit":{"kind":"string","value":"c2eec150467a3cd2185408cd44a6a773b8b6ee99"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/scripts/convert_and_dispatch_genotypes.py b/scripts/convert_and_dispatch_genotypes.py\nindex 18720e0..aa7959e 100644\n--- a/scripts/convert_and_dispatch_genotypes.py\n+++ b/scripts/convert_and_dispatch_genotypes.py\n@@ -241,8 +241,8 @@ class UploadVcfToSamples(StepEPP):\n # This is the first genotyping results\n lims_sample.udf[submitted_genotype_udf_number_call] = nb_call\n lims_sample.udf[genotype_udf_file_id] = lims_file.id\n- elif lims_sample.udf.get(submitted_genotype_udf_number_call) and \\\n- nb_call > lims_sample.udf.get(submitted_genotype_udf_number_call):\n+ elif submitted_genotype_udf_number_call in lims_sample.udf and \\\n+ nb_call > (lims_sample.udf.get(submitted_genotype_udf_number_call) or 0):\n # This genotyping is better than before\n lims_sample.udf[submitted_genotype_udf_number_call] = nb_call\n lims_sample.udf[genotype_udf_file_id] = lims_file.id\n"},"problem_statement":{"kind":"string","value":"ConvertGenotype does not overwrite best run = 0\nIn `convert_and_dispatch_genotypes.py` the overwriting of the best run UDF does not work when the best run is `0`\r\n\r\n"},"repo":{"kind":"string","value":"EdinburghGenomics/clarity_scripts"},"test_patch":{"kind":"string","value":"diff --git a/tests/test_convert_and_dispatch_genotypes.py b/tests/test_convert_and_dispatch_genotypes.py\nindex 3d34ed3..020c52f 100644\n--- a/tests/test_convert_and_dispatch_genotypes.py\n+++ b/tests/test_convert_and_dispatch_genotypes.py\n@@ -170,7 +170,7 @@ class TestUploadVcfToSamples(TestEPP):\n }\n self.lims_sample2.udf = {\n 'QuantStudio Data Import Completed #': 1,\n- 'Number of Calls (Best Run)': 12,\n+ 'Number of Calls (Best Run)': 0,\n 'Genotyping results file id': 'old_file_id'\n }\n mlims.upload_new_file.return_value = Mock(id='file_id')\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_short_problem_statement\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 3,\n \"test_score\": 0\n },\n \"num_modified_files\": 1\n}"},"version":{"kind":"string","value":"0.7"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\",\n \"pytest-cov\"\n ],\n \"pre_install\": null,\n \"python\": \"3.6\",\n \"reqs_path\": [\n \"requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"asana==0.6.7\nattrs==22.2.0\ncached-property==1.5.2\ncertifi==2021.5.30\n-e git+https://github.com/EdinburghGenomics/clarity_scripts.git@6fd2d0a0f7fc2f12213daa9f265b8c5a35e3e7ef#egg=clarity_scripts\ncoverage==6.2\nEGCG-Core==0.8.1\nimportlib-metadata==4.8.3\niniconfig==1.1.1\nJinja2==2.8\nMarkupSafe==2.0.1\noauthlib==3.2.2\npackaging==21.3\npluggy==1.0.0\npy==1.11.0\npyclarity-lims==0.4.8\npyparsing==3.1.4\npytest==7.0.1\npytest-cov==4.0.0\nPyYAML==6.0.1\nrequests==2.14.2\nrequests-oauthlib==0.8.0\nsix==1.17.0\ntomli==1.2.3\ntyping_extensions==4.1.1\nzipp==3.6.0\n"},"environment":{"kind":"string","value":"name: clarity_scripts\nchannels:\n - defaults\n - 
https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - certifi=2021.5.30=py36h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.3=he6710b0_2\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=1.1.1w=h7f8727e_0\n - pip=21.2.2=py36h06a4308_0\n - python=3.6.13=h12debd9_1\n - readline=8.2=h5eee18b_0\n - setuptools=58.0.4=py36h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - wheel=0.37.1=pyhd3eb1b0_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - asana==0.6.7\n - attrs==22.2.0\n - cached-property==1.5.2\n - coverage==6.2\n - egcg-core==0.8.1\n - importlib-metadata==4.8.3\n - iniconfig==1.1.1\n - jinja2==2.8\n - markupsafe==2.0.1\n - oauthlib==3.2.2\n - packaging==21.3\n - pluggy==1.0.0\n - py==1.11.0\n - pyclarity-lims==0.4.8\n - pyparsing==3.1.4\n - pytest==7.0.1\n - pytest-cov==4.0.0\n - pyyaml==6.0.1\n - requests==2.14.2\n - requests-oauthlib==0.8.0\n - six==1.17.0\n - tomli==1.2.3\n - typing-extensions==4.1.1\n - zipp==3.6.0\nprefix: /opt/conda/envs/clarity_scripts\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/test_convert_and_dispatch_genotypes.py::TestUploadVcfToSamples::test_upload_second_time"],"string":"[\n \"tests/test_convert_and_dispatch_genotypes.py::TestUploadVcfToSamples::test_upload_second_time\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/test_convert_and_dispatch_genotypes.py::TestEPP::test_init","tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_find_field","tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_generate_vcf","tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_get_genotype_from_call","tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_order_from_fai","tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_parse_QuantStudio_AIF_genotype","tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_parse_genome_fai","tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_vcf_header_from_ref_length","tests/test_convert_and_dispatch_genotypes.py::TestUploadVcfToSamples::test_init","tests/test_convert_and_dispatch_genotypes.py::TestUploadVcfToSamples::test_upload_first_time"],"string":"[\n \"tests/test_convert_and_dispatch_genotypes.py::TestEPP::test_init\",\n \"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_find_field\",\n \"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_generate_vcf\",\n \"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_get_genotype_from_call\",\n \"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_order_from_fai\",\n \"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_parse_QuantStudio_AIF_genotype\",\n \"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_parse_genome_fai\",\n \"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_vcf_header_from_ref_length\",\n \"tests/test_convert_and_dispatch_genotypes.py::TestUploadVcfToSamples::test_init\",\n 
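The one-line fix in this record turns on Python truthiness: a stored best-run call count of `0` is falsy, so the old guard never fired. A minimal sketch with a plain dict standing in for the LIMS sample UDFs (the new call count of 12 is illustrative; the key name comes from the record's tests):

```python
key = 'Number of Calls (Best Run)'
udf = {key: 0}  # the previous best run genuinely had zero calls
nb_call = 12    # the new genotyping result is better

# Old guard: udf.get(key) is 0, which is falsy, so the conjunction
# short-circuits to a false value and the better result is discarded.
assert not (udf.get(key) and nb_call > udf.get(key))

# Fixed guard from the patch: test key presence explicitly and coerce a
# falsy stored value to 0 before comparing.
assert key in udf and nb_call > (udf.get(key) or 0)
```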
\"tests/test_convert_and_dispatch_genotypes.py::TestUploadVcfToSamples::test_upload_first_time\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"MIT License"},"__index_level_0__":{"kind":"number","value":2653,"string":"2,653"},"num_tokens_patch":{"kind":"number","value":274,"string":"274"},"before_filepaths":{"kind":"list like","value":["scripts/convert_and_dispatch_genotypes.py"],"string":"[\n \"scripts/convert_and_dispatch_genotypes.py\"\n]"}}},{"rowIdx":597,"cells":{"instance_id":{"kind":"string","value":"mido__mido-164"},"base_commit":{"kind":"string","value":"e87384d7e5d62de361a65ab6b1d5d62750475e84"},"created_at":{"kind":"string","value":"2018-06-11 12:41:40"},"environment_setup_commit":{"kind":"string","value":"dd1b42f39678982e887a0bd5b25e104a6859ff5f"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/mido/frozen.py b/mido/frozen.py\nindex 2c04f1f..20d629d 100644\n--- a/mido/frozen.py\n+++ b/mido/frozen.py\n@@ -3,10 +3,15 @@ from .midifiles import MetaMessage, UnknownMetaMessage\n \n \n class Frozen(object):\n- def __repr__(self):\n- text = super(Frozen, self).__repr__()\n+ def __str__(self):\n+ text = super(Frozen, self).__str__()\n return ''.format(str(self))\n-\n def _setattr(self, name, value):\n if name == 'type':\n raise AttributeError('type attribute is read only')\ndiff --git a/mido/midifiles/meta.py b/mido/midifiles/meta.py\nindex be9552c..8b73da0 100644\n--- a/mido/midifiles/meta.py\n+++ b/mido/midifiles/meta.py\n@@ -535,7 +535,7 @@ class MetaMessage(BaseMessage):\n encode_variable_int(len(data)) +\n data)\n \n- def __repr__(self):\n+ def __str__(self):\n spec = _META_SPEC_BY_TYPE[self.type]\n attributes = []\n for name in spec.attributes:\n@@ -561,7 +561,7 @@ class UnknownMetaMessage(MetaMessage):\n 'data': data,\n 'time': time})\n \n- def __repr__(self):\n+ def __str__(self):\n return ('').format(self.type_byte,\n@@ -569,6 +569,11 @@ class UnknownMetaMessage(MetaMessage):\n self.time\n )\n \n+ def __repr__(self):\n+ # fix message type artifact\n+ r = super(UnknownMetaMessage, self).__repr__()\n+ return r.replace(\"'unknown_meta', \", '')\n+\n def __setattr__(self, name, value):\n # This doesn't do any checking.\n # It probably should.\ndiff --git a/mido/midifiles/midifiles.py b/mido/midifiles/midifiles.py\nindex bb885a2..f3db7cb 100644\n--- a/mido/midifiles/midifiles.py\n+++ b/mido/midifiles/midifiles.py\n@@ -293,7 +293,8 @@ class MidiFile(object):\n type=1, ticks_per_beat=DEFAULT_TICKS_PER_BEAT,\n charset='latin1',\n debug=False,\n- clip=False\n+ clip=False,\n+ tracks=None\n ):\n \n self.filename = filename\n@@ -309,7 +310,9 @@ class MidiFile(object):\n raise ValueError(\n 'invalid format {} (must be 0, 1 or 2)'.format(format))\n \n- if file is not None:\n+ if tracks is not None:\n+ self.tracks = tracks\n+ elif file is not None:\n self._load(file)\n elif self.filename is not None:\n with io.open(filename, 'rb') as file:\n@@ -461,11 +464,18 @@ class MidiFile(object):\n else:\n print('{!r}'.format(msg))\n \n- def __repr__(self):\n+ def __str__(self):\n return ''.format(\n self.filename, self.type, len(self.tracks),\n sum([len(track) for track in self.tracks]))\n \n+ def __repr__(self):\n+ tracks_str = ',\\n'.join(repr(track) for track in self.tracks)\n+ tracks_str = '\\n'.join(' ' + line for line in tracks_str.splitlines())\n+ tracks_str = (', tracks=[\\n%s\\n]' % tracks_str) if self.tracks else ''\n+ return 'MidiFile(type=%s, ticks_per_beat=%s%s)' % (\n+ self.type, 
self.ticks_per_beat, tracks_str)\n+\n # The context manager has no purpose but is kept around since it was\n # used in examples in the past.\n def __enter__(self):\ndiff --git a/mido/midifiles/tracks.py b/mido/midifiles/tracks.py\nindex 59ad1df..6140688 100644\n--- a/mido/midifiles/tracks.py\n+++ b/mido/midifiles/tracks.py\n@@ -50,9 +50,16 @@ class MidiTrack(list):\n def __mul__(self, other):\n return self.__class__(list.__mul__(self, other))\n \n- def __repr__(self):\n+ def __str__(self):\n return ''.format(self.name, len(self))\n \n+ def __repr__(self):\n+ messages = ''\n+ if len(self) > 0:\n+ template = '[\\n %s]' if len(self) > 1 else '[%s]'\n+ messages = template % ',\\n '.join(repr(m) for m in self)\n+ return 'MidiTrack(%s)' % messages\n+\n \n def _to_abstime(messages):\n \"\"\"Convert messages to absolute time.\"\"\"\n"},"problem_statement":{"kind":"string","value":"proper __repr__ for MidiFile, etc.\nPer Python docs, `__repr__()` output \"should normally look like a valid Python expression that can be used to recreate an object with the same value\".\r\n\r\nCurrently for MidiFile, repr() gives the same result as str(), which is just a summary including number of tracks, etc. Likewise MidiTrack gives a summary with number of messages.\r\n\r\nOne place a proper repr() would be immediately useful is for tests, e.g. to confirm that code changes don't alter generated MIDI messages, tracks, or files. It would also facilitate viewing differences between MIDI data in text form.\r\n\r\nWith a proper repr(), it would also be trivial to implement MidiFile `__eq__` (see #150)."},"repo":{"kind":"string","value":"mido/mido"},"test_patch":{"kind":"string","value":"diff --git a/tests/messages/test_messages.py b/tests/messages/test_messages.py\nindex a834fbd..1b076c0 100644\n--- a/tests/messages/test_messages.py\n+++ b/tests/messages/test_messages.py\n@@ -108,3 +108,9 @@ def test_dict_sysex_data():\n def test_from_hex_sysex_data_type():\n msg = Message.from_hex('F0 01 02 03 F7')\n assert isinstance(msg.data, SysexData)\n+\n+\n+def test_repr():\n+ msg = Message('note_on', channel=1, note=2, time=3)\n+ msg_eval = eval(repr(msg))\n+ assert msg == msg_eval\ndiff --git a/tests/midifiles/test_meta.py b/tests/midifiles/test_meta.py\nindex 3ea3c71..4ac6145 100644\n--- a/tests/midifiles/test_meta.py\n+++ b/tests/midifiles/test_meta.py\n@@ -1,5 +1,5 @@\n import pytest\n-from mido.midifiles.meta import MetaMessage, MetaSpec_key_signature, KeySignatureError\n+from mido.midifiles.meta import MetaMessage, UnknownMetaMessage, MetaSpec_key_signature, KeySignatureError\n \n \n def test_copy_invalid_argument():\n@@ -30,3 +30,13 @@ class TestKeySignature:\n msg = MetaMessage('key_signature')\n MetaSpec_key_signature().decode(msg, input_bytes)\n assert msg.key == expect_sig\n+\n+def test_meta_message_repr():\n+ msg = MetaMessage('end_of_track', time=10)\n+ msg_eval = eval(repr(msg))\n+ assert msg == msg_eval\n+\n+def test_unknown_meta_message_repr():\n+ msg = UnknownMetaMessage(type_byte=99, data=[1, 2], time=10)\n+ msg_eval = eval(repr(msg))\n+ assert msg == msg_eval\ndiff --git a/tests/midifiles/test_midifiles.py b/tests/midifiles/test_midifiles.py\nindex 8231e45..a579ed7 100644\n--- a/tests/midifiles/test_midifiles.py\n+++ b/tests/midifiles/test_midifiles.py\n@@ -1,7 +1,7 @@\n import io\n from pytest import raises\n from mido.messages import Message\n-from mido.midifiles.midifiles import MidiFile\n+from mido.midifiles.midifiles import MidiFile, MidiTrack\n from mido.midifiles.meta import MetaMessage, 
KeySignatureError\n \n HEADER_ONE_TRACK = \"\"\"\n@@ -163,3 +163,19 @@ def test_meta_messages_with_length_0():\n \n MetaMessage('end_of_track'),\n ]\n+\n+\n+def test_midifile_repr():\n+ midifile = MidiFile(type=1, ticks_per_beat=123, tracks=[\n+ MidiTrack([\n+ Message('note_on', channel=1, note=2, time=3),\n+ Message('note_off', channel=1, note=2, time=3)]),\n+ MidiTrack([\n+ MetaMessage('sequence_number', number=5),\n+ Message('note_on', channel=2, note=6, time=9),\n+ Message('note_off', channel=2, note=6, time=9)]),\n+ ])\n+ midifile_eval = eval(repr(midifile))\n+ for track, track_eval in zip(midifile.tracks, midifile_eval.tracks):\n+ for m1, m2 in zip(track, track_eval):\n+ assert m1 == m2\ndiff --git a/tests/midifiles/test_tracks.py b/tests/midifiles/test_tracks.py\nindex 6dddcde..54467b8 100644\n--- a/tests/midifiles/test_tracks.py\n+++ b/tests/midifiles/test_tracks.py\n@@ -1,6 +1,10 @@\n+import itertools\n+from mido.messages import Message\n from mido.midifiles.meta import MetaMessage\n from mido.midifiles.tracks import MidiTrack\n \n+zip = getattr(itertools, 'izip', zip)\n+\n \n def test_track_slice():\n track = MidiTrack()\n@@ -16,3 +20,13 @@ def test_track_name():\n # The track should use the first name it finds.\n track = MidiTrack([name1, name2])\n assert track.name == name1.name\n+\n+\n+def test_track_repr():\n+ track = MidiTrack([\n+ Message('note_on', channel=1, note=2, time=3),\n+ Message('note_off', channel=1, note=2, time=3),\n+ ])\n+ track_eval = eval(repr(track))\n+ for m1, m2 in zip(track, track_eval):\n+ assert m1 == m2\ndiff --git a/tests/test_frozen.py b/tests/test_frozen.py\nindex b26f010..0551d2f 100644\n--- a/tests/test_frozen.py\n+++ b/tests/test_frozen.py\n@@ -1,4 +1,5 @@\n from mido.messages import Message\n+from mido.midifiles.meta import MetaMessage, UnknownMetaMessage\n from mido.frozen import (is_frozen, freeze_message, thaw_message,\n FrozenMessage, FrozenMetaMessage,\n FrozenUnknownMetaMessage)\n@@ -27,3 +28,24 @@ def test_thawed_message_is_copy():\n def test_is_frozen():\n assert is_frozen(FrozenMessage('note_on'))\n assert not is_frozen(Message('note_on'))\n+\n+\n+def test_frozen_repr():\n+ msg = FrozenMessage('note_on', channel=1, note=2, time=3)\n+ msg_eval = eval(repr(msg))\n+ assert type(msg_eval) == Message\n+ assert msg == msg_eval\n+\n+\n+def test_frozen_meta_repr():\n+ msg = FrozenMetaMessage('end_of_track', time=10)\n+ msg_eval = eval(repr(msg))\n+ assert type(msg_eval) == MetaMessage\n+ assert msg == msg_eval\n+\n+\n+def test_frozen_unknown_meta_repr():\n+ msg = FrozenUnknownMetaMessage(type_byte=99, data=[1, 2], time=10)\n+ msg_eval = eval(repr(msg))\n+ assert type(msg_eval) == UnknownMetaMessage\n+ assert msg == msg_eval\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_issue_reference\",\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 1,\n \"test_score\": 1\n },\n \"num_modified_files\": 5\n}"},"version":{"kind":"string","value":"1.2"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"pytest\",\n \"pip_packages\": [\n \"pytest\",\n \"check-manifest\",\n \"flake8\",\n \"sphinx\"\n ],\n \"pre_install\": null,\n \"python\": \"3.9\",\n \"reqs_path\": null,\n \"test_cmd\": \"pytest 
--no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"alabaster==0.7.16\nbabel==2.17.0\nbuild==1.2.2.post1\ncachetools==5.5.2\ncertifi==2025.1.31\nchardet==5.2.0\ncharset-normalizer==3.4.1\ncheck-manifest==0.50\ncolorama==0.4.6\ndistlib==0.3.9\ndocutils==0.21.2\nexceptiongroup @ file:///croot/exceptiongroup_1706031385326/work\nfilelock==3.18.0\nflake8==7.2.0\nidna==3.10\nimagesize==1.4.1\nimportlib_metadata==8.6.1\niniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work\nJinja2==3.1.6\nMarkupSafe==3.0.2\nmccabe==0.7.0\n-e git+https://github.com/mido/mido.git@e87384d7e5d62de361a65ab6b1d5d62750475e84#egg=mido\npackaging @ file:///croot/packaging_1734472117206/work\nplatformdirs==4.3.7\npluggy @ file:///croot/pluggy_1733169602837/work\npycodestyle==2.13.0\npyflakes==3.3.1\nPygments==2.19.1\npyproject-api==1.9.0\npyproject_hooks==1.2.0\npytest @ file:///croot/pytest_1738938843180/work\nrequests==2.32.3\nsnowballstemmer==2.2.0\nSphinx==7.4.7\nsphinxcontrib-applehelp==2.0.0\nsphinxcontrib-devhelp==2.0.0\nsphinxcontrib-htmlhelp==2.1.0\nsphinxcontrib-jsmath==1.0.1\nsphinxcontrib-qthelp==2.0.0\nsphinxcontrib-serializinghtml==2.0.0\ntomli==2.2.1\ntox==4.25.0\ntyping_extensions==4.13.0\nurllib3==2.3.0\nvirtualenv==20.29.3\nzipp==3.21.0\n"},"environment":{"kind":"string","value":"name: mido\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - exceptiongroup=1.2.0=py39h06a4308_0\n - iniconfig=1.1.1=pyhd3eb1b0_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - packaging=24.2=py39h06a4308_0\n - pip=25.0=py39h06a4308_0\n - pluggy=1.5.0=py39h06a4308_0\n - pytest=8.3.4=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - alabaster==0.7.16\n - babel==2.17.0\n - build==1.2.2.post1\n - cachetools==5.5.2\n - certifi==2025.1.31\n - chardet==5.2.0\n - charset-normalizer==3.4.1\n - check-manifest==0.50\n - colorama==0.4.6\n - distlib==0.3.9\n - docutils==0.21.2\n - filelock==3.18.0\n - flake8==7.2.0\n - idna==3.10\n - imagesize==1.4.1\n - importlib-metadata==8.6.1\n - jinja2==3.1.6\n - markupsafe==3.0.2\n - mccabe==0.7.0\n - platformdirs==4.3.7\n - pycodestyle==2.13.0\n - pyflakes==3.3.1\n - pygments==2.19.1\n - pyproject-api==1.9.0\n - pyproject-hooks==1.2.0\n - requests==2.32.3\n - snowballstemmer==2.2.0\n - sphinx==7.4.7\n - sphinxcontrib-applehelp==2.0.0\n - sphinxcontrib-devhelp==2.0.0\n - sphinxcontrib-htmlhelp==2.1.0\n - sphinxcontrib-jsmath==1.0.1\n - sphinxcontrib-qthelp==2.0.0\n - sphinxcontrib-serializinghtml==2.0.0\n - tomli==2.2.1\n - tox==4.25.0\n - typing-extensions==4.13.0\n - urllib3==2.3.0\n - virtualenv==20.29.3\n - zipp==3.21.0\nprefix: /opt/conda/envs/mido\n"},"FAIL_TO_PASS":{"kind":"list 
like","value":["tests/messages/test_messages.py::test_repr","tests/midifiles/test_meta.py::test_meta_message_repr","tests/midifiles/test_meta.py::test_unknown_meta_message_repr","tests/midifiles/test_midifiles.py::test_midifile_repr","tests/midifiles/test_tracks.py::test_track_repr","tests/test_frozen.py::test_frozen_repr","tests/test_frozen.py::test_frozen_meta_repr","tests/test_frozen.py::test_frozen_unknown_meta_repr"],"string":"[\n \"tests/messages/test_messages.py::test_repr\",\n \"tests/midifiles/test_meta.py::test_meta_message_repr\",\n \"tests/midifiles/test_meta.py::test_unknown_meta_message_repr\",\n \"tests/midifiles/test_midifiles.py::test_midifile_repr\",\n \"tests/midifiles/test_tracks.py::test_track_repr\",\n \"tests/test_frozen.py::test_frozen_repr\",\n \"tests/test_frozen.py::test_frozen_meta_repr\",\n \"tests/test_frozen.py::test_frozen_unknown_meta_repr\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/messages/test_messages.py::test_msg_time_equality","tests/messages/test_messages.py::test_set_type","tests/messages/test_messages.py::test_encode_pitchwheel","tests/messages/test_messages.py::test_decode_pitchwheel","tests/messages/test_messages.py::test_encode_songpos","tests/messages/test_messages.py::test_decode_songpos","tests/messages/test_messages.py::test_sysex_data_is_sysexdata_object","tests/messages/test_messages.py::test_sysex_data_accepts_different_types","tests/messages/test_messages.py::test_copy","tests/messages/test_messages.py::test_init_invalid_argument","tests/messages/test_messages.py::test_copy_invalid_argument","tests/messages/test_messages.py::test_copy_cant_change_type","tests/messages/test_messages.py::test_copy_can_have_same_type","tests/messages/test_messages.py::test_copy_handles_data_generator","tests/messages/test_messages.py::test_compare_with_nonmessage","tests/messages/test_messages.py::test_from_dict_default_values","tests/messages/test_messages.py::test_dict_sysex_data","tests/messages/test_messages.py::test_from_hex_sysex_data_type","tests/midifiles/test_meta.py::test_copy_invalid_argument","tests/midifiles/test_meta.py::test_copy_cant_override_type","tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig0]","tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig1]","tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig2]","tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig3]","tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig4]","tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes0-C]","tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes1-Am]","tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes2-Cb]","tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes3-Abm]","tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes4-A#m]","tests/midifiles/test_midifiles.py::test_no_tracks","tests/midifiles/test_midifiles.py::test_single_message","tests/midifiles/test_midifiles.py::test_too_long_message","tests/midifiles/test_midifiles.py::test_two_tracks","tests/midifiles/test_midifiles.py::test_empty_file","tests/midifiles/test_midifiles.py::test_eof_in_track","tests/midifiles/test_midifiles.py::test_inval
id_data_byte_no_clipping","tests/midifiles/test_midifiles.py::test_invalid_data_byte_with_clipping_high","tests/midifiles/test_midifiles.py::test_meta_messages","tests/midifiles/test_midifiles.py::test_meta_message_bad_key_sig_throws_key_signature_error_sharps","tests/midifiles/test_midifiles.py::test_meta_message_bad_key_sig_throws_key_signature_error_flats","tests/midifiles/test_midifiles.py::test_meta_messages_with_length_0","tests/midifiles/test_tracks.py::test_track_slice","tests/midifiles/test_tracks.py::test_track_name","tests/test_frozen.py::test_hashability","tests/test_frozen.py::test_freeze_and_thaw","tests/test_frozen.py::test_thawed_message_is_copy","tests/test_frozen.py::test_is_frozen"],"string":"[\n \"tests/messages/test_messages.py::test_msg_time_equality\",\n \"tests/messages/test_messages.py::test_set_type\",\n \"tests/messages/test_messages.py::test_encode_pitchwheel\",\n \"tests/messages/test_messages.py::test_decode_pitchwheel\",\n \"tests/messages/test_messages.py::test_encode_songpos\",\n \"tests/messages/test_messages.py::test_decode_songpos\",\n \"tests/messages/test_messages.py::test_sysex_data_is_sysexdata_object\",\n \"tests/messages/test_messages.py::test_sysex_data_accepts_different_types\",\n \"tests/messages/test_messages.py::test_copy\",\n \"tests/messages/test_messages.py::test_init_invalid_argument\",\n \"tests/messages/test_messages.py::test_copy_invalid_argument\",\n \"tests/messages/test_messages.py::test_copy_cant_change_type\",\n \"tests/messages/test_messages.py::test_copy_can_have_same_type\",\n \"tests/messages/test_messages.py::test_copy_handles_data_generator\",\n \"tests/messages/test_messages.py::test_compare_with_nonmessage\",\n \"tests/messages/test_messages.py::test_from_dict_default_values\",\n \"tests/messages/test_messages.py::test_dict_sysex_data\",\n \"tests/messages/test_messages.py::test_from_hex_sysex_data_type\",\n \"tests/midifiles/test_meta.py::test_copy_invalid_argument\",\n \"tests/midifiles/test_meta.py::test_copy_cant_override_type\",\n \"tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig0]\",\n \"tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig1]\",\n \"tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig2]\",\n \"tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig3]\",\n \"tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig4]\",\n \"tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes0-C]\",\n \"tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes1-Am]\",\n \"tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes2-Cb]\",\n \"tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes3-Abm]\",\n \"tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes4-A#m]\",\n \"tests/midifiles/test_midifiles.py::test_no_tracks\",\n \"tests/midifiles/test_midifiles.py::test_single_message\",\n \"tests/midifiles/test_midifiles.py::test_too_long_message\",\n \"tests/midifiles/test_midifiles.py::test_two_tracks\",\n \"tests/midifiles/test_midifiles.py::test_empty_file\",\n \"tests/midifiles/test_midifiles.py::test_eof_in_track\",\n \"tests/midifiles/test_midifiles.py::test_invalid_data_byte_no_clipping\",\n 
\"tests/midifiles/test_midifiles.py::test_invalid_data_byte_with_clipping_high\",\n \"tests/midifiles/test_midifiles.py::test_meta_messages\",\n \"tests/midifiles/test_midifiles.py::test_meta_message_bad_key_sig_throws_key_signature_error_sharps\",\n \"tests/midifiles/test_midifiles.py::test_meta_message_bad_key_sig_throws_key_signature_error_flats\",\n \"tests/midifiles/test_midifiles.py::test_meta_messages_with_length_0\",\n \"tests/midifiles/test_tracks.py::test_track_slice\",\n \"tests/midifiles/test_tracks.py::test_track_name\",\n \"tests/test_frozen.py::test_hashability\",\n \"tests/test_frozen.py::test_freeze_and_thaw\",\n \"tests/test_frozen.py::test_thawed_message_is_copy\",\n \"tests/test_frozen.py::test_is_frozen\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"MIT License"},"__index_level_0__":{"kind":"number","value":2654,"string":"2,654"},"num_tokens_patch":{"kind":"number","value":1516,"string":"1,516"},"before_filepaths":{"kind":"list like","value":["mido/frozen.py","mido/messages/messages.py","mido/midifiles/meta.py","mido/midifiles/midifiles.py","mido/midifiles/tracks.py"],"string":"[\n \"mido/frozen.py\",\n \"mido/messages/messages.py\",\n \"mido/midifiles/meta.py\",\n \"mido/midifiles/midifiles.py\",\n \"mido/midifiles/tracks.py\"\n]"}}},{"rowIdx":598,"cells":{"instance_id":{"kind":"string","value":"google__docker-explorer-31"},"base_commit":{"kind":"string","value":"80376ae9503280241d6c14838a69d026f0987da9"},"created_at":{"kind":"string","value":"2018-06-12 12:02:11"},"environment_setup_commit":{"kind":"string","value":"80376ae9503280241d6c14838a69d026f0987da9"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/docker_explorer/de.py b/docker_explorer/de.py\nindex ea8afa3..1e78818 100644\n--- a/docker_explorer/de.py\n+++ b/docker_explorer/de.py\n@@ -24,7 +24,6 @@ import os\n \n from docker_explorer import errors\n from docker_explorer.lib import container\n-from docker_explorer.lib import storage\n from docker_explorer.lib import utils\n \n \n@@ -34,8 +33,6 @@ class DockerExplorer(object):\n Attributes:\n docker_directory (str): Path to use as the root of the Docker runtime.\n Default is '/var/lib/docker'.\n- storage_object (lib.Storage): The object implementing the methods for\n- exploring the Docker containers.\n \"\"\"\n \n def __init__(self):\n@@ -45,16 +42,12 @@ class DockerExplorer(object):\n self.containers_directory = None\n self.docker_directory = None\n self.docker_version = 2\n- self.storage_object = None\n \n def _SetDockerDirectory(self, docker_path):\n \"\"\"Sets the Docker main directory.\n \n Args:\n docker_path(str): the absolute path to the docker directory.\n-\n- Raises:\n- errors.BadStorageException: If the storage backend couldn't be detected.\n \"\"\"\n self.docker_directory = docker_path\n if not os.path.isdir(self.docker_directory):\n@@ -67,29 +60,6 @@ class DockerExplorer(object):\n self.containers_directory = os.path.join(\n self.docker_directory, 'containers')\n \n- if os.path.isfile(\n- os.path.join(self.docker_directory, 'repositories-aufs')):\n- # TODO: check this agains other storages in version 1.9 and below\n- self.docker_version = 1\n- self.storage_object = storage.AufsStorage(\n- docker_directory=self.docker_directory, docker_version=1)\n- elif os.path.isdir(os.path.join(self.docker_directory, 'overlay2')):\n- self.storage_object = storage.Overlay2Storage(\n- docker_directory=self.docker_directory)\n- elif 
os.path.isdir(os.path.join(self.docker_directory, 'overlay')):\n- self.storage_object = storage.OverlayStorage(\n- docker_directory=self.docker_directory)\n- elif os.path.isdir(os.path.join(self.docker_directory, 'aufs')):\n- self.storage_object = storage.AufsStorage(\n- docker_directory=self.docker_directory)\n- if self.storage_object is None:\n- err_message = (\n- 'Could not detect storage system. '\n- 'Make sure the docker directory ({0:s}) is correct. '\n- 'If it is correct, you might want to run this script'\n- ' with higher privileges.'.format(self.docker_directory))\n- raise errors.BadStorageException(err_message)\n-\n def AddBasicOptions(self, argument_parser):\n \"\"\"Adds the global options to the argument_parser.\n \n@@ -218,7 +188,7 @@ class DockerExplorer(object):\n mountpoint (str): the path to the destination mount point.\n \"\"\"\n container_object = self.GetContainer(container_id)\n- self.storage_object.Mount(container_object, mountpoint)\n+ container_object.Mount(mountpoint)\n \n def GetContainersString(self, only_running=False):\n \"\"\"Returns a string describing the running containers.\n@@ -260,10 +230,6 @@ class DockerExplorer(object):\n \"\"\"\n print(self.GetContainersString(only_running=only_running))\n \n- def ShowRepositories(self):\n- \"\"\"Displays information about the images in the Docker repository.\"\"\"\n- print(self.storage_object.ShowRepositories())\n-\n def ShowHistory(self, container_id, show_empty_layers=False):\n \"\"\"Prints the modification history of a container.\n \n@@ -274,6 +240,33 @@ class DockerExplorer(object):\n container_object = self.GetContainer(container_id)\n print(container_object.GetHistory(show_empty_layers))\n \n+ def GetRepositoriesString(self):\n+ \"\"\"Returns information about images in the local Docker repositories.\n+\n+ Returns:\n+ str: human readable list of images in local Docker repositories.\n+ \"\"\"\n+ result_string = ''\n+ repositories = []\n+ if self.docker_version == 1:\n+ repositories = [os.path.join(self.docker_directory, 'repositories-aufs')]\n+ else:\n+ image_path = os.path.join(self.docker_directory, 'image')\n+ for storage_method in os.listdir(image_path):\n+ repositories_file_path = os.path.join(\n+ image_path, storage_method, 'repositories.json')\n+ if os.path.isfile(repositories_file_path):\n+ repositories.append(repositories_file_path)\n+\n+ for repositories_file_path in repositories:\n+ result_string += (\n+ 'Listing repositories from file {0:s}\\n'.format(\n+ repositories_file_path))\n+ with open(repositories_file_path) as rf:\n+ result_string += utils.PrettyPrintJSON(rf.read())\n+\n+ return result_string\n+\n def Main(self):\n \"\"\"The main method for the DockerExplorer class.\n \n@@ -287,7 +280,6 @@ class DockerExplorer(object):\n \n self._SetDockerDirectory(self.docker_directory)\n \n-\n if options.command == 'mount':\n self.Mount(options.container_id, options.mountpoint)\n \n@@ -301,7 +293,7 @@ class DockerExplorer(object):\n elif options.what == 'running_containers':\n self.ShowContainers(only_running=True)\n elif options.what == 'repositories':\n- self.ShowRepositories()\n+ print(self.GetRepositoriesString())\n \n else:\n raise ValueError('Unhandled command %s' % options.command)\ndiff --git a/docker_explorer/lib/container.py b/docker_explorer/lib/container.py\nindex 4321385..04cec32 100644\n--- a/docker_explorer/lib/container.py\n+++ b/docker_explorer/lib/container.py\n@@ -18,8 +18,10 @@ from __future__ import print_function, unicode_literals\n \n import json\n import os\n+import subprocess\n \n 
from docker_explorer import errors\n+from docker_explorer.lib import storage\n from docker_explorer.lib import utils\n \n \n@@ -38,11 +40,18 @@ class Container(object):\n name (str): the name of the container.\n running (boolean): True if the container is running.\n start_timestamp (str): the container's start timestamp.\n- storage_driver (str): the container's storage driver.\n+ storage_name (str): the container's storage driver name.\n+ storage_object (BaseStorage): the container's storage backend object.\n volumes (list(tuple)): list of mount points to bind from host to the\n container. (Docker storage backend v1).\n \"\"\"\n \n+ STORAGES_MAP = {\n+ 'aufs': storage.AufsStorage,\n+ 'overlay': storage.OverlayStorage,\n+ 'overlay2': storage.Overlay2Storage\n+ }\n+\n def __init__(self, docker_directory, container_id, docker_version=2):\n \"\"\"Initializes the Container class.\n \n@@ -88,16 +97,18 @@ class Container(object):\n if json_state:\n self.running = json_state.get('Running', False)\n self.start_timestamp = json_state.get('StartedAt', False)\n- self.storage_driver = container_info_dict.get('Driver', None)\n- if self.storage_driver is None:\n+ self.storage_name = container_info_dict.get('Driver', None)\n+ if self.storage_name is None:\n raise errors.BadContainerException(\n '{0} container config file lacks Driver key'.format(\n container_info_json_path))\n+\n+ self._SetStorage(self.storage_name)\n self.volumes = container_info_dict.get('Volumes', None)\n \n if self.docker_version == 2:\n c_path = os.path.join(\n- self.docker_directory, 'image', self.storage_driver, 'layerdb',\n+ self.docker_directory, 'image', self.storage_name, 'layerdb',\n 'mounts', container_id)\n with open(os.path.join(c_path, 'mount-id')) as mount_id_file:\n self.mount_id = mount_id_file.read()\n@@ -131,7 +142,7 @@ class Container(object):\n elif self.docker_version == 2:\n hash_method, layer_id = layer_id.split(':')\n layer_info_path = os.path.join(\n- self.docker_directory, 'image', self.storage_driver, 'imagedb',\n+ self.docker_directory, 'image', self.storage_name, 'imagedb',\n 'content', hash_method, layer_id)\n if os.path.isfile(layer_info_path):\n with open(layer_info_path) as layer_info_file:\n@@ -162,7 +173,7 @@ class Container(object):\n elif self.docker_version == 2:\n hash_method, layer_id = current_layer.split(':')\n parent_layer_path = os.path.join(\n- self.docker_directory, 'image', self.storage_driver, 'imagedb',\n+ self.docker_directory, 'image', self.storage_name, 'imagedb',\n 'metadata', hash_method, layer_id, 'parent')\n if not os.path.isfile(parent_layer_path):\n break\n@@ -204,3 +215,41 @@ class Container(object):\n else:\n history_str += 'Empty layer'\n return history_str\n+\n+ def _SetStorage(self, storage_name):\n+ \"\"\"Sets the storage_object attribute.\n+\n+ Args:\n+ storage_name (str): the name of the storage.\n+ Returns:\n+ BaseStorage: a storage object.\n+ Raises:\n+ BadContainerException: if no storage Driver is defined, or if it is not\n+ implemented\n+ \"\"\"\n+ storage_class = self.STORAGES_MAP.get(storage_name, None)\n+\n+ if storage_class is None:\n+ raise errors.BadContainerException(\n+ 'Storage driver {0} is not implemented'.format(storage_name))\n+\n+ self.storage_object = storage_class(\n+ self.docker_directory, self.docker_version)\n+\n+ def Mount(self, mount_dir):\n+ \"\"\"Mounts the specified container's filesystem.\n+\n+ Args:\n+ mount_dir (str): the path to the destination mount point\n+ \"\"\"\n+\n+ commands = self.storage_object.MakeMountCommands(self, 
mount_dir)\n+    for c in commands:\n+      print(c)\n+    print('Do you want to mount this container ID: {0:s} on {1:s} ?\\n'\n+          '(ie: run these commands) [Y/n]'.format(self.container_id, mount_dir))\n+    choice = raw_input().lower()\n+    if not choice in ['y', 'yes', '']:\n+      for c in commands:\n+        # TODO() this is quite unsafe, need to properly split args\n+        subprocess.call(c, shell=True)\ndiff --git a/docker_explorer/lib/storage.py b/docker_explorer/lib/storage.py\nindex 1735fa2..2cc96c1 100644\n--- a/docker_explorer/lib/storage.py\n+++ b/docker_explorer/lib/storage.py\n@@ -17,11 +17,8 @@\n from __future__ import print_function, unicode_literals\n \n import os\n-import subprocess\n import sys\n \n-from docker_explorer.lib import utils\n-\n \n class BaseStorage(object):\n   \"\"\"This class provides tools to list and access containers metadata.\n@@ -51,24 +48,6 @@\n     if self.docker_version == 1:\n       self.container_config_filename = 'config.json'\n \n-  def ShowRepositories(self):\n-    \"\"\"Returns information about the images in the Docker repository.\n-\n-    Returns:\n-      str: human readable information about image repositories.\n-    \"\"\"\n-    repositories_file_path = os.path.join(\n-        self.docker_directory, 'image', self.STORAGE_METHOD,\n-        'repositories.json')\n-    if self.docker_version == 1:\n-      repositories_file_path = os.path.join(\n-          self.docker_directory, 'repositories-aufs')\n-    result_string = (\n-        'Listing repositories from file {0:s}').format(repositories_file_path)\n-    with open(repositories_file_path) as rf:\n-      repositories_string = rf.read()\n-    return result_string + utils.PrettyPrintJSON(repositories_string)\n-\n   def MakeMountCommands(self, container_object, mount_dir):\n     \"\"\"Generates the required shell commands to mount a container given its ID.\n@@ -123,25 +102,6 @@\n \n     return extra_commands\n \n-  def Mount(self, container_object, mount_dir):\n-    \"\"\"Mounts the specified container's filesystem.\n-\n-    Args:\n-      container_object (Container): the container.\n-      mount_dir (str): the path to the destination mount point\n-    \"\"\"\n-\n-    commands = self.MakeMountCommands(container_object, mount_dir)\n-    for c in commands:\n-      print(c)\n-    print('Do you want to mount this container Id: {0:s} on {1:s} ?\\n'\n-          '(ie: run these commands) [Y/n]'.format(\n-              container_object.container_id, mount_dir))\n-    choice = raw_input().lower()\n-    if not choice or choice == 'y' or choice == 'yes':\n-      for c in commands:\n-        # TODO(romaing) this is quite unsafe, need to properly split args\n-        subprocess.call(c, shell=True)\n\n class AufsStorage(BaseStorage):\n   \"\"\"This class implements AuFS storage specific methods.\"\"\"\ndiff --git a/docker_explorer/lib/utils.py b/docker_explorer/lib/utils.py\nindex b4e9db3..cdfb2b8 100644\n--- a/docker_explorer/lib/utils.py\n+++ b/docker_explorer/lib/utils.py\n@@ -44,5 +44,6 @@\n     Returns:\n       str: pretty printed JSON string.\n     \"\"\"\n-    return json.dumps(\n+    pretty_json = json.dumps(\n         json.loads(string), sort_keys=True, indent=4, separators=(', ', ': '))\n+    return pretty_json + '\\n'\n"},"problem_statement":{"kind":"string","value":"Better detect overlay/overlay2 layouts\nWhen processing an image that has both an \"overlay2\" and an \"overlay\" folder, it will take the \"overlay2\" folder by default even though there is no 'mounts' subfolder."},"repo":{"kind":"string","value":"google/docker-explorer"},"test_patch":{"kind":"string","value":"diff --git a/tests.py b/tests.py\nindex f7a79f4..99ca551 100644\n--- 
a/tests.py\n+++ b/tests.py\n@@ -47,7 +47,7 @@ class UtilsTests(unittest.TestCase):\n test_json = json.dumps(test_dict)\n expected_string = ('{\\n \"test\": [\\n {\\n \"dict1\": {\\n'\n ' \"key1\": \"val1\"\\n }, \\n'\n- ' \"dict2\": null\\n }\\n ]\\n}')\n+ ' \"dict2\": null\\n }\\n ]\\n}\\n')\n self.assertEqual(expected_string, utils.PrettyPrintJSON(test_json))\n \n \n@@ -89,8 +89,8 @@ class TestDEMain(unittest.TestCase):\n self.assertEqual(expected_error_message, err.exception.message)\n \n \n-class StorageTestCase(unittest.TestCase):\n- \"\"\"Base class for tests of different BaseStorage implementations.\"\"\"\n+class DockerTestCase(unittest.TestCase):\n+ \"\"\"Base class for tests of different Storage implementations.\"\"\"\n \n @classmethod\n def tearDownClass(cls):\n@@ -98,7 +98,7 @@ class StorageTestCase(unittest.TestCase):\n \n @classmethod\n def _setup(cls, driver, driver_class):\n- \"\"\"Internal method to set up the TestCase on a specific storate.\"\"\"\n+ \"\"\"Internal method to set up the TestCase on a specific storage.\"\"\"\n cls.driver = driver\n docker_directory_path = os.path.join('test_data', 'docker')\n if not os.path.isdir(docker_directory_path):\n@@ -113,17 +113,17 @@ class StorageTestCase(unittest.TestCase):\n \n def testDetectStorage(self):\n \"\"\"Tests the DockerExplorer.DetectStorage function.\"\"\"\n- storage_object = self.de_object.storage_object\n- self.assertIsNotNone(storage_object)\n- self.assertIsInstance(storage_object, self.driver_class)\n- self.assertEqual(storage_object.STORAGE_METHOD, self.driver)\n+ for container_obj in self.de_object.GetAllContainers():\n+ self.assertIsNotNone(container_obj.storage_object)\n+ self.assertEqual(container_obj.storage_name, self.driver)\n+ self.assertIsInstance(container_obj.storage_object, self.driver_class)\n \n- self.assertEqual(2, storage_object.docker_version)\n- self.assertEqual('config.v2.json',\n- self.de_object.container_config_filename)\n+ self.assertEqual(2, container_obj.docker_version)\n+ self.assertEqual(\n+ 'config.v2.json', container_obj.container_config_filename)\n \n \n-class TestAufsStorage(StorageTestCase):\n+class TestAufsStorage(DockerTestCase):\n \"\"\"Tests methods in the BaseStorage object.\"\"\"\n \n @classmethod\n@@ -198,12 +198,13 @@ class TestAufsStorage(StorageTestCase):\n self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD [\"sh\"]'],\n layer_info['container_config']['Cmd'])\n \n- def testShowRepositories(self):\n- \"\"\"Tests the BaseStorage.ShowRepositories function on a AUFS storage.\"\"\"\n- result_string = self.de_object.storage_object.ShowRepositories()\n+ def testGetRepositoriesString(self):\n+ \"\"\"Tests BaseStorage.GetRepositoriesString() on a AUFS storage.\"\"\"\n+ self.maxDiff = None\n+ result_string = self.de_object.GetRepositoriesString()\n expected_string = (\n 'Listing repositories from file '\n- 'test_data/docker/image/aufs/repositories.json{\\n'\n+ 'test_data/docker/image/aufs/repositories.json\\n{\\n'\n ' \"Repositories\": {\\n'\n ' \"busybox\": {\\n'\n ' \"busybox:latest\": '\n@@ -211,7 +212,7 @@ class TestAufsStorage(StorageTestCase):\n '68\"\\n'\n ' }\\n'\n ' }\\n'\n- '}')\n+ '}\\n')\n self.assertEqual(expected_string, result_string)\n \n def testMakeMountCommands(self):\n@@ -219,7 +220,7 @@ class TestAufsStorage(StorageTestCase):\n container_id = (\n '7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966')\n container_obj = self.de_object.GetContainer(container_id)\n- commands = self.de_object.storage_object.MakeMountCommands(\n+ commands = 
container_obj.storage_object.MakeMountCommands(\n container_obj, '/mnt')\n expected_commands = [\n ('mount -t aufs -o ro,br=test_data/docker/aufs/diff/test_data/docker/'\n@@ -253,7 +254,7 @@ class TestAufsStorage(StorageTestCase):\n self.assertEqual(expected_string, container_obj.GetHistory())\n \n \n-class TestOverlayStorage(StorageTestCase):\n+class TestOverlayStorage(DockerTestCase):\n \"\"\"Tests methods in the OverlayStorage object.\"\"\"\n \n @classmethod\n@@ -329,13 +330,13 @@ class TestOverlayStorage(StorageTestCase):\n self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD [\"sh\"]'],\n layer_info['container_config']['Cmd'])\n \n- def testShowRepositories(self):\n- \"\"\"Tests the BaseStorage.ShowRepositories function on a Overlay storage.\"\"\"\n- result_string = self.de_object.storage_object.ShowRepositories()\n+ def testGetRepositoriesString(self):\n+ \"\"\"Tests BaseStorage.GetRepositoriesString() on a Overlay storage.\"\"\"\n+ result_string = self.de_object.GetRepositoriesString()\n self.maxDiff = None\n expected_string = (\n 'Listing repositories from file '\n- 'test_data/docker/image/overlay/repositories.json{\\n'\n+ 'test_data/docker/image/overlay/repositories.json\\n{\\n'\n ' \"Repositories\": {\\n'\n ' \"busybox\": {\\n'\n ' \"busybox:latest\": \"sha256:'\n@@ -346,7 +347,7 @@ class TestOverlayStorage(StorageTestCase):\n '2c3\"\\n'\n ' }\\n'\n ' }\\n'\n- '}')\n+ '}\\n')\n self.assertEqual(expected_string, result_string)\n \n def testMakeMountCommands(self):\n@@ -354,7 +355,7 @@ class TestOverlayStorage(StorageTestCase):\n container_id = (\n '5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a')\n container_obj = self.de_object.GetContainer(container_id)\n- commands = self.de_object.storage_object.MakeMountCommands(\n+ commands = container_obj.storage_object.MakeMountCommands(\n container_obj, '/mnt')\n expected_commands = [(\n 'mount -t overlay overlay -o ro,lowerdir='\n@@ -381,7 +382,7 @@ class TestOverlayStorage(StorageTestCase):\n self.assertEqual(expected_string, container_obj.GetHistory())\n \n \n-class TestOverlay2Storage(StorageTestCase):\n+class TestOverlay2Storage(DockerTestCase):\n \"\"\"Tests methods in the Overlay2Storage object.\"\"\"\n \n @classmethod\n@@ -457,13 +458,13 @@ class TestOverlay2Storage(StorageTestCase):\n self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD [\"sh\"]'],\n layer_info['container_config']['Cmd'])\n \n- def testShowRepositories(self):\n- \"\"\"Tests the BaseStorage.ShowRepositories function on a Overlay2 storage.\"\"\"\n- result_string = self.de_object.storage_object.ShowRepositories()\n+ def testGetRepositoriesString(self):\n+ \"\"\"Tests BaseStorage.GetRepositoriesString() on a Overlay2 storage.\"\"\"\n+ result_string = self.de_object.GetRepositoriesString()\n self.maxDiff = None\n expected_string = (\n 'Listing repositories from file '\n- 'test_data/docker/image/overlay2/repositories.json{\\n'\n+ 'test_data/docker/image/overlay2/repositories.json\\n{\\n'\n ' \"Repositories\": {\\n'\n ' \"busybox\": {\\n'\n ' \"busybox:latest\": \"sha256:'\n@@ -474,7 +475,14 @@ class TestOverlay2Storage(StorageTestCase):\n 'c7\"\\n'\n ' }\\n'\n ' }\\n'\n- '}')\n+ '}\\n'\n+ 'Listing repositories from file '\n+ 'test_data/docker/image/overlay/repositories.json\\n'\n+ '{\\n'\n+ ' \"Repositories\": {}\\n'\n+ '}\\n'\n+\n+ )\n self.assertEqual(expected_string, result_string)\n \n def testMakeMountCommands(self):\n@@ -483,7 +491,7 @@ class TestOverlay2Storage(StorageTestCase):\n container_id = (\n 
'8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206')\n container_obj = self.de_object.GetContainer(container_id)\n- commands = self.de_object.storage_object.MakeMountCommands(\n+ commands = container_obj.storage_object.MakeMountCommands(\n container_obj, '/mnt')\n expected_commands = [(\n 'mount -t overlay overlay -o ro,lowerdir='\n@@ -511,7 +519,7 @@ class TestOverlay2Storage(StorageTestCase):\n 'with command : /bin/sh -c #(nop) CMD [\"sh\"]')\n self.assertEqual(expected_string, container_obj.GetHistory(container_obj))\n \n-del StorageTestCase\n+del DockerTestCase\n \n if __name__ == '__main__':\n unittest.main()\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"merge_commit\",\n \"failed_lite_validators\": [\n \"has_short_problem_statement\",\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 1,\n \"test_score\": 0\n },\n \"num_modified_files\": 4\n}"},"version":{"kind":"string","value":"unknown"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"pytest\",\n \"pip_packages\": [\n \"pytest\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.9\",\n \"reqs_path\": null,\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"-e git+https://github.com/google/docker-explorer.git@80376ae9503280241d6c14838a69d026f0987da9#egg=docker_explorer\nexceptiongroup @ file:///croot/exceptiongroup_1706031385326/work\niniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work\npackaging @ file:///croot/packaging_1734472117206/work\npluggy @ file:///croot/pluggy_1733169602837/work\npytest @ file:///croot/pytest_1738938843180/work\ntomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work\n"},"environment":{"kind":"string","value":"name: docker-explorer\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - exceptiongroup=1.2.0=py39h06a4308_0\n - iniconfig=1.1.1=pyhd3eb1b0_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - packaging=24.2=py39h06a4308_0\n - pip=25.0=py39h06a4308_0\n - pluggy=1.5.0=py39h06a4308_0\n - pytest=8.3.4=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tomli=2.0.1=py39h06a4308_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\nprefix: /opt/conda/envs/docker-explorer\n"},"FAIL_TO_PASS":{"kind":"list 
like","value":["tests.py::UtilsTests::testPrettyPrintJSON","tests.py::TestAufsStorage::testDetectStorage","tests.py::TestAufsStorage::testGetRepositoriesString","tests.py::TestAufsStorage::testMakeMountCommands","tests.py::TestOverlayStorage::testDetectStorage","tests.py::TestOverlayStorage::testGetRepositoriesString","tests.py::TestOverlayStorage::testMakeMountCommands","tests.py::TestOverlay2Storage::testDetectStorage","tests.py::TestOverlay2Storage::testMakeMountCommands"],"string":"[\n \"tests.py::UtilsTests::testPrettyPrintJSON\",\n \"tests.py::TestAufsStorage::testDetectStorage\",\n \"tests.py::TestAufsStorage::testGetRepositoriesString\",\n \"tests.py::TestAufsStorage::testMakeMountCommands\",\n \"tests.py::TestOverlayStorage::testDetectStorage\",\n \"tests.py::TestOverlayStorage::testGetRepositoriesString\",\n \"tests.py::TestOverlayStorage::testMakeMountCommands\",\n \"tests.py::TestOverlay2Storage::testDetectStorage\",\n \"tests.py::TestOverlay2Storage::testMakeMountCommands\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":["tests.py::TestOverlay2Storage::testGetRepositoriesString"],"string":"[\n \"tests.py::TestOverlay2Storage::testGetRepositoriesString\"\n]"},"PASS_TO_PASS":{"kind":"list like","value":["tests.py::UtilsTests::testFormatDatetime","tests.py::TestDEMain::testDetectStorageFail","tests.py::TestDEMain::testParseArguments","tests.py::TestAufsStorage::testGetAllContainers","tests.py::TestAufsStorage::testGetContainersString","tests.py::TestAufsStorage::testGetHistory","tests.py::TestAufsStorage::testGetLayerInfo","tests.py::TestAufsStorage::testGetOrderedLayers","tests.py::TestAufsStorage::testGetRunningContainersList","tests.py::TestOverlayStorage::testGetAllContainers","tests.py::TestOverlayStorage::testGetContainersString","tests.py::TestOverlayStorage::testGetHistory","tests.py::TestOverlayStorage::testGetLayerInfo","tests.py::TestOverlayStorage::testGetOrderedLayers","tests.py::TestOverlayStorage::testGetRunningContainersList","tests.py::TestOverlay2Storage::testGetAllContainers","tests.py::TestOverlay2Storage::testGetContainersString","tests.py::TestOverlay2Storage::testGetHistory","tests.py::TestOverlay2Storage::testGetLayerInfo","tests.py::TestOverlay2Storage::testGetOrderedLayers","tests.py::TestOverlay2Storage::testGetRunningContainersList"],"string":"[\n \"tests.py::UtilsTests::testFormatDatetime\",\n \"tests.py::TestDEMain::testDetectStorageFail\",\n \"tests.py::TestDEMain::testParseArguments\",\n \"tests.py::TestAufsStorage::testGetAllContainers\",\n \"tests.py::TestAufsStorage::testGetContainersString\",\n \"tests.py::TestAufsStorage::testGetHistory\",\n \"tests.py::TestAufsStorage::testGetLayerInfo\",\n \"tests.py::TestAufsStorage::testGetOrderedLayers\",\n \"tests.py::TestAufsStorage::testGetRunningContainersList\",\n \"tests.py::TestOverlayStorage::testGetAllContainers\",\n \"tests.py::TestOverlayStorage::testGetContainersString\",\n \"tests.py::TestOverlayStorage::testGetHistory\",\n \"tests.py::TestOverlayStorage::testGetLayerInfo\",\n \"tests.py::TestOverlayStorage::testGetOrderedLayers\",\n \"tests.py::TestOverlayStorage::testGetRunningContainersList\",\n \"tests.py::TestOverlay2Storage::testGetAllContainers\",\n \"tests.py::TestOverlay2Storage::testGetContainersString\",\n \"tests.py::TestOverlay2Storage::testGetHistory\",\n \"tests.py::TestOverlay2Storage::testGetLayerInfo\",\n \"tests.py::TestOverlay2Storage::testGetOrderedLayers\",\n \"tests.py::TestOverlay2Storage::testGetRunningContainersList\"\n]"},"PASS_TO_FAIL":{"kind":"list 
like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"Apache License 2.0"},"__index_level_0__":{"kind":"number","value":2657,"string":"2,657"},"num_tokens_patch":{"kind":"number","value":3079,"string":"3,079"},"before_filepaths":{"kind":"list like","value":["docker_explorer/de.py","docker_explorer/lib/container.py","docker_explorer/lib/storage.py","docker_explorer/lib/utils.py"],"string":"[\n \"docker_explorer/de.py\",\n \"docker_explorer/lib/container.py\",\n \"docker_explorer/lib/storage.py\",\n \"docker_explorer/lib/utils.py\"\n]"}}},{"rowIdx":599,"cells":{"instance_id":{"kind":"string","value":"AmiiThinks__driving_gridworld-13"},"base_commit":{"kind":"string","value":"fbc47c68cfade4e7d95ba59a3990dfef196389a6"},"created_at":{"kind":"string","value":"2018-06-12 21:08:06"},"environment_setup_commit":{"kind":"string","value":"fbc47c68cfade4e7d95ba59a3990dfef196389a6"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/driving_gridworld/road.py b/driving_gridworld/road.py\nindex cb519ef..559362f 100644\n--- a/driving_gridworld/road.py\n+++ b/driving_gridworld/road.py\n@@ -142,13 +142,12 @@ def combinations(iterable, r, collection=tuple):\n \n \n class Road(object):\n- def __init__(self, num_rows, car, obstacles, speed_limit):\n- if speed_limit < car.speed:\n+ def __init__(self, num_rows, car, obstacles):\n+ if num_rows + 1 < car.speed:\n raise ValueError(\"Car's speed above speed limit!\")\n self._num_rows = num_rows\n self._num_columns = 4\n self._car = car\n- self._speed_limit = speed_limit\n self._obstacles = obstacles\n self._available_spaces = {}\n for pos in product(range(0, self._car.speed), range(4)):\n@@ -159,6 +158,20 @@ class Road(object):\n if disallowed_position in self._available_spaces:\n del self._available_spaces[disallowed_position]\n \n+ def speed_limit(self):\n+ '''The hard speed limit on this road.\n+\n+ Taking the `UP` action when traveling at the speed limit has no effect.\n+\n+ Set according to the headlight range since overdriving the\n+ headlights too much breaks the physical plausibility of the game\n+ due to the way we reusing obstacles to simulate arbitrarily long\n+ roads with many obstacles. This is not too much of a restriction\n+ though because even overdriving the headlights by one unit is\n+ completely unsafe.\n+ '''\n+ return self._num_rows + 1\n+\n def obstacle_outside_car_path(self, obstacle):\n return (obstacle.col < 0 or obstacle.col >= self._num_columns\n or obstacle.row >= self._num_rows)\n@@ -198,7 +211,7 @@ class Road(object):\n state. 
The reward function is deterministic.\n        '''\n \n-        next_car = self._car.next(action, self._speed_limit)\n+        next_car = self._car.next(action, self.speed_limit())\n \n         for positions, reveal_indices in (\n                 self.every_combination_of_revealed_obstacles()):\n@@ -225,8 +238,7 @@\n             reward += self._car.reward()\n             if self._car.col == 0 or self._car.col == 3:\n                 reward -= 4 * self._car.speed\n-            next_road = self.__class__(self._num_rows, next_car,\n-                                       next_obstacles, self._speed_limit)\n+            next_road = self.__class__(self._num_rows, next_car, next_obstacles)\n             yield (next_road, prob, reward)\n \n     def to_key(self, show_walls=False):\n"},"problem_statement":{"kind":"string","value":"Enforce a hard limit on the speed limit in `Road` to the number of rows + 1\nIf the speed limit is larger than this, then the physical plausibility of the simulation breaks, because the number of possible obstacle encounters across a fixed distance can depend on the car's speed and the range of its headlights (the number of rows)."},"repo":{"kind":"string","value":"AmiiThinks/driving_gridworld"},"test_patch":{"kind":"string","value":"diff --git a/test/road_test.py b/test/road_test.py\nindex ae22a47..d8aeb36 100644\n--- a/test/road_test.py\n+++ b/test/road_test.py\n@@ -9,9 +9,8 @@ import pytest\n def test_transition_probs_without_obstacles_are_always_1():\n     num_rows = 4\n     obstacles = []\n-    speed_limit = 1\n     car_inst = Car(0, 0, 1)\n-    road_test = Road(num_rows, car_inst, obstacles, speed_limit)\n+    road_test = Road(num_rows, car_inst, obstacles)\n \n     for a in ACTIONS:\n         for next_state, prob, reward in road_test.successors(a):\n@@ -21,9 +20,7 @@\n @pytest.mark.parametrize(\"obst\", [Bump(0, 0), Pedestrian(0, 0)])\n def test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_all_obstacles_already_on_road(obst):\n     num_rows = 2\n-    speed_limit = 1\n-\n-    road_test = Road(num_rows, Car(1, 1, 1), [obst], speed_limit)\n+    road_test = Road(num_rows, Car(1, 1, 1), [obst])\n     patient = [\n         (positions, reveal_indices)\n         for positions, reveal_indices in\n@@ -36,9 +33,7 @@\n @pytest.mark.parametrize(\"action\", ACTIONS)\n def test_transition_probs_with_one_obstacle_are_1(obst, action):\n     num_rows = 2\n-    speed_limit = 1\n-\n-    road_test = Road(num_rows, Car(1, 1, 1), [obst], speed_limit)\n+    road_test = Road(num_rows, Car(1, 1, 1), [obst])\n     probs = [\n         prob\n         for next_state, prob, reward in road_test.successors(action)\n@@ -50,9 +45,7 @@\n @pytest.mark.parametrize(\"action\", ACTIONS)\n def test_transition_probs_with_invisible_obstacle(obst, action):\n     num_rows = 2\n-    speed_limit = 1\n-\n-    road_test = Road(num_rows, Car(1, 1, 1), [obst], speed_limit)\n+    road_test = Road(num_rows, Car(1, 1, 1), [obst])\n     probs = [\n         prob\n         for next_state, prob, reward in road_test.successors(action)\n@@ -72,9 +65,8 @@\n def test_driving_faster_gives_a_larger_reward(action, 
current_speed):\n def test_road_cannot_start_with_car_going_faster_than_speed_limit():\n num_rows = 4\n obstacles = []\n- speed_limit = 1\n- current_speed = 2\n+ current_speed = 6\n car = Car(0, 0, current_speed)\n-\n with pytest.raises(ValueError):\n- road_test = Road(num_rows, car, obstacles, speed_limit)\n+ road_test = Road(num_rows, car, obstacles)\n \n \n @pytest.mark.parametrize(\"car\", [Car(0, 0, 1), Car(0, 3, 1)])\n@@ -95,20 +85,28 @@ def test_road_cannot_start_with_car_going_faster_than_speed_limit():\n def test_receive_negative_reward_for_driving_off_the_road(car, action):\n num_rows = 4\n obstacles = []\n- speed_limit = 2\n- road_test = Road(num_rows, car, obstacles, speed_limit)\n+ road_test = Road(num_rows, car, obstacles)\n for next_state, prob, reward in road_test.successors(action):\n assert reward < 0\n+\n+\n @pytest.mark.parametrize(\"obst\", [Bump(-1, -1), Pedestrian(0, -1)])\n @pytest.mark.parametrize(\"action\", ACTIONS)\n @pytest.mark.parametrize(\"speed\", [1, 2, 3])\n def test_number_of_successors_invisible_obstacle_and_variable_speeds(\n obst, action, speed):\n num_rows = 2\n- speed_limit = 3\n- road_test = Road(num_rows, Car(1, 1, speed), [obst], speed_limit)\n+ road_test = Road(num_rows, Car(1, 1, speed), [obst])\n probs = [\n prob\n for next_state, prob, reward in road_test.successors(action)\n ]\n assert len(probs) == 4 * speed + 1\n+\n+\n+def test_speed_limit_equals_number_of_rows_plus_one():\n+ num_rows = 2\n+ obstacles = []\n+ car = Car(0, 0, 1)\n+ road_test = Road(num_rows, car, obstacles)\n+ assert road_test.speed_limit() == num_rows + 1\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"merge_commit\",\n \"failed_lite_validators\": [\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 0,\n \"test_score\": 0\n },\n \"num_modified_files\": 1\n}"},"version":{"kind":"string","value":"unknown"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .[dev]\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\",\n \"pytest-cov\"\n ],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.9\",\n \"reqs_path\": [\n \"requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"coverage==7.8.0\n-e git+https://github.com/AmiiThinks/driving_gridworld.git@fbc47c68cfade4e7d95ba59a3990dfef196389a6#egg=driving_gridworld\nexceptiongroup==1.2.2\nfire==0.7.0\nfuture==0.15.2\niniconfig==2.1.0\nnumpy==2.0.2\npackaging==24.2\npluggy==1.5.0\npycolab==1.2\npytest==8.3.5\npytest-cov==6.0.0\nsix==1.17.0\ntermcolor==3.0.0\ntomli==2.2.1\n"},"environment":{"kind":"string","value":"name: driving_gridworld\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - pip=25.0=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - 
sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - coverage==7.8.0\n - exceptiongroup==1.2.2\n - fire==0.7.0\n - future==0.15.2\n - iniconfig==2.1.0\n - numpy==2.0.2\n - packaging==24.2\n - pluggy==1.5.0\n - pycolab==1.2\n - pytest==8.3.5\n - pytest-cov==6.0.0\n - six==1.17.0\n - termcolor==3.0.0\n - tomli==2.2.1\nprefix: /opt/conda/envs/driving_gridworld\n"},"FAIL_TO_PASS":{"kind":"list like","value":["test/road_test.py::test_transition_probs_without_obstacles_are_always_1","test/road_test.py::test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_all_obstacles_already_on_road[obst0]","test/road_test.py::test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_all_obstacles_already_on_road[obst1]","test/road_test.py::test_transition_probs_with_one_obstacle_are_1[0-obst0]","test/road_test.py::test_transition_probs_with_one_obstacle_are_1[0-obst1]","test/road_test.py::test_transition_probs_with_one_obstacle_are_1[1-obst0]","test/road_test.py::test_transition_probs_with_one_obstacle_are_1[1-obst1]","test/road_test.py::test_transition_probs_with_one_obstacle_are_1[2-obst0]","test/road_test.py::test_transition_probs_with_one_obstacle_are_1[2-obst1]","test/road_test.py::test_transition_probs_with_one_obstacle_are_1[3-obst0]","test/road_test.py::test_transition_probs_with_one_obstacle_are_1[3-obst1]","test/road_test.py::test_transition_probs_with_one_obstacle_are_1[4-obst0]","test/road_test.py::test_transition_probs_with_one_obstacle_are_1[4-obst1]","test/road_test.py::test_transition_probs_with_invisible_obstacle[0-obst0]","test/road_test.py::test_transition_probs_with_invisible_obstacle[0-obst1]","test/road_test.py::test_transition_probs_with_invisible_obstacle[1-obst0]","test/road_test.py::test_transition_probs_with_invisible_obstacle[1-obst1]","test/road_test.py::test_transition_probs_with_invisible_obstacle[2-obst0]","test/road_test.py::test_transition_probs_with_invisible_obstacle[2-obst1]","test/road_test.py::test_transition_probs_with_invisible_obstacle[3-obst0]","test/road_test.py::test_transition_probs_with_invisible_obstacle[3-obst1]","test/road_test.py::test_transition_probs_with_invisible_obstacle[4-obst0]","test/road_test.py::test_transition_probs_with_invisible_obstacle[4-obst1]","test/road_test.py::test_driving_faster_gives_a_larger_reward[1-0]","test/road_test.py::test_driving_faster_gives_a_larger_reward[1-1]","test/road_test.py::test_driving_faster_gives_a_larger_reward[1-2]","test/road_test.py::test_driving_faster_gives_a_larger_reward[1-3]","test/road_test.py::test_driving_faster_gives_a_larger_reward[1-4]","test/road_test.py::test_driving_faster_gives_a_larger_reward[2-0]","test/road_test.py::test_driving_faster_gives_a_larger_reward[2-1]","test/road_test.py::test_driving_faster_gives_a_larger_reward[2-2]","test/road_test.py::test_driving_faster_gives_a_larger_reward[2-3]","test/road_test.py::test_driving_faster_gives_a_larger_reward[2-4]","test/road_test.py::test_driving_faster_gives_a_larger_reward[3-0]","test/road_test.py::test_driving_faster_gives_a_larger_reward[3-1]","test/road_test.py::test_driving_faster_gives_a_larger_reward[3-2]","test/road_test.py::test_driving_faster_gives_a_larger_reward[3-3]","test/road_test.py::test_driving_faster_gives_a_larger_reward[3-4]","test/road_test.py::test_driving_faster_gives_a_larger_reward[4-0]","test/road_test.py::test_driving_faster_gives_a_larger_reward[4-1]","test/ro
diff --git a/ioccontainer/inject.py b/ioccontainer/inject.py
index 162f6e1..98ba622 100644
--- a/ioccontainer/inject.py
+++ b/ioccontainer/inject.py
@@ -50,7 +50,11 @@ def inject_decorator(container: 'Container'):
service = container.get(cls)
if _is_positional_argument(position, parameter, new_args):
- new_args.append(service)
+ if len(new_args) >= position + 1:
+ new_args[position] = service
+ else:
+ new_args.append(service)
+
elif _is_keyword_argument(parameter):
kwargs[parameter.name] = service
else:
@@ -96,7 +100,9 @@ def _default_parameter_provided(parameter: inspect.Parameter) -> bool:
def _argument_provided(position: int, parameter: inspect.Parameter,
args: typing.List, kwargs: typing.Dict) -> bool:
- return position < len(args) or parameter.name in kwargs.keys()
+ if position < len(args) and args[position] is not None:
+ return True
+ return kwargs.get(parameter.name) is not None
def _is_positional_argument(
@@ -106,7 +112,9 @@ def _is_positional_argument(
inspect.Parameter.POSITIONAL_OR_KEYWORD)
if parameter.kind not in positional_types:
return False
- return position == len(args)
+ if position == len(args):
+ return True
+ return position + 1 == len(args) and args[position] is None
def _is_keyword_argument(parameter: inspect.Parameter) -> bool:
Inject should override value if None
When using the `@inject` decorator, the provided parameter should be overridden if it is `None`. At the moment an explicitly passed `None` takes precedence 100% of the time, so the service is never injected.
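Here is a self-contained sketch of the overriding rule the patch introduces (the `services` registry, `Greeter` class and `greet` function are illustrative stand-ins; the real library resolves services through its `Container`):

```python
import inspect

# Minimal stand-in for the container's service registry.
services = {}

def inject(func):
    sig = inspect.signature(func)

    def wrapper(*args, **kwargs):
        args = list(args)
        for position, (name, param) in enumerate(sig.parameters.items()):
            if param.annotation not in services:
                continue
            # An argument counts as "provided" only if it is not None.
            provided = (position < len(args) and args[position] is not None) \
                or kwargs.get(name) is not None
            if provided:
                continue
            service = services[param.annotation]
            if position < len(args):
                args[position] = service  # overwrite an explicit None
            else:
                kwargs[name] = service
        return func(*args, **kwargs)

    return wrapper

class Greeter:
    def greet(self):
        return 'hello'

services[Greeter] = Greeter()

@inject
def greet(greeter: Greeter = None):
    return greeter.greet()

assert greet() == 'hello'      # service injected
assert greet(None) == 'hello'  # an explicit None is now overridden
```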
diff --git a/marshmallow/schema.py b/marshmallow/schema.py
index 2327081a..79bb8ee1 100644
--- a/marshmallow/schema.py
+++ b/marshmallow/schema.py
@@ -836,11 +836,11 @@ class BaseSchema(base.SchemaABC):
if pass_many:
validator = functools.partial(validator, many=many)
if many and not pass_many:
- for idx, item in enumerate(data):
+ for idx, (item, orig) in enumerate(zip(data, original_data)):
try:
- self._unmarshal.run_validator(validator,
- item, original_data, self.fields, many=many,
- index=idx, pass_original=pass_original)
+ self._unmarshal.run_validator(
+ validator, item, orig, self.fields, many=many,
+ index=idx, pass_original=pass_original)
except ValidationError as err:
errors.update(err.messages)
else:
post_dump is passing a list of objects as original object
Hi,
I think `post_dump` with `pass_original=True` should pass the original object related to the serialized data, not the list of objects to which that object belongs.
``` python
from marshmallow import fields, post_dump, Schema
class DeviceSchema(Schema):
id = fields.String()
@post_dump(pass_original=True)
def __post_dump(self, data, obj):
print(obj) # <-- this is a list
devices = [dict(id=1), dict(id=2)]
DeviceSchema().dump(devices, many=True)
```
In the above example, the parameter `obj` is a list of devices rather than the device object itself.
What do you think?
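For reference, here is a tiny self-contained analogue of the zip-based dispatch in the patch above (function and variable names are made up):

```python
# Dispatch per-item hooks by zipping serialized data with the original input.
def run_post_dump(hook, data, original_data):
    return [hook(item, orig) for item, orig in zip(data, original_data)]

devices = [dict(id=1), dict(id=2)]
serialized = [{'id': '1'}, {'id': '2'}]
run_post_dump(lambda item, orig: print(orig), serialized, devices)
# prints {'id': 1} then {'id': 2}: each call sees its own original object
```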
diff --git a/pre_commit/commands/autoupdate.py b/pre_commit/commands/autoupdate.py
index ca83a58..666cd11 100644
--- a/pre_commit/commands/autoupdate.py
+++ b/pre_commit/commands/autoupdate.py
@@ -33,9 +33,9 @@ def _update_repo(repo_config, runner, tags_only):
Args:
repo_config - A config for a repository
"""
- repo = Repository.create(repo_config, runner.store)
+ repo_path = runner.store.clone(repo_config['repo'], repo_config['sha'])
- with cwd(repo._repo_path):
+ with cwd(repo_path):
cmd_output('git', 'fetch')
tag_cmd = ('git', 'describe', 'origin/master', '--tags')
if tags_only:
@@ -57,7 +57,7 @@ def _update_repo(repo_config, runner, tags_only):
new_repo = Repository.create(new_config, runner.store)
# See if any of our hooks were deleted with the new commits
- hooks = {hook['id'] for hook in repo.repo_config['hooks']}
+ hooks = {hook['id'] for hook in repo_config['hooks']}
hooks_missing = hooks - (hooks & set(new_repo.manifest_hooks))
if hooks_missing:
raise RepositoryCannotBeUpdatedError(
diff --git a/pre_commit/repository.py b/pre_commit/repository.py
index 3ed160a..624ccd0 100644
--- a/pre_commit/repository.py
+++ b/pre_commit/repository.py
@@ -7,7 +7,6 @@ import os
import pipes
import shutil
import sys
-from collections import defaultdict
import pkg_resources
from cached_property import cached_property
@@ -149,22 +148,11 @@ class Repository(object):
else:
return cls(config, store)
- @cached_property
- def _repo_path(self):
- return self.store.clone(
- self.repo_config['repo'], self.repo_config['sha'],
- )
-
- @cached_property
- def _prefix(self):
- return Prefix(self._repo_path)
-
- def _prefix_from_deps(self, language_name, deps):
- return self._prefix
-
@cached_property
def manifest_hooks(self):
- manifest_path = os.path.join(self._repo_path, C.MANIFEST_FILE)
+ repo, sha = self.repo_config['repo'], self.repo_config['sha']
+ repo_path = self.store.clone(repo, sha)
+ manifest_path = os.path.join(repo_path, C.MANIFEST_FILE)
return {hook['id']: hook for hook in load_manifest(manifest_path)}
@cached_property
@@ -185,21 +173,25 @@ class Repository(object):
for hook in self.repo_config['hooks']
)
- @cached_property
+ def _prefix_from_deps(self, language_name, deps):
+ repo, sha = self.repo_config['repo'], self.repo_config['sha']
+ return Prefix(self.store.clone(repo, sha, deps))
+
def _venvs(self):
- deps_dict = defaultdict(_UniqueList)
- for _, hook in self.hooks:
- deps_dict[(hook['language'], hook['language_version'])].update(
- hook['additional_dependencies'],
- )
ret = []
- for (language, version), deps in deps_dict.items():
- ret.append((self._prefix, language, version, deps))
+ for _, hook in self.hooks:
+ language = hook['language']
+ version = hook['language_version']
+ deps = hook['additional_dependencies']
+ ret.append((
+ self._prefix_from_deps(language, deps),
+ language, version, deps,
+ ))
return tuple(ret)
def require_installed(self):
if not self.__installed:
- _install_all(self._venvs, self.repo_config['repo'], self.store)
+ _install_all(self._venvs(), self.repo_config['repo'], self.store)
self.__installed = True
def run_hook(self, hook, file_args):
@@ -237,19 +229,6 @@ class LocalRepository(Repository):
for hook in self.repo_config['hooks']
)
- @cached_property
- def _venvs(self):
- ret = []
- for _, hook in self.hooks:
- language = hook['language']
- version = hook['language_version']
- deps = hook['additional_dependencies']
- ret.append((
- self._prefix_from_deps(language, deps),
- language, version, deps,
- ))
- return tuple(ret)
-
class MetaRepository(LocalRepository):
@cached_property
@@ -303,14 +282,3 @@ class MetaRepository(LocalRepository):
(hook['id'], _hook(self.manifest_hooks[hook['id']], hook))
for hook in self.repo_config['hooks']
)
-
-
-class _UniqueList(list):
- def __init__(self):
- self._set = set()
-
- def update(self, obj):
- for item in obj:
- if item not in self._set:
- self._set.add(item)
- self.append(item)
diff --git a/pre_commit/store.py b/pre_commit/store.py
index 1311984..7e49c8f 100644
--- a/pre_commit/store.py
+++ b/pre_commit/store.py
@@ -72,9 +72,9 @@ class Store(object):
with contextlib.closing(sqlite3.connect(tmpfile)) as db:
db.executescript(
'CREATE TABLE repos ('
- ' repo CHAR(255) NOT NULL,'
- ' ref CHAR(255) NOT NULL,'
- ' path CHAR(255) NOT NULL,'
+ ' repo TEXT NOT NULL,'
+ ' ref TEXT NOT NULL,'
+ ' path TEXT NOT NULL,'
' PRIMARY KEY (repo, ref)'
');',
)
@@ -101,15 +101,17 @@ class Store(object):
self._create()
self.__created = True
- def _new_repo(self, repo, ref, make_strategy):
+ def _new_repo(self, repo, ref, deps, make_strategy):
self.require_created()
+ if deps:
+ repo = '{}:{}'.format(repo, ','.join(sorted(deps)))
def _get_result():
# Check if we already exist
with sqlite3.connect(self.db_path) as db:
result = db.execute(
'SELECT path FROM repos WHERE repo = ? AND ref = ?',
- [repo, ref],
+ (repo, ref),
).fetchone()
if result:
return result[0]
@@ -137,7 +139,7 @@ class Store(object):
)
return directory
- def clone(self, repo, ref):
+ def clone(self, repo, ref, deps=()):
"""Clone the given url and checkout the specific ref."""
def clone_strategy(directory):
cmd_output(
@@ -151,7 +153,7 @@ class Store(object):
env=no_git_env(),
)
- return self._new_repo(repo, ref, clone_strategy)
+ return self._new_repo(repo, ref, deps, clone_strategy)
def make_local(self, deps):
def make_local_strategy(directory):
@@ -172,8 +174,7 @@ class Store(object):
_git_cmd('commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
return self._new_repo(
- 'local:{}'.format(','.join(sorted(deps))), C.LOCAL_REPO_VERSION,
- make_local_strategy,
+ 'local', C.LOCAL_REPO_VERSION, deps, make_local_strategy,
)
@cached_property
Same repo but different additional dependencies
I have two Git projects `A` and `B`, and both use the same [pre-commit repo](https://github.com/coldnight/pre-commit-pylint). However, the two projects use different additional dependencies:
`.pre-commit-config.yaml` in `A`:
```yaml
- repo: [email protected]:coldnight/pre-commit-pylint.git
sha: 630e2662aabf3236fc62460b163d613c4bd1cfbc
hooks:
- id: pylint-py3k
- id: pylint-score-limit
args:
- --limit=8.5
- --rcfile=./.pylintrc
additional_dependencies:
- enum34; python_version<='3.4'
- mock
```
`.pre-commit-config.yaml` in `B`:
```yaml
- repo: [email protected]:coldnight/pre-commit-pylint.git
sha: 630e2662aabf3236fc62460b163d613c4bd1cfbc
hooks:
- id: pylint-py3k
- id: pylint-score-limit
args:
- --limit=8.5
- --rcfile=./.pylintrc
additional_dependencies:
- enum34; python_version<='3.4'
- requests
```
Here is my problem:
1. First I run `pre-commit` in project `A`, and its environment gets installed
2. Then I run `pre-commit` in project `B`, and its environment gets installed
3. When I go back to project `A` and run `pre-commit`, the installed environment has been removed by the step above and needs another installation (hugely slow!)
Any idea for this? Perhaps supporting different home directories for different projects?
pre-commit/pre-commit
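For what it's worth, the patch above addresses this by folding the sorted dependency list into the store key, so the two projects get separate cloned environments. A minimal sketch of that key derivation (the helper name is illustrative):

```python
# Same repo and sha, different additional_dependencies -> different store keys.
def repo_key(repo, deps=()):
    if deps:
        repo = '{}:{}'.format(repo, ','.join(sorted(deps)))
    return repo

key_a = repo_key('[email protected]:coldnight/pre-commit-pylint.git',
                 ["enum34; python_version<='3.4'", 'mock'])
key_b = repo_key('[email protected]:coldnight/pre-commit-pylint.git',
                 ["enum34; python_version<='3.4'", 'requests'])
assert key_a != key_b
```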
diff --git a/tests/conftest.py b/tests/conftest.py
index fd3784d..246820e 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -165,12 +165,6 @@ def log_info_mock():
yield mck
[email protected]
-def log_warning_mock():
- with mock.patch.object(logging.getLogger('pre_commit'), 'warning') as mck:
- yield mck
-
-
class FakeStream(object):
def __init__(self):
self.data = io.BytesIO()
diff --git a/tests/repository_test.py b/tests/repository_test.py
index 0123ce4..dea387f 100644
--- a/tests/repository_test.py
+++ b/tests/repository_test.py
@@ -433,7 +433,7 @@ def test_venvs(tempdir_factory, store):
path = make_repo(tempdir_factory, 'python_hooks_repo')
config = make_config_from_repo(path)
repo = Repository.create(config, store)
- venv, = repo._venvs
+ venv, = repo._venvs()
assert venv == (mock.ANY, 'python', python.get_default_version(), [])
@@ -443,50 +443,33 @@ def test_additional_dependencies(tempdir_factory, store):
config = make_config_from_repo(path)
config['hooks'][0]['additional_dependencies'] = ['pep8']
repo = Repository.create(config, store)
- venv, = repo._venvs
+ venv, = repo._venvs()
assert venv == (mock.ANY, 'python', python.get_default_version(), ['pep8'])
@pytest.mark.integration
-def test_additional_dependencies_duplicated(
- tempdir_factory, store, log_warning_mock,
-):
- path = make_repo(tempdir_factory, 'ruby_hooks_repo')
- config = make_config_from_repo(path)
- deps = ['thread_safe', 'tins', 'thread_safe']
- config['hooks'][0]['additional_dependencies'] = deps
- repo = Repository.create(config, store)
- venv, = repo._venvs
- assert venv == (mock.ANY, 'ruby', 'default', ['thread_safe', 'tins'])
-
-
[email protected]
-def test_additional_python_dependencies_installed(tempdir_factory, store):
+def test_additional_dependencies_roll_forward(tempdir_factory, store):
path = make_repo(tempdir_factory, 'python_hooks_repo')
- config = make_config_from_repo(path)
- config['hooks'][0]['additional_dependencies'] = ['mccabe']
- repo = Repository.create(config, store)
- repo.require_installed()
- with python.in_env(repo._prefix, 'default'):
- output = cmd_output('pip', 'freeze', '-l')[1]
- assert 'mccabe' in output
+ config1 = make_config_from_repo(path)
+ repo1 = Repository.create(config1, store)
+ repo1.require_installed()
+ (prefix1, _, version1, _), = repo1._venvs()
+ with python.in_env(prefix1, version1):
+ assert 'mccabe' not in cmd_output('pip', 'freeze', '-l')[1]
[email protected]
-def test_additional_dependencies_roll_forward(tempdir_factory, store):
- path = make_repo(tempdir_factory, 'python_hooks_repo')
- config = make_config_from_repo(path)
- # Run the repo once without additional_dependencies
- repo = Repository.create(config, store)
- repo.require_installed()
- # Now run it with additional_dependencies
- config['hooks'][0]['additional_dependencies'] = ['mccabe']
- repo = Repository.create(config, store)
- repo.require_installed()
- # We should see our additional dependency installed
- with python.in_env(repo._prefix, 'default'):
- output = cmd_output('pip', 'freeze', '-l')[1]
- assert 'mccabe' in output
+ # Make another repo with additional dependencies
+ config2 = make_config_from_repo(path)
+ config2['hooks'][0]['additional_dependencies'] = ['mccabe']
+ repo2 = Repository.create(config2, store)
+ repo2.require_installed()
+ (prefix2, _, version2, _), = repo2._venvs()
+ with python.in_env(prefix2, version2):
+ assert 'mccabe' in cmd_output('pip', 'freeze', '-l')[1]
+
+ # should not have affected original
+ with python.in_env(prefix1, version1):
+ assert 'mccabe' not in cmd_output('pip', 'freeze', '-l')[1]
@xfailif_windows_no_ruby
@@ -499,7 +482,8 @@ def test_additional_ruby_dependencies_installed(
config['hooks'][0]['additional_dependencies'] = ['thread_safe', 'tins']
repo = Repository.create(config, store)
repo.require_installed()
- with ruby.in_env(repo._prefix, 'default'):
+ (prefix, _, version, _), = repo._venvs()
+ with ruby.in_env(prefix, version):
output = cmd_output('gem', 'list', '--local')[1]
assert 'thread_safe' in output
assert 'tins' in output
@@ -516,7 +500,8 @@ def test_additional_node_dependencies_installed(
config['hooks'][0]['additional_dependencies'] = ['lodash']
repo = Repository.create(config, store)
repo.require_installed()
- with node.in_env(repo._prefix, 'default'):
+ (prefix, _, version, _), = repo._venvs()
+ with node.in_env(prefix, version):
output = cmd_output('npm', 'ls', '-g')[1]
assert 'lodash' in output
@@ -532,7 +517,8 @@ def test_additional_golang_dependencies_installed(
config['hooks'][0]['additional_dependencies'] = deps
repo = Repository.create(config, store)
repo.require_installed()
- binaries = os.listdir(repo._prefix.path(
+ (prefix, _, _, _), = repo._venvs()
+ binaries = os.listdir(prefix.path(
helpers.environment_dir(golang.ENVIRONMENT_DIR, 'default'), 'bin',
))
# normalize for windows
@@ -598,8 +584,9 @@ def test_control_c_control_c_on_install(tempdir_factory, store):
repo.run_hook(hook, [])
# Should have made an environment, however this environment is broken!
- envdir = 'py_env-{}'.format(python.get_default_version())
- assert repo._prefix.exists(envdir)
+ (prefix, _, version, _), = repo._venvs()
+ envdir = 'py_env-{}'.format(version)
+ assert prefix.exists(envdir)
# However, it should be perfectly runnable (reinstall after botched
# install)
@@ -616,8 +603,8 @@ def test_invalidated_virtualenv(tempdir_factory, store):
# Simulate breaking of the virtualenv
repo.require_installed()
- version = python.get_default_version()
- libdir = repo._prefix.path('py_env-{}'.format(version), 'lib', version)
+ (prefix, _, version, _), = repo._venvs()
+ libdir = prefix.path('py_env-{}'.format(version), 'lib', version)
paths = [
os.path.join(libdir, p) for p in ('site.py', 'site.pyc', '__pycache__')
]
diff --git a/scripts/populate_review_step.py b/scripts/populate_review_step.py
index 3ba1948..9cccfd0 100644
--- a/scripts/populate_review_step.py
+++ b/scripts/populate_review_step.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
import datetime
+from egcg_core import util
from cached_property import cached_property
from EPPs.common import StepEPP, RestCommunicationEPP, step_argparser
from EPPs.config import load_config
@@ -18,8 +19,8 @@ class StepPopulator(StepEPP, RestCommunicationEPP):
if io[0]['uri'].samples[0].name == sample_name and io[1]['output-type'] == 'ResultFile'
]
- def check_rest_data_and_artifacts(self, sample_name, selector):
- query_args = {selector: {'sample_id': sample_name}}
+ def check_rest_data_and_artifacts(self, sample_name):
+ query_args = {'where': {'sample_id': sample_name}}
rest_entities = self.get_documents(self.endpoint, **query_args)
artifacts = self.output_artifacts_per_sample(sample_name=sample_name)
if len(rest_entities) != len(artifacts): # in sample review this will be 1, in run review this will be more
@@ -30,6 +31,18 @@ class StepPopulator(StepEPP, RestCommunicationEPP):
)
return rest_entities, artifacts
+ def delivered(self, sample_name):
+ d = {'yes': True, 'no': False}
+ query_args = {'where': {'sample_id': sample_name}}
+ sample = self.get_documents('samples', **query_args)[0]
+ return d.get(sample.get('delivered'))
+
+ def processed(self, sample_name):
+ query_args = {'where': {'sample_id': sample_name}}
+ sample = self.get_documents('samples', **query_args)[0]
+ processing_status = util.query_dict(sample, 'aggregated.most_recent_proc.status')
+ return processing_status == 'finished'
+
def _run(self):
raise NotImplementedError
@@ -51,7 +64,7 @@ class PullInfo(StepPopulator):
self.lims.put_batch(artifacts_to_upload)
def add_artifact_info(self, sample):
- rest_entities, artifacts = self.check_rest_data_and_artifacts(sample.name, 'match')
+ rest_entities, artifacts = self.check_rest_data_and_artifacts(sample.name)
artifacts_to_upload = set()
for i in range(len(rest_entities)):
for art_field, api_field in self.metrics_mapping:
@@ -83,15 +96,16 @@ class PullInfo(StepPopulator):
class PullRunElementInfo(PullInfo):
- endpoint = 'aggregate/run_elements'
+ endpoint = 'run_elements'
metrics_mapping = [
('RE Id', 'run_element_id'),
('RE Nb Reads', 'passing_filter_reads'),
- ('RE Yield', 'clean_yield_in_gb'),
- ('RE Yield Q30', 'clean_yield_q30_in_gb'),
- ('RE %Q30', 'clean_pc_q30'),
+ ('RE Yield', 'aggregated.clean_yield_in_gb'),
+ ('RE Yield Q30', 'aggregated.clean_yield_q30_in_gb'),
+ ('RE %Q30', 'aggregated.clean_pc_q30'),
+ ('RE Coverage', 'coverage.mean'),
('RE Estimated Duplicate Rate', 'lane_pc_optical_dups'),
- ('RE %Adapter', 'pc_adapter'),
+ ('RE %Adapter', 'aggregated.pc_adaptor'),
('RE Review status', 'reviewed'),
('RE Review Comment', 'review_comments'),
('RE Review date', 'review_date'),
@@ -102,7 +116,6 @@ class PullRunElementInfo(PullInfo):
def assess_sample(self, sample):
artifacts_to_upload = set()
-
artifacts = self.output_artifacts_per_sample(sample_name=sample.name)
un_reviewed_artifacts = [a for a in artifacts if a.udf.get('RE Review status') not in ['pass', 'fail']]
if un_reviewed_artifacts:
@@ -111,36 +124,69 @@ class PullRunElementInfo(PullInfo):
# Artifacts that pass the review
pass_artifacts = [a for a in artifacts if a.udf.get('RE Review status') == 'pass']
-
# Artifacts that fail the review
fail_artifacts = [a for a in artifacts if a.udf.get('RE Review status') == 'fail']
+ # Artifacts that are new
+ new_artifacts = [a for a in artifacts if a.udf.get('RE previous Useable') not in ['yes', 'no']]
- target_yield = float(sample.udf.get('Yield for Quoted Coverage (Gb)'))
- good_re_yield = sum([float(a.udf.get('RE Yield Q30')) for a in pass_artifacts])
+ # skip samples which have been delivered, mark any new REs as such, not changing older RE comments
+ if self.delivered(sample.name):
+ for a in new_artifacts:
+ a.udf['RE Useable Comment'] = 'AR: Delivered'
+ a.udf['RE Useable'] = 'no'
- # Just the right amount of good yield: take it all
- if target_yield < good_re_yield < target_yield * 2:
- for a in pass_artifacts:
- a.udf['RE Useable'] = 'yes'
- a.udf['RE Useable Comment'] = 'AR: Good yield'
- for a in fail_artifacts:
+ for a in pass_artifacts + fail_artifacts:
+ if a.udf.get('RE previous Useable Comment') and a.udf.get('RE previous Useable'):
+ a.udf['RE Useable Comment'] = a.udf.get('RE previous Useable Comment')
+ a.udf['RE Useable'] = a.udf.get('RE previous Useable')
+
+ artifacts_to_upload.update(artifacts)
+ return artifacts_to_upload
+
+ # skip samples which have been processed, mark any new REs as such, not changing older RE comments
+ if self.processed(sample.name):
+ for a in pass_artifacts + fail_artifacts:
+ if a.udf.get('RE previous Useable Comment') and a.udf.get('RE previous Useable'):
+ a.udf['RE Useable Comment'] = a.udf.get('RE previous Useable Comment')
+ a.udf['RE Useable'] = a.udf.get('RE previous Useable')
+
+ for a in new_artifacts:
+ a.udf['RE Useable Comment'] = 'AR: Sample already processed'
a.udf['RE Useable'] = 'no'
- a.udf['RE Useable Comment'] = 'AR: Failed and not needed'
+
artifacts_to_upload.update(artifacts)
+ return artifacts_to_upload
+
+ target_yield = float(sample.udf.get('Required Yield (Gb)'))
+ good_re_yield = sum([float(a.udf.get('RE Yield')) for a in pass_artifacts])
+
+ # Increase target coverage by 5% to resolve borderline cases
+ target_coverage = 1.05 * sample.udf.get('Coverage (X)')
+ obtained_coverage = float(sum([a.udf.get('RE Coverage') for a in pass_artifacts]))
# Too much good yield limit to the best quality ones
- elif good_re_yield > target_yield * 2:
+ if good_re_yield > target_yield * 2 and obtained_coverage > target_coverage:
# Too much yield: sort the good artifact by quality
pass_artifacts.sort(key=lambda x: x.udf.get('RE %Q30'), reverse=True)
current_yield = 0
for a in pass_artifacts:
- current_yield += float(a.udf.get('RE Yield Q30'))
+ current_yield += float(a.udf.get('RE Yield'))
if current_yield < target_yield * 2:
a.udf['RE Useable'] = 'yes'
a.udf['RE Useable Comment'] = 'AR: Good yield'
else:
a.udf['RE Useable'] = 'no'
- a.udf['RE Useable Comment'] = 'AR: To much good yield'
+ a.udf['RE Useable Comment'] = 'AR: Too much good yield'
+ for a in fail_artifacts:
+ a.udf['RE Useable'] = 'no'
+ a.udf['RE Useable Comment'] = 'AR: Failed and not needed'
+ artifacts_to_upload.update(artifacts)
+
+ # Just the right amount of good yield: take it all
+ elif target_yield < good_re_yield < target_yield * 2 or obtained_coverage > target_coverage:
+ for a in pass_artifacts:
+ a.udf['RE Useable'] = 'yes'
+ a.udf['RE Useable Comment'] = 'AR: Good yield'
for a in fail_artifacts:
a.udf['RE Useable'] = 'no'
a.udf['RE Useable Comment'] = 'AR: Failed and not needed'
@@ -153,16 +199,16 @@ class PullRunElementInfo(PullInfo):
class PullSampleInfo(PullInfo):
- endpoint = 'aggregate/samples'
+ endpoint = 'samples'
metrics_mapping = [
- ('SR Yield (Gb)', 'clean_yield_in_gb'),
- ('SR %Q30', 'clean_pc_q30'),
- ('SR % Mapped', 'pc_mapped_reads'),
- ('SR % Duplicates', 'pc_duplicate_reads'),
- ('SR Mean Coverage', 'coverage.mean'),
- ('SR Species Found', 'species_contamination'),
- ('SR Sex Check Match', 'gender_match'),
- ('SR Genotyping Match', 'genotype_match'),
+ ('SR Yield (Gb)', 'aggregated.clean_yield_in_gb'),
+ ('SR %Q30', 'aggregated.clean_pc_q30'),
+ ('SR % Mapped', 'aggregated.pc_mapped_reads'),
+ ('SR % Duplicates', 'aggregated.pc_duplicate_reads'),
+ ('SR Mean Coverage', 'aggregated.mean_coverage'),
+ ('SR Species Found', 'matching_species'),
+ ('SR Sex Check Match', 'aggregated.gender_match'),
+ ('SR Genotyping Match', 'aggregated.genotype_match'),
('SR Freemix', 'sample_contamination.freemix'),
('SR Review Status', 'reviewed'),
('SR Review Comments', 'review_comments'),
@@ -192,9 +238,9 @@ class PullSampleInfo(PullInfo):
def field_from_entity(self, entity, api_field):
# TODO: remove once Rest API has a sensible field for species found
- if api_field == 'species_contamination':
- species = entity[api_field]['contaminant_unique_mapped']
- return ', '.join(k for k in sorted(species) if species[k] > 500)
+ if api_field == 'matching_species':
+ species = entity[api_field]
+ return ', '.join(species)
return super().field_from_entity(entity, api_field)
@@ -214,7 +260,7 @@ class PushInfo(StepPopulator):
_ = self.output_artifacts
for sample in self.samples:
self.info('Pushing data for sample %s', sample.name)
- rest_entities, artifacts = self.check_rest_data_and_artifacts(sample.name, 'where')
+ rest_entities, artifacts = self.check_rest_data_and_artifacts(sample.name)
rest_api_data = {}
for e in rest_entities:
rest_api_data[e[self.api_id_field]] = e
Add new rules to sample assessment in Run review
The sample assessment is using the required yield Q30 instead of the yield and %Q30, which makes it inaccurate in some cases. Change it to use Yield and %Q30.
- [ ] Add a strategy to deal with samples that have already been delivered.
- [ ] Add a strategy to protect previous calls when additional data is generated but not needed, assuming the resulting coverage met the requirement.
- [ ] Add a strategy to take coverage into account.
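A condensed sketch of the patched yield/coverage decision rule may help (the thresholds follow the diff above; the helper itself is illustrative):

```python
def assess(target_yield, good_yield, coverage_x, obtained_coverage):
    # Increase target coverage by 5% to resolve borderline cases.
    target_coverage = 1.05 * coverage_x
    if good_yield > target_yield * 2 and obtained_coverage > target_coverage:
        return 'too much good yield: keep only the best-quality run elements'
    if target_yield < good_yield < target_yield * 2 \
            or obtained_coverage > target_coverage:
        return 'enough yield or coverage: keep all passing run elements'
    return 'not enough yield or coverage yet'
```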
coveralls: Coverage increased (+0.002%) to 98.68% when pulling **f70ca7456ba0dacfd653fcdaf2fd2dea8abfe113 on fix-issue-124** into **8f491d8dda1a217d9233b0f4680e11c52dafaa2a on master** ([build](https://coveralls.io/builds/15706170)).
diff --git a/simulators/active_surface.py b/simulators/active_surface.py
index 5bea129..afd751a 100644
--- a/simulators/active_surface.py
+++ b/simulators/active_surface.py
@@ -579,7 +579,7 @@ class System(ListeningSystem):
self.drivers[params[0]].current_position
)
- val = utils.binary_to_bytes(binary_position)
+ val = utils.binary_to_bytes(binary_position, little_endian=False)
if params[1] == 0xFA:
retval += val
@@ -642,7 +642,10 @@ class System(ListeningSystem):
else:
return self.byte_nak
else:
- frequency = utils.bytes_to_int([chr(x) for x in params[2]])
+ frequency = utils.bytes_to_int(
+ [chr(x) for x in params[2]],
+ little_endian=False
+ )
if frequency >= 20 and frequency <= 10000:
if params[0] == -1:
@@ -669,7 +672,10 @@ class System(ListeningSystem):
else:
return self.byte_nak
else:
- frequency = utils.bytes_to_int([chr(x) for x in params[2]])
+ frequency = utils.bytes_to_int(
+ [chr(x) for x in params[2]],
+ little_endian=False
+ )
if frequency >= 20 and frequency <= 10000:
if params[0] == -1:
@@ -714,7 +720,8 @@ class System(ListeningSystem):
return self.byte_nak
else:
reference_position = utils.bytes_to_int(
- [chr(x) for x in params[2]]
+ [chr(x) for x in params[2]],
+ little_endian=False
)
if params[0] == -1:
@@ -811,7 +818,10 @@ class System(ListeningSystem):
else:
return self.byte_nak
else:
- absolute_position = utils.bytes_to_int([chr(x) for x in params[2]])
+ absolute_position = utils.bytes_to_int(
+ [chr(x) for x in params[2]],
+ little_endian=False
+ )
if params[0] == -1:
for driver in self.drivers:
@@ -829,7 +839,10 @@ class System(ListeningSystem):
else:
return self.byte_nak
else:
- relative_position = utils.bytes_to_int([chr(x) for x in params[2]])
+ relative_position = utils.bytes_to_int(
+ [chr(x) for x in params[2]],
+ little_endian=False
+ )
if params[0] == -1:
for driver in self.drivers:
@@ -867,7 +880,10 @@ class System(ListeningSystem):
else:
return self.byte_nak
else:
- velocity = utils.bytes_to_int([chr(x) for x in params[2]])
+ velocity = utils.bytes_to_int(
+ [chr(x) for x in params[2]],
+ little_endian=False
+ )
if velocity > 100000 or velocity < -100000:
if params[0] == -1:
diff --git a/simulators/acu.py b/simulators/acu.py
index 2beeb1a..6faa96e 100644
--- a/simulators/acu.py
+++ b/simulators/acu.py
@@ -17,8 +17,8 @@ from simulators.common import ListeningSystem, SendingSystem
servers = []
servers.append((('127.0.0.1', 13000), ('127.0.0.1', 13001), ()))
-start_flag = b'\x1D\xFC\xCF\x1A'
-end_flag = b'\xA1\xFC\xCF\xD1'
+start_flag = b'\x1A\xCF\xFC\x1D'
+end_flag = b'\xD1\xCF\xFC\xA1'
class System(ListeningSystem, SendingSystem):
@@ -80,7 +80,7 @@ class System(ListeningSystem, SendingSystem):
return False
if len(self.msg) == 8:
- self.msg_length = utils.bytes_to_int(self.msg[-4:])
+ self.msg_length = utils.bytes_to_uint(self.msg[-4:])
if len(self.msg) == 12:
macro_cmd_counter = utils.bytes_to_uint(self.msg[-4:])
diff --git a/simulators/acu_status/acu_utils.py b/simulators/acu_status/acu_utils.py
index 4abd82e..8c46654 100644
--- a/simulators/acu_status/acu_utils.py
+++ b/simulators/acu_status/acu_utils.py
@@ -1,8 +1,8 @@
import time
from simulators import utils
-start_flag = b'\x1D\xFC\xCF\x1A'
-end_flag = b'\xA1\xFC\xCF\xD1'
+start_flag = b'\x1A\xCF\xFC\x1D'
+end_flag = b'\xD1\xCF\xFC\xA1'
class ModeCommand(object):
diff --git a/simulators/utils.py b/simulators/utils.py
index b839bd4..20c4ad4 100644
--- a/simulators/utils.py
+++ b/simulators/utils.py
@@ -121,10 +121,10 @@ def int_to_twos(val, n_bytes=4):
return ("{0:0>%s}" % n_bits).format(binary_string)
-def binary_to_bytes(binary_string):
+def binary_to_bytes(binary_string, little_endian=True):
"""Convert a binary string in a string of bytes.
- >>> binary_to_bytes('0110100001100101011011000110110001101111')
+ >>> binary_to_bytes('0110100001100101011011000110110001101111', False)
'\x68\x65\x6C\x6C\x6F'
"""
@@ -133,31 +133,37 @@ def binary_to_bytes(binary_string):
for i in range(0, len(binary_string), 8):
byte_string += chr(int(binary_string[i:i + 8], 2))
- return byte_string
+ return byte_string[::-1] if little_endian else byte_string
-def bytes_to_int(byte_string):
+def bytes_to_int(byte_string, little_endian=True):
"""Convert a string of bytes to an integer (like C atoi function).
- >>> bytes_to_int(b'hello')
+ >>> bytes_to_int(b'hello', False)
448378203247
"""
binary_string = ''
+ if little_endian:
+ byte_string = byte_string[::-1]
+
for char in byte_string:
binary_string += bin(ord(char))[2:].zfill(8)
return twos_to_int(binary_string)
-def bytes_to_uint(byte_string):
+def bytes_to_uint(byte_string, little_endian=True):
"""Convert a string of bytes to an unsigned integer.
- >>> bytes_to_uint(b'hi')
+ >>> bytes_to_uint(b'hi', little_endian=False)
26729
"""
binary_string = ''
+ if little_endian:
+ byte_string = byte_string[::-1]
+
for char in byte_string:
binary_string += bin(ord(char))[2:].zfill(8)
@@ -198,32 +204,35 @@ def real_to_binary(num, precision=1):
)
-def real_to_bytes(num, precision=1):
+def real_to_bytes(num, precision=1, little_endian=True):
"""Return the bytestring representation of a floating-point number
(IEEE 754 standard).
- >>> [hex(ord(x)) for x in real_to_bytes(436.56, 1)]
+ >>> [hex(ord(x)) for x in real_to_bytes(436.56, 1, False)]
['0x43', '0xda', '0x47', '0xae']
- >>> [hex(ord(x)) for x in real_to_bytes(436.56, 2)]
+ >>> [hex(ord(x)) for x in real_to_bytes(436.56, 2, False)]
['0x40', '0x7b', '0x48', '0xf5', '0xc2', '0x8f', '0x5c', '0x29']
"""
binary_number = real_to_binary(num, precision)
- return binary_to_bytes(binary_number)
+ return binary_to_bytes(binary_number, little_endian=little_endian)
-def bytes_to_real(bytes_real, precision=1):
+def bytes_to_real(bytes_real, precision=1, little_endian=True):
"""Return the floating-point representation (IEEE 754 standard)
of bytestring number.
- >>> round(bytes_to_real('\x43\xDA\x47\xAE', 1), 2)
+ >>> round(bytes_to_real('\x43\xDA\x47\xAE', 1, False), 2)
436.56
- >>> round(bytes_to_real('\x40\x7B\x48\xF5\xC2\x8F\x5C\x29', 2), 2)
+ >>> round(bytes_to_real('\x40\x7B\x48\xF5\xC2\x8F\x5C\x29', 2, False), 2)
436.56
"""
+ if little_endian:
+ bytes_real = bytes_real[::-1]
+
if precision == 1:
return struct.unpack('!f', bytes_real)[0]
elif precision == 2:
@@ -235,20 +244,20 @@ def bytes_to_real(bytes_real, precision=1):
)
-def int_to_bytes(val, n_bytes=4):
+def int_to_bytes(val, n_bytes=4, little_endian=True):
"""Return the bytestring representation of a given signed integer.
- >>> [hex(ord(x)) for x in int_to_bytes(354)]
+ >>> [hex(ord(x)) for x in int_to_bytes(354, little_endian=False)]
['0x0', '0x0', '0x1', '0x62']
"""
- return binary_to_bytes(int_to_twos(val, n_bytes))
+ return binary_to_bytes(int_to_twos(val, n_bytes), little_endian)
-def uint_to_bytes(val, n_bytes=4):
+def uint_to_bytes(val, n_bytes=4, little_endian=True):
"""Return the bytestring representation of a given unsigned integer.
- >>> [hex(ord(x)) for x in uint_to_bytes(657)]
+ >>> [hex(ord(x)) for x in uint_to_bytes(657, little_endian=False)]
['0x0', '0x0', '0x2', '0x91']
"""
@@ -262,7 +271,10 @@ def uint_to_bytes(val, n_bytes=4):
% (val, min_range, max_range)
)
- return binary_to_bytes(bin(val)[2:].zfill(n_bytes * 8))
+ return binary_to_bytes(
+ bin(val)[2:].zfill(n_bytes * 8),
+ little_endian=little_endian
+ )
def sign(number):
ACU commands and status use little-endian byte order; the current implementation uses big-endian.
discos/simulators
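A quick Python 3 sketch of the endianness rule applied in the patch (the project code itself works on Python 2 byte strings, so this is only an illustration): reversing the byte string before conversion turns the big-endian reading into the little-endian one.

```python
def bytes_to_uint(byte_string: bytes, little_endian: bool = True) -> int:
    # Equivalent to reversing the bytes and then reading them big-endian.
    return int.from_bytes(byte_string, 'little' if little_endian else 'big')

assert bytes_to_uint(b'hi', little_endian=False) == 26729  # 0x6869
assert bytes_to_uint(b'hi') == 26984                       # 0x6968
```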
diff --git a/tests/test_acu.py b/tests/test_acu.py
index 674856f..f305c94 100644
--- a/tests/test_acu.py
+++ b/tests/test_acu.py
@@ -20,7 +20,7 @@ class TestACU(unittest.TestCase):
def test_status_message_length(self):
status = self.system.get_message()
- msg_length = utils.bytes_to_int(status[4:8])
+ msg_length = utils.bytes_to_uint(status[4:8])
self.assertEqual(msg_length, 813)
def test_duplicated_macro_command_counter(self):
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 86f8db5..6763dbe 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -68,7 +68,7 @@ class TestServer(unittest.TestCase):
def test_right_binary_to_bytes(self):
"""Convert a binary string into a string of bytes."""
binary_string = '00000101000110100010100011010010'
- byte_string = utils.binary_to_bytes(binary_string)
+ byte_string = utils.binary_to_bytes(binary_string, little_endian=False)
expected_byte_string = b'\x05\x1A\x28\xD2'
self.assertEqual(byte_string, expected_byte_string)
@@ -81,7 +81,7 @@ class TestServer(unittest.TestCase):
def test_right_bytes_to_int(self):
"""Convert a string of bytes into an integer (like C atoi function)."""
byte_string = b'\x00\x00\xFA\xFF'
- result = utils.bytes_to_int(byte_string)
+ result = utils.bytes_to_int(byte_string, little_endian=False)
expected_result = 64255
self.assertEqual(result, expected_result)
@@ -137,13 +137,13 @@ class TestServer(unittest.TestCase):
def test_real_to_bytes_single_precision(self):
"""Convert a real number to a string of bytes."""
number = 45.12371938725634
- result = utils.real_to_bytes(number)
+ result = utils.real_to_bytes(number, little_endian=False)
expected_result = b'\x42\x34\x7E\xB0'
self.assertEqual(result, expected_result)
def test_real_to_bytes_double_precision(self):
number = 3.14159265358979323846264338327950288419716939937510582097494
- result = utils.real_to_bytes(number, 2)
+ result = utils.real_to_bytes(number, precision=2, little_endian=False)
expected_result = b'\x40\x09\x21\xFB\x54\x44\x2D\x18'
self.assertEqual(result, expected_result)
@@ -155,13 +155,17 @@ class TestServer(unittest.TestCase):
def test_bytes_to_real_single_precision(self):
"""Convert a string of bytes to a floating point number."""
byte_string = b'\x42\x34\x7E\xB0'
- result = utils.bytes_to_real(byte_string)
+ result = utils.bytes_to_real(byte_string, little_endian=False)
expected_result = 45.12371826171875
self.assertEqual(result, expected_result)
def test_bytes_to_real_double_precision(self):
byte_string = b'\x40\x09\x21\xFB\x54\x44\x2D\x18'
- result = utils.bytes_to_real(byte_string, 2)
+ result = utils.bytes_to_real(
+ byte_string,
+ precision=2,
+ little_endian=False
+ )
expected_result = (
3.14159265358979323846264338327950288419716939937510582097494
)
@@ -175,13 +179,13 @@ class TestServer(unittest.TestCase):
def test_int_to_bytes_positive(self):
"""Convert a signed integer to a string of bytes."""
number = 232144
- result = utils.int_to_bytes(number)
+ result = utils.int_to_bytes(number, little_endian=False)
expected_result = b'\x00\x03\x8A\xD0'
self.assertEqual(result, expected_result)
def test_int_to_bytes_negative(self):
number = -4522764
- result = utils.int_to_bytes(number)
+ result = utils.int_to_bytes(number, little_endian=False)
expected_result = b'\xFF\xBA\xFC\xF4'
self.assertEqual(result, expected_result)
@@ -199,7 +203,7 @@ class TestServer(unittest.TestCase):
def test_uint_to_bytes(self):
"""Convert an unsigned integer to a string of bytes."""
number = 1284639736
- result = utils.uint_to_bytes(number)
+ result = utils.uint_to_bytes(number, little_endian=False)
expected_result = b'\x4C\x92\x0B\xF8'
self.assertEqual(result, expected_result)
diff --git a/pika/adapters/blocking_connection.py b/pika/adapters/blocking_connection.py
index b1fb7dc..be1db63 100644
--- a/pika/adapters/blocking_connection.py
+++ b/pika/adapters/blocking_connection.py
@@ -542,22 +542,24 @@ class BlockingConnection(object):
self.add_timeout(0, user_callback)
- def _on_connection_blocked(self, user_callback, method_frame):
+ def _on_connection_blocked(self, user_callback, _impl, method_frame):
"""Handle Connection.Blocked notification from RabbitMQ broker
:param callable user_callback: callback passed to
`add_on_connection_blocked_callback`
+ :param SelectConnection _impl:
:param pika.frame.Method method_frame: method frame having `method`
member of type `pika.spec.Connection.Blocked`
"""
self._ready_events.append(
_ConnectionBlockedEvt(user_callback, method_frame))
- def _on_connection_unblocked(self, user_callback, method_frame):
+ def _on_connection_unblocked(self, user_callback, _impl, method_frame):
"""Handle Connection.Unblocked notification from RabbitMQ broker
:param callable user_callback: callback passed to
`add_on_connection_unblocked_callback`
+ :param SelectConnection _impl:
:param pika.frame.Method method_frame: method frame having `method`
member of type `pika.spec.Connection.Blocked`
"""
@@ -595,13 +597,14 @@ class BlockingConnection(object):
See also `ConnectionParameters.blocked_connection_timeout`.
:param method callback: Callback to call on `Connection.Blocked`,
- having the signature `callback(pika.frame.Method)`, where the
- method frame's `method` member is of type
- `pika.spec.Connection.Blocked`
+ having the signature `callback(connection, pika.frame.Method)`,
+ where connection is the `BlockingConnection` instance and the method
+ frame's `method` member is of type `pika.spec.Connection.Blocked`
"""
self._impl.add_on_connection_blocked_callback(
- functools.partial(self._on_connection_blocked, callback))
+ functools.partial(self._on_connection_blocked,
+ functools.partial(callback, self)))
def add_on_connection_unblocked_callback(self, callback):
"""Add a callback to be notified when RabbitMQ has sent a
@@ -609,14 +612,15 @@ class BlockingConnection(object):
to start publishing again. The callback will be passed the
`Connection.Unblocked` method frame.
- :param method callback: Callback to call on
- `Connection.Unblocked`, having the signature
- `callback(pika.frame.Method)`, where the method frame's
- `method` member is of type `pika.spec.Connection.Unblocked`
+ :param method callback: Callback to call on Connection.Unblocked`,
+ having the signature `callback(connection, pika.frame.Method)`,
+ where connection is the `BlockingConnection` instance and the method
+ frame's `method` member is of type `pika.spec.Connection.Unblocked`
"""
self._impl.add_on_connection_unblocked_callback(
- functools.partial(self._on_connection_unblocked, callback))
+ functools.partial(self._on_connection_unblocked,
+ functools.partial(callback, self)))
def add_timeout(self, deadline, callback):
"""Create a single-shot timer to fire after deadline seconds. Do not
diff --git a/pika/connection.py b/pika/connection.py
index c784cc2..25ec506 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -6,6 +6,7 @@ import ast
import sys
import collections
import copy
+import functools
import logging
import math
import numbers
@@ -1018,11 +1019,9 @@ class Connection(object):
# pylint: disable=R0201
ON_CONNECTION_BACKPRESSURE = '_on_connection_backpressure'
- ON_CONNECTION_BLOCKED = '_on_connection_blocked'
ON_CONNECTION_CLOSED = '_on_connection_closed'
ON_CONNECTION_ERROR = '_on_connection_error'
ON_CONNECTION_OPEN = '_on_connection_open'
- ON_CONNECTION_UNBLOCKED = '_on_connection_unblocked'
CONNECTION_CLOSED = 0
CONNECTION_INIT = 1
CONNECTION_PROTOCOL = 2
@@ -1146,19 +1145,21 @@ class Connection(object):
instead of relying on back pressure throttling. The callback
will be passed the ``Connection.Blocked`` method frame.
- TODO Also pass the connection as the callback's first arg
-
See also `ConnectionParameters.blocked_connection_timeout`.
:param method callback: Callback to call on `Connection.Blocked`,
- having the signature `callback(pika.frame.Method)`, where the
- method frame's `method` member is of type
+ having the signature `callback(connection, pika.frame.Method)`,
+ where the method frame's `method` member is of type
`pika.spec.Connection.Blocked`
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
- self.callbacks.add(0, spec.Connection.Blocked, callback, False)
+
+ self.callbacks.add(0,
+ spec.Connection.Blocked,
+ functools.partial(callback, self),
+ one_shot=False)
def add_on_connection_unblocked_callback(self, callback):
"""Add a callback to be notified when RabbitMQ has sent a
@@ -1166,17 +1167,19 @@ class Connection(object):
to start publishing again. The callback will be passed the
``Connection.Unblocked`` method frame.
- TODO Also pass the connection as the callback's first arg
-
:param method callback: Callback to call on
`Connection.Unblocked`, having the signature
- `callback(pika.frame.Method)`, where the method frame's
+ `callback(connection, pika.frame.Method)`, where the method frame's
`method` member is of type `pika.spec.Connection.Unblocked`
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
- self.callbacks.add(0, spec.Connection.Unblocked, callback, False)
+
+ self.callbacks.add(0,
+ spec.Connection.Unblocked,
+ functools.partial(callback, self),
+ one_shot=False)
def add_on_open_callback(self, callback):
"""Add a callback notification when the connection has opened. The
@@ -1789,7 +1792,7 @@ class Connection(object):
self._on_terminate(InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT,
'Blocked connection timeout expired')
- def _on_connection_blocked(self, method_frame):
+ def _on_connection_blocked(self, _connection, method_frame):
"""Handle Connection.Blocked notification from RabbitMQ broker
:param pika.frame.Method method_frame: method frame having `method`
@@ -1808,7 +1811,7 @@ class Connection(object):
self.params.blocked_connection_timeout,
self._on_blocked_connection_timeout)
- def _on_connection_unblocked(self, method_frame):
+ def _on_connection_unblocked(self, _connection, method_frame):
"""Handle Connection.Unblocked notification from RabbitMQ broker
:param pika.frame.Method method_frame: method frame having `method`
Pass connection instance as first arg of callbacks for add_on_connection_blocked_callback and add_on_connection_unblocked_callback
Targeting 1.0.0
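Under the new signature the connection instance arrives as the first callback argument; a minimal sketch (the broker address and handler bodies are assumptions):

```python
import pika

def on_blocked(connection, method_frame):
    # connection is the BlockingConnection instance, new in this change.
    print('Blocked:', method_frame.method.reason)

def on_unblocked(connection, method_frame):
    print('Unblocked')

conn = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
conn.add_on_connection_blocked_callback(on_blocked)
conn.add_on_connection_unblocked_callback(on_unblocked)
```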
diff --git a/src/pynwb/ogen.py b/src/pynwb/ogen.py
index d05832fd..a4a21f9c 100644
--- a/src/pynwb/ogen.py
+++ b/src/pynwb/ogen.py
@@ -5,6 +5,7 @@ from .form.utils import docval, popargs, fmt_docval_args
from . import register_class, CORE_NAMESPACE
from .base import TimeSeries, _default_resolution, _default_conversion
from .core import NWBContainer
+from .ecephys import Device
@register_class('OptogeneticStimulusSite', CORE_NAMESPACE)
@@ -19,7 +20,7 @@ class OptogeneticStimulusSite(NWBContainer):
@docval({'name': 'name', 'type': str, 'doc': 'The name of this stimulus site'},
{'name': 'source', 'type': str, 'doc': 'the source of the data'},
- {'name': 'device', 'type': str, 'doc': 'Name of device in /general/devices'},
+ {'name': 'device', 'type': Device, 'doc': 'the device that was used'},
{'name': 'description', 'type': str, 'doc': 'Description of site.'},
{'name': 'excitation_lambda', 'type': str, 'doc': 'Excitation wavelength.'},
{'name': 'location', 'type': str, 'doc': 'Location of stimulation site.'})
`nwbfile.create_ogen_site` device argument expects string
the `device` argument of `nwbfile.create_ogen_site` takes a string that is the name of the device. I think it should also accept the `Device` object. This would make usage more consistent with `nwbfile.create_electrode_group` and `nwbfile.add_electrode` (with `electrode_group`), where the object is given rather than its name. Is there a reason the name of the device is used here?
NeurodataWithoutBorders/pynwb
diff --git a/tests/unit/pynwb_tests/test_ogen.py b/tests/unit/pynwb_tests/test_ogen.py
index bee99992..b80243d4 100644
--- a/tests/unit/pynwb_tests/test_ogen.py
+++ b/tests/unit/pynwb_tests/test_ogen.py
@@ -1,14 +1,16 @@
import unittest
from pynwb.ogen import OptogeneticSeries, OptogeneticStimulusSite
+from pynwb.ecephys import Device
class OptogeneticSeriesConstructor(unittest.TestCase):
def test_init(self):
- oS = OptogeneticStimulusSite('site1', 'a test source', 'device', 'description', 'excitation_lambda', 'location')
+ device = Device('name', 'source')
+ oS = OptogeneticStimulusSite('site1', 'a test source', device, 'description', 'excitation_lambda', 'location')
self.assertEqual(oS.name, 'site1')
- self.assertEqual(oS.device, 'device')
+ self.assertEqual(oS.device, device)
self.assertEqual(oS.description, 'description')
self.assertEqual(oS.excitation_lambda, 'excitation_lambda')
self.assertEqual(oS.location, 'location')
diff --git a/joblib/memory.py b/joblib/memory.py
index 0f005dd..18b20f5 100644
--- a/joblib/memory.py
+++ b/joblib/memory.py
@@ -767,7 +767,7 @@ class Memory(Logger):
The path of the base directory to use as a data store
or None. If None is given, no caching is done and
the Memory object is completely transparent. This option
- replaces cachedir since version 0.11.
+ replaces cachedir since version 0.12.
backend: str or 'local'
Type of store backend for reading/writing cache files.
@@ -776,8 +776,8 @@ class Memory(Logger):
backend.
cachedir: str or None
- cachedir is deprecated since version 0.11 and will be
- removed in 0.13. Please consider using location option instead.
+ cachedir is deprecated since version 0.12 and will be
+ removed in 0.14. Please consider using location option instead.
The path of the base directory to use as a data store
or None. If None is given, no caching is done and
the Memory object is completely transparent.
@@ -818,17 +818,18 @@ class Memory(Logger):
if cachedir is not None:
if location is None:
warnings.warn("cachedir option is deprecated since version "
- "0.10 and will be removed after version 0.12.\n"
+ "0.12 and will be removed in version 0.14.\n"
"Use option location=<store location> "
"instead.", DeprecationWarning, stacklevel=2)
location = cachedir
else:
warnings.warn("You set both location and cachedir options."
"cachedir is deprecated since version "
- "0.10 and will be removed after version 0.12.\n"
+ "0.12 and will be removed in version 0.14.\n"
"cachedir value will be ignored.",
DeprecationWarning, stacklevel=2)
+ self.location = location
if isinstance(location, _basestring):
location = os.path.join(location, 'joblib')
@@ -837,6 +838,14 @@ class Memory(Logger):
backend_options=dict(compress=compress, mmap_mode=mmap_mode,
**backend_options))
+ @property
+ def cachedir(self):
+ warnings.warn("cachedir option is deprecated since version "
+ "0.12 and will be removed in version 0.14.\n"
+ "Use option location=<store location> "
+ "instead.", DeprecationWarning, stacklevel=2)
+ return self.location
+
def cache(self, func=None, ignore=None, verbose=None, mmap_mode=False):
""" Decorates the given function func to only compute its return
value for input arguments not cached on disk.
Memory object does not have a cachedir attribute in master
Seems like an oversight from #397. The doc says that cachedir is deprecated but actually it is not set:
https://github.com/joblib/joblib/blob/a0e1f69d2be31e9e6be1f5e346988bc04df7ff75/joblib/memory.py#L778-L783
It'd be a good idea to double-check that other similar things have not been overlooked.
Snippet:
```python
from joblib import Memory
mem = Memory('/tmp/test')
print(mem.cachedir)
```
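With the property added in the patch above, that snippet now prints the location and emits a `DeprecationWarning`; a minimal check of the behavior (cache path made up):
```python
import warnings
from joblib import Memory

mem = Memory('/tmp/test')
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(mem.cachedir)  # delegates to mem.location
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```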
joblib/joblib
diff --git a/joblib/test/test_memory.py b/joblib/test/test_memory.py
index 7d74506..00f384c 100644
--- a/joblib/test/test_memory.py
+++ b/joblib/test/test_memory.py
@@ -383,6 +383,11 @@ def test_func_dir(tmpdir):
location = os.path.join(g.store_backend.location, func_id)
assert location == path
assert os.path.exists(path)
+ assert memory.location == os.path.dirname(g.store_backend.location)
+ with warns(DeprecationWarning) as w:
+ assert memory.cachedir == os.path.dirname(g.store_backend.location)
+ assert len(w) == 1
+ assert "cachedir option is deprecated since version" in str(w[-1].message)
# Test that the code is stored.
# For the following test to be robust to previous execution, we clear
diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py
index 2b6bb6ed3..301a35844 100644
--- a/nipype/pipeline/engine/utils.py
+++ b/nipype/pipeline/engine/utils.py
@@ -1050,7 +1050,17 @@ def generate_expanded_graph(graph_in):
expansions = defaultdict(list)
for node in graph_in.nodes():
for src_id in list(old_edge_dict.keys()):
- if node.itername.startswith(src_id):
+ # Drop the original JoinNodes; only concerned with
+ # generated Nodes
+ if hasattr(node, 'joinfield'):
+ continue
+ # Patterns:
+ # - src_id : Non-iterable node
+ # - src_id.[a-z]\d+ : IdentityInterface w/ iterables
+ # - src_id.[a-z]I.[a-z]\d+ : Non-IdentityInterface w/ iterables
+ # - src_idJ\d+ : JoinNode(IdentityInterface)
+ if re.match(src_id + r'((\.[a-z](I\.[a-z])?|J)\d+)?$',
+ node.itername):
expansions[src_id].append(node)
for in_id, in_nodes in list(expansions.items()):
logger.debug("The join node %s input %s was expanded"
Issue with node name that starts with another node's name
I think the [line ~801 in util.py](https://github.com/nipy/nipype/edit/master/nipype/pipeline/engine/utils.py#L801) should be something like this:
```python
for node in graph_in.nodes():
    for src_id, edge_data in list(old_edge_dict.items()):
        if node.itername.startswith(src_id + '.'):  # <-- add '.' to src_id
            expansions[src_id].append(node)
```
For example, if the node "input" feeds into "input_files", then "input_files" gets wrongly included when you only test node.itername.startswith(src_id). Requiring the trailing '.' prevents "input_files" from being included.
Edit: removed last part of my comment.
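A quick standalone check of the anchored pattern from the patch, using hypothetical node iternames:
```python
import re

src_id = 'input'
pattern = src_id + r'((\.[a-z](I\.[a-z])?|J)\d+)?$'

for itername in ['input', 'input.a0', 'input.aI.b1', 'inputJ2', 'input_files']:
    print(itername, bool(re.match(pattern, itername)))
# 'input_files' no longer matches, while legitimate expansions still do
```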
nipy/nipype
diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py
index 436d29d9e..54ff15048 100644
--- a/nipype/pipeline/engine/tests/test_join.py
+++ b/nipype/pipeline/engine/tests/test_join.py
@@ -7,11 +7,9 @@ from __future__ import (print_function, division, unicode_literals,
absolute_import)
from builtins import open
-import os
-
from ... import engine as pe
from ....interfaces import base as nib
-from ....interfaces.utility import IdentityInterface
+from ....interfaces.utility import IdentityInterface, Function, Merge
from ....interfaces.base import traits, File
@@ -612,3 +610,20 @@ def test_nested_workflow_join(tmpdir):
# there should be six nodes in total
assert len(result.nodes()) == 6, \
"The number of expanded nodes is incorrect."
+
+
+def test_name_prefix_join(tmpdir):
+ tmpdir.chdir()
+
+ def sq(x):
+ return x ** 2
+
+ wf = pe.Workflow('wf', base_dir=tmpdir.strpath)
+ square = pe.Node(Function(function=sq), name='square')
+ square.iterables = [('x', [1, 2])]
+ square_join = pe.JoinNode(Merge(1, ravel_inputs=True),
+ name='square_join',
+ joinsource='square',
+ joinfield=['in1'])
+ wf.connect(square, 'out', square_join, "in1")
+ wf.run()
diff --git a/pydicom/dataset.py b/pydicom/dataset.py
index 1052b8587..8d12864bb 100644
--- a/pydicom/dataset.py
+++ b/pydicom/dataset.py
@@ -28,8 +28,8 @@ from pydicom.datadict import (tag_for_keyword, keyword_for_tag,
repeater_has_keyword)
from pydicom.tag import Tag, BaseTag, tag_in_exception
from pydicom.dataelem import DataElement, DataElement_from_raw, RawDataElement
-from pydicom.uid import (UncompressedPixelTransferSyntaxes,
- ExplicitVRLittleEndian)
+from pydicom.uid import (UncompressedPixelTransferSyntaxes,
+ ExplicitVRLittleEndian)
import pydicom # for dcmwrite
import pydicom.charset
from pydicom.config import logger
@@ -400,14 +400,13 @@ class Dataset(dict):
return True
if isinstance(other, self.__class__):
- # Compare Elements using values() and class variables using
- # __dict__
+ # Compare Elements using values()
# Convert values() to a list for compatibility between
# python 2 and 3
# Sort values() by element tag
self_elem = sorted(list(self.values()), key=lambda x: x.tag)
other_elem = sorted(list(other.values()), key=lambda x: x.tag)
- return self_elem == other_elem and self.__dict__ == other.__dict__
+ return self_elem == other_elem
return NotImplemented
@@ -697,9 +696,9 @@ class Dataset(dict):
Returns
-------
- None
+ None
Converted pixel data is stored internally in the dataset.
-
+
If a compressed image format, the image is decompressed,
and any related data elements are changed accordingly.
"""
@@ -720,7 +719,9 @@ class Dataset(dict):
pixel_array = x.get_pixeldata(self)
self._pixel_array = self._reshape_pixel_array(pixel_array)
if x.needs_to_convert_to_RGB(self):
- self._pixel_array = self._convert_YBR_to_RGB(self._pixel_array)
+ self._pixel_array = self._convert_YBR_to_RGB(
+ self._pixel_array
+ )
successfully_read_pixel_data = True
break
except Exception as e:
@@ -743,31 +744,31 @@ class Dataset(dict):
raise NotImplementedError(msg)
# is this guaranteed to work if memory is re-used??
self._pixel_id = id(self.PixelData)
-
+
def decompress(self):
"""Decompresses pixel data and modifies the Dataset in-place
- If not a compressed transfer syntax, then pixel data is converted
- to a numpy array internally, but not returned.
-
- If compressed pixel data, then is decompressed using an image handler,
- and internal state is updated appropriately:
- - TransferSyntax is updated to non-compressed form
- - is_undefined_length for pixel data is set False
+ If not a compressed transfer syntax, then pixel data is converted
+ to a numpy array internally, but not returned.
+
+ If compressed pixel data, then is decompressed using an image handler,
+ and internal state is updated appropriately:
+ - TransferSyntax is updated to non-compressed form
+ - is_undefined_length for pixel data is set False
Returns
-------
None
- Raises
+ Raises
------
NotImplementedError
If the pixel data was originally compressed but file is not
- ExplicitVR LittleEndian as required by Dicom standard
- """
+ ExplicitVR LittleEndian as required by Dicom standard
+ """
self.convert_pixel_data()
self.is_decompressed = True
- # May have been undefined length pixel data, but won't be now
+ # May have been undefined length pixel data, but won't be now
if 'PixelData' in self:
self[0x7fe00010].is_undefined_length = False
@@ -780,14 +781,13 @@ class Dataset(dict):
# Check that current file as read does match expected
if not self.is_little_endian or self.is_implicit_VR:
msg = ("Current dataset does not match expected ExplicitVR "
- "LittleEndian transfer syntax from a compressed "
+ "LittleEndian transfer syntax from a compressed "
"transfer syntax")
raise NotImplementedError(msg)
-
+
# All is as expected, updated the Transfer Syntax
self.file_meta.TransferSyntaxUID = ExplicitVRLittleEndian
-
@property
def pixel_array(self):
"""Return the Pixel Data as a NumPy array.
@@ -1233,3 +1233,29 @@ class FileDataset(Dataset):
if self.filename and os.path.exists(self.filename):
statinfo = os.stat(self.filename)
self.timestamp = statinfo.st_mtime
+
+ def __eq__(self, other):
+ """Compare `self` and `other` for equality.
+
+ Returns
+ -------
+ bool
+ The result if `self` and `other` are the same class
+ NotImplemented
+ If `other` is not the same class as `self` then returning
+ NotImplemented delegates the result to superclass.__eq__(subclass)
+ """
+ # When comparing against self this will be faster
+ if other is self:
+ return True
+
+ if isinstance(other, self.__class__):
+ # Compare Elements using values() and class members using __dict__
+ # Convert values() to a list for compatibility between
+ # python 2 and 3
+ # Sort values() by element tag
+ self_elem = sorted(list(self.values()), key=lambda x: x.tag)
+ other_elem = sorted(list(other.values()), key=lambda x: x.tag)
+ return self_elem == other_elem and self.__dict__ == other.__dict__
+
+ return NotImplemented
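The net effect of the two changes above: `Dataset` equality now ignores non-element instance attributes, while `FileDataset` still compares them. A minimal sketch (attribute names made up):
```python
from pydicom.dataset import Dataset

d = Dataset()
d.PatientName = 'Test'
d.some_attr = 'foo'   # plain instance attribute, not a DICOM element

e = Dataset()
e.PatientName = 'Test'

print(d == e)  # True: only DataElements are compared now
```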
diff --git a/pydicom/filewriter.py b/pydicom/filewriter.py
index ceb809915..01d9a4911 100644
--- a/pydicom/filewriter.py
+++ b/pydicom/filewriter.py
@@ -226,7 +226,10 @@ def write_PN(fp, data_element, padding=b' ', encoding=None):
val = data_element.value
if isinstance(val[0], compat.text_type) or not in_py2:
- val = [elem.encode(encoding) for elem in val]
+ try:
+ val = [elem.encode(encoding) for elem in val]
+ except TypeError:
+ val = [elem.encode(encoding[0]) for elem in val]
val = b'\\'.join(val)
No encoding with write_PN raises TypeError
#### Description
`filewriter.write_PN()` with `encoding=None` (default) raises `TypeError`
#### Steps/Code to Reproduce
```python
>>> from pydicom.filebase import DicomBytesIO
>>> from pydicom.dataelem import DataElement
>>> from pydicom.filewriter import write_PN
>>> fp = DicomBytesIO()
>>> fp.is_little_endian = True
>>> elem = DataElement(0x00100010, 'PN', u'\u03b8')
>>> write_PN(fp, elem)
```
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "../pydicom/pydicom/filewriter.py", line 228, in write_PN
val = [elem.encode(encoding) for elem in val]
TypeError: encode() argument 1 must be string, not list
```
Occurs because when `encoding=None`, `encoding = [default] * 3` is used.
I'm not sure what the impact is in practice since `write_PN` only seems to get called internally with encoding not set to None.
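A minimal reproduction of the underlying failure and the patched fallback (assuming `'iso8859'`, the default codec name pydicom uses):
```python
default = 'iso8859'
encoding = [default] * 3       # what write_PN ends up with when encoding=None

try:
    val = u'\u00e8'.encode(encoding)     # TypeError: argument must be str, not list
except TypeError:
    val = u'\u00e8'.encode(encoding[0])  # the fallback added by the patch
print(val)  # b'\xe8'
```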
#### Versions
Python 2.7
pydicom/pydicom
diff --git a/pydicom/tests/test_dataset.py b/pydicom/tests/test_dataset.py
index fee7082bc..822b04e79 100644
--- a/pydicom/tests/test_dataset.py
+++ b/pydicom/tests/test_dataset.py
@@ -403,32 +403,35 @@ class DatasetTests(unittest.TestCase):
def testEqualityNoSequence(self):
"""Dataset: equality returns correct value with simple dataset"""
+ # Test empty dataset
+ assert Dataset() == Dataset()
+
d = Dataset()
d.SOPInstanceUID = '1.2.3.4'
d.PatientName = 'Test'
- self.assertTrue(d == d)
+ assert d == d
e = Dataset()
e.PatientName = 'Test'
e.SOPInstanceUID = '1.2.3.4'
- self.assertTrue(d == e)
+ assert d == e
e.SOPInstanceUID = '1.2.3.5'
- self.assertFalse(d == e)
+ assert not d == e
# Check VR
del e.SOPInstanceUID
e.add(DataElement(0x00080018, 'PN', '1.2.3.4'))
- self.assertFalse(d == e)
+ assert not d == e
# Check Tag
del e.SOPInstanceUID
e.StudyInstanceUID = '1.2.3.4'
- self.assertFalse(d == e)
+ assert not d == e
# Check missing Element in self
e.SOPInstanceUID = '1.2.3.4'
- self.assertFalse(d == e)
+ assert not d == e
# Check missing Element in other
d = Dataset()
@@ -437,7 +440,7 @@ class DatasetTests(unittest.TestCase):
e = Dataset()
e.SOPInstanceUID = '1.2.3.4'
- self.assertFalse(d == e)
+ assert not d == e
def testEqualityPrivate(self):
"""Dataset: equality returns correct value"""
@@ -500,16 +503,14 @@ class DatasetTests(unittest.TestCase):
def testEqualityUnknown(self):
"""Dataset: equality returns correct value with extra members """
+ # Non-element class members are ignored in equality testing
d = Dataset()
d.SOPEustaceUID = '1.2.3.4'
- self.assertTrue(d == d)
+ assert d == d
e = Dataset()
- e.SOPEustaceUID = '1.2.3.4'
- self.assertTrue(d == e)
-
e.SOPEustaceUID = '1.2.3.5'
- self.assertFalse(d == e)
+ assert d == e
def testEqualityInheritance(self):
"""Dataset: equality returns correct value for subclass """
@@ -529,6 +530,19 @@ class DatasetTests(unittest.TestCase):
self.assertFalse(d == e)
self.assertFalse(e == d)
+ def test_equality_elements(self):
+ """Test that Dataset equality only checks DataElements."""
+ d = Dataset()
+ d.SOPInstanceUID = '1.2.3.4'
+ d.PatientName = 'Test'
+ d.foo = 'foo'
+ assert d == d
+
+ e = Dataset()
+ e.PatientName = 'Test'
+ e.SOPInstanceUID = '1.2.3.4'
+ assert d == e
+
def test_inequality(self):
"""Test inequality operator"""
d = Dataset()
diff --git a/pydicom/tests/test_filewriter.py b/pydicom/tests/test_filewriter.py
index 9ed2347ea..6e829300b 100644
--- a/pydicom/tests/test_filewriter.py
+++ b/pydicom/tests/test_filewriter.py
@@ -1826,7 +1826,6 @@ class TestWriteNumbers(object):
class TestWritePN(object):
"""Test filewriter.write_PN"""
- @pytest.mark.skip("Raises exception due to issue #489")
def test_no_encoding_unicode(self):
"""If PN element has no encoding info, default is used"""
fp = DicomBytesIO()
@@ -1979,12 +1978,11 @@ class TestWriteNumbers(object):
class TestWritePN(object):
"""Test filewriter.write_PN"""
- @pytest.mark.skip("Raises exception due to issue #489")
def test_no_encoding_unicode(self):
"""If PN element as no encoding info, default is used"""
fp = DicomBytesIO()
fp.is_little_endian = True
- elem = DataElement(0x00100010, 'PN', u'\u03b8')
+ elem = DataElement(0x00100010, 'PN', u'\u00e8')
write_PN(fp, elem)
def test_no_encoding(self):
diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
index 83b97cb..9191222 100644
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -2,15 +2,19 @@ from __future__ import print_function
from __future__ import unicode_literals
import io
+import logging
import os.path
import sys
from pre_commit import output
+from pre_commit.util import cmd_output
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_filename
+logger = logging.getLogger(__name__)
+
# This is used to identify the hook file we install
PRIOR_HASHES = (
'4d9958c90bc262f47553e2c073f14cfe',
@@ -36,6 +40,13 @@ def install(
skip_on_missing_conf=False,
):
"""Install the pre-commit hooks."""
+ if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():
+ logger.error(
+ 'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
+ 'hint: `git config --unset-all core.hooksPath`',
+ )
+ return 1
+
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
Handle when `core.hooksPath` is set?
As we found in https://github.com/pre-commit/pre-commit-hooks/issues/250, pre-commit (despite being installed) will be silently skipped if `core.hooksPath` is set.
A few options:
- during `pre-commit install`, check this variable and warn
- "" but error
- install into the directory at `core.hooksPath` (but it may be outside the working dir? probably not the best idea to write to it)
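For reference, with the check added in the patch above, an install attempt would now be refused roughly like this (output reconstructed from the `logger.error` call, not captured from a real run):
```
$ git config core.hooksPath /some/hooks
$ pre-commit install
Cowardly refusing to install hooks with `core.hooksPath` set.
hint: `git config --unset-all core.hooksPath`
```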
jasonkriss: @alykhantejani I just added MeanPairwiseDistance to this PR.
diff --git a/ignite/metrics/__init__.py b/ignite/metrics/__init__.py
index 3902c5de..2cc4e1ff 100644
--- a/ignite/metrics/__init__.py
+++ b/ignite/metrics/__init__.py
@@ -1,3 +1,8 @@
+from .binary_accuracy import BinaryAccuracy
from .categorical_accuracy import CategoricalAccuracy
+from .mean_absolute_error import MeanAbsoluteError
+from .mean_pairwise_distance import MeanPairwiseDistance
from .mean_squared_error import MeanSquaredError
from .metric import Metric
+from .root_mean_squared_error import RootMeanSquaredError
+from .top_k_categorical_accuracy import TopKCategoricalAccuracy
diff --git a/ignite/metrics/binary_accuracy.py b/ignite/metrics/binary_accuracy.py
new file mode 100644
index 00000000..3b33a4db
--- /dev/null
+++ b/ignite/metrics/binary_accuracy.py
@@ -0,0 +1,28 @@
+from __future__ import division
+
+import torch
+
+from .metric import Metric
+from ignite.exceptions import NotComputableError
+
+
+class BinaryAccuracy(Metric):
+ """
+ Calculates the binary accuracy.
+
+ `update` must receive output of the form (y_pred, y).
+ """
+ def reset(self):
+ self._num_correct = 0
+ self._num_examples = 0
+
+ def update(self, output):
+ y_pred, y = output
+ correct = torch.eq(torch.round(y_pred).type(torch.LongTensor), y)
+ self._num_correct += torch.sum(correct)
+ self._num_examples += correct.shape[0]
+
+ def compute(self):
+ if self._num_examples == 0:
+ raise NotComputableError('BinaryAccuracy must have at least one example before it can be computed')
+ return self._num_correct / self._num_examples
diff --git a/ignite/metrics/mean_absolute_error.py b/ignite/metrics/mean_absolute_error.py
new file mode 100644
index 00000000..88c620de
--- /dev/null
+++ b/ignite/metrics/mean_absolute_error.py
@@ -0,0 +1,28 @@
+from __future__ import division
+
+import torch
+
+from .metric import Metric
+from ignite.exceptions import NotComputableError
+
+
+class MeanAbsoluteError(Metric):
+ """
+ Calculates the mean absolute error.
+
+ `update` must receive output of the form (y_pred, y).
+ """
+ def reset(self):
+ self._sum_of_absolute_errors = 0.0
+ self._num_examples = 0
+
+ def update(self, output):
+ y_pred, y = output
+ absolute_errors = torch.abs(y_pred - y.view_as(y_pred))
+ self._sum_of_absolute_errors += torch.sum(absolute_errors)
+ self._num_examples += y.shape[0]
+
+ def compute(self):
+ if self._num_examples == 0:
+ raise NotComputableError('MeanAbsoluteError must have at least one example before it can be computed')
+ return self._sum_of_absolute_errors / self._num_examples
diff --git a/ignite/metrics/mean_pairwise_distance.py b/ignite/metrics/mean_pairwise_distance.py
new file mode 100644
index 00000000..b18be661
--- /dev/null
+++ b/ignite/metrics/mean_pairwise_distance.py
@@ -0,0 +1,34 @@
+from __future__ import division
+
+import torch
+from torch.nn.functional import pairwise_distance
+
+from .metric import Metric
+from ignite.exceptions import NotComputableError
+
+
+class MeanPairwiseDistance(Metric):
+ """
+ Calculates the mean pairwise distance.
+
+ `update` must receive output of the form (y_pred, y).
+ """
+ def __init__(self, p=2, eps=1e-6):
+ super(MeanPairwiseDistance, self).__init__()
+ self._p = p
+ self._eps = eps
+
+ def reset(self):
+ self._sum_of_distances = 0.0
+ self._num_examples = 0
+
+ def update(self, output):
+ y_pred, y = output
+ distances = pairwise_distance(y_pred, y, p=self._p, eps=self._eps)
+ self._sum_of_distances += torch.sum(distances)
+ self._num_examples += y.shape[0]
+
+ def compute(self):
+ if self._num_examples == 0:
+ raise NotComputableError('MeanPairwiseDistance must have at least one example before it can be computed')
+ return self._sum_of_distances / self._num_examples
diff --git a/ignite/metrics/root_mean_squared_error.py b/ignite/metrics/root_mean_squared_error.py
new file mode 100644
index 00000000..47e9f396
--- /dev/null
+++ b/ignite/metrics/root_mean_squared_error.py
@@ -0,0 +1,18 @@
+from __future__ import division
+import math
+
+import torch
+
+from .mean_squared_error import MeanSquaredError
+from ignite.exceptions import NotComputableError
+
+
+class RootMeanSquaredError(MeanSquaredError):
+ """
+ Calculates the root mean squared error.
+
+ `update` must receive output of the form (y_pred, y).
+ """
+ def compute(self):
+ mse = super(RootMeanSquaredError, self).compute()
+ return math.sqrt(mse)
diff --git a/ignite/metrics/top_k_categorical_accuracy.py b/ignite/metrics/top_k_categorical_accuracy.py
new file mode 100644
index 00000000..1ee90353
--- /dev/null
+++ b/ignite/metrics/top_k_categorical_accuracy.py
@@ -0,0 +1,34 @@
+from __future__ import division
+
+import torch
+
+from .metric import Metric
+from ignite.exceptions import NotComputableError
+
+
+class TopKCategoricalAccuracy(Metric):
+ """
+ Calculates the top-k categorical accuracy.
+
+ `update` must receive output of the form (y_pred, y).
+ """
+ def __init__(self, k=5):
+ super(TopKCategoricalAccuracy, self).__init__()
+ self._k = k
+
+ def reset(self):
+ self._num_correct = 0
+ self._num_examples = 0
+
+ def update(self, output):
+ y_pred, y = output
+ sorted_indices = torch.topk(y_pred, self._k, dim=1)[1]
+ expanded_y = y.view(-1, 1).expand(-1, self._k)
+ correct = torch.sum(torch.eq(sorted_indices, expanded_y), dim=1)
+ self._num_correct += torch.sum(correct)
+ self._num_examples += correct.shape[0]
+
+ def compute(self):
+ if self._num_examples == 0:
+ raise NotComputableError('TopKCategoricalAccuracy must have at least one example before it can be computed')
+ return self._num_correct / self._num_examples
Add pairwise distance to Metrics
I think that in the evaluation of regression tasks, pairwise distance, especially the norm-2 distance as in `torch.nn.functional.pairwise_distance`, is at least as frequently used as MSE, which is mostly used as a loss rather than an evaluation metric. Therefore, I was wondering whether it is worth adding to the Metrics package as a commonly used metric.
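A small usage sketch of the new metric from this PR, used standalone rather than attached to an engine (tensor values are made up):
```python
import torch
from ignite.metrics import MeanPairwiseDistance

metric = MeanPairwiseDistance(p=2)
metric.reset()
y_pred = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
y = torch.tensor([[0.0, 1.0], [1.0, 3.0]])
metric.update((y_pred, y))
print(metric.compute())  # per-row distances are 1.0 and 2.0, so mean ~= 1.5
```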
diff --git a/marshmallow/decorators.py b/marshmallow/decorators.py
index 8b6df0df..cd850fc0 100644
--- a/marshmallow/decorators.py
+++ b/marshmallow/decorators.py
@@ -107,6 +107,9 @@ def post_dump(fn=None, pass_many=False, pass_original=False):
By default, receives a single object at a time, transparently handling the ``many``
argument passed to the Schema. If ``pass_many=True``, the raw data
(which may be a collection) and the value for ``many`` is passed.
+
+ If ``pass_original=True``, the original data (before serializing) will be passed as
+ an additional argument to the method.
"""
return tag_processor(POST_DUMP, fn, pass_many, pass_original=pass_original)
@@ -129,6 +132,9 @@ def post_load(fn=None, pass_many=False, pass_original=False):
By default, receives a single datum at a time, transparently handling the ``many``
argument passed to the Schema. If ``pass_many=True``, the raw data
(which may be a collection) and the value for ``many`` is passed.
+
+ If ``pass_original=True``, the original data (before deserializing) will be passed as
+ an additional argument to the method.
"""
return tag_processor(POST_LOAD, fn, pass_many, pass_original=pass_original)
diff --git a/marshmallow/schema.py b/marshmallow/schema.py
index 79bb8ee1..904c5322 100644
--- a/marshmallow/schema.py
+++ b/marshmallow/schema.py
@@ -869,8 +869,8 @@ class BaseSchema(base.SchemaABC):
data = utils.if_none(processor(data, many), data)
elif many:
if pass_original:
- data = [utils.if_none(processor(item, original_data), item)
- for item in data]
+ data = [utils.if_none(processor(item, original), item)
+ for item, original in zip(data, original_data)]
else:
data = [utils.if_none(processor(item), item) for item in data]
else:
post_dump is passing a list of objects as original object
Hi,
I think post_dump with pass_original=True should pass the original object corresponding to the serialized datum, not the whole list of objects that the object belongs to.
``` python
from marshmallow import fields, post_dump, Schema
class DeviceSchema(Schema):
id = fields.String()
@post_dump(pass_original=True)
def __post_dump(self, data, obj):
print(obj) # <-- this is a list
devices = [dict(id=1), dict(id=2)]
DeviceSchema().dump(devices, many=True)
```
In the above example, the parameter `obj` is a list of devices rather than the device object itself.
What do you think?
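With the fix above, each serialized item is zipped with its own original object, so the example behaves as expected:
```python
from marshmallow import Schema, fields, post_dump

class DeviceSchema(Schema):
    id = fields.String()

    @post_dump(pass_original=True)
    def _post_dump(self, data, original):
        print(original)  # the matching device dict, no longer the whole list
        return data

DeviceSchema().dump([dict(id=1), dict(id=2)], many=True)
```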
pep8speaks: Hello @ksunden! Thanks for submitting the PR.
- In the file [`WrightTools/kit/_array.py`](https://github.com/wright-group/WrightTools/blob/405cd2cd8b838ac1bbf3b676aaaeb5f7a6de2a3d/WrightTools/kit/_array.py), following are the PEP8 issues :
> [Line 216:35](https://github.com/wright-group/WrightTools/blob/405cd2cd8b838ac1bbf3b676aaaeb5f7a6de2a3d/WrightTools/kit/_array.py#L216): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:'
- In the file [`tests/kit/remove_nans_1D.py`](https://github.com/wright-group/WrightTools/blob/405cd2cd8b838ac1bbf3b676aaaeb5f7a6de2a3d/tests/kit/remove_nans_1D.py), following are the PEP8 issues :
> [Line 30:1](https://github.com/wright-group/WrightTools/blob/405cd2cd8b838ac1bbf3b676aaaeb5f7a6de2a3d/tests/kit/remove_nans_1D.py#L30): [E302](https://duckduckgo.com/?q=pep8%20E302) expected 2 blank lines, found 1
ksunden: ```
>>> a = np.array([np.nan, 1, 2, 2])
>>> np.isnan(a)
array([ True, False, False, False])
>>> not np.isnan(a)
Traceback (most recent call last):
File "<input>", line 1, in <module>
not np.isnan(a)
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any()
or a.all()
>>> np.isnan(a) == False
array([False, True, True, True])
>>> np.isnan(a) is False
False
```
Hush, pep8speaks, that doesn't work here....
diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py
index 66cfb11..16136f1 100644
--- a/WrightTools/kit/_array.py
+++ b/WrightTools/kit/_array.py
@@ -210,17 +210,10 @@ def remove_nans_1D(*args):
tuple
Tuple of 1D arrays in same order as given, with nan indices removed.
"""
- # find all indicies to keep
- bads = np.array([])
- for arr in args:
- bad = np.array(np.where(np.isnan(arr))).flatten()
- bads = np.hstack((bad, bads))
- if hasattr(args, 'shape') and len(args.shape) == 1:
- goods = [i for i in np.arange(args.shape[0]) if i not in bads]
- else:
- goods = [i for i in np.arange(len(args[0])) if i not in bads]
- # apply
- return tuple(a[goods] for a in args)
+ vals = np.isnan(args[0])
+ for a in args:
+ vals |= np.isnan(a)
+ return tuple(np.array(a)[vals == False] for a in args)
def share_nans(*arrs):
remove_nans_1D fails for list
```
>>> wt.kit.remove_nans_1D([np.nan, 1, 2, 2])
Traceback (most recent call last):
File "<input>", line 1, in <module>
wt.kit.remove_nans_1D([np.nan, 1, 2, 2])
File "/home/kyle/wright/WrightTools/WrightTools/kit/_array.py", line 223, in rem
ove_nans_1D
return tuple(a[goods] for a in args)
File "/home/kyle/wright/WrightTools/WrightTools/kit/_array.py", line 223, in <ge
nexpr>
return tuple(a[goods] for a in args)
TypeError: list indices must be integers or slices, not list
>>> wt.kit.remove_nans_1D(np.array([np.nan, 1, 2, 2]))
(array([1., 2., 2.]),)
```
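A standalone sketch of the union-mask approach taken in the patch; `~vals` is the idiomatic spelling of the `vals == False` comparison that pep8speaks flagged, and `np.asarray` is what makes plain lists work:
```python
import numpy as np

def remove_nans_1d_sketch(*args):
    # build the union of NaN positions across all input sequences
    vals = np.isnan(np.asarray(args[0], dtype=float))
    for a in args:
        vals |= np.isnan(np.asarray(a, dtype=float))
    # keep only positions where no input had a NaN
    return tuple(np.asarray(a, dtype=float)[~vals] for a in args)

print(remove_nans_1d_sketch([np.nan, 1, 2, 2]))  # (array([1., 2., 2.]),)
```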
diff --git a/b2/sync/scan_policies.py b/b2/sync/scan_policies.py
index 198c079..dfb9413 100644
--- a/b2/sync/scan_policies.py
+++ b/b2/sync/scan_policies.py
@@ -27,10 +27,45 @@ class RegexSet(object):
return any(c.match(s) is not None for c in self._compiled_list)
+def convert_dir_regex_to_dir_prefix_regex(dir_regex):
+ """
+ The patterns used to match directory names (and file names) are allowed
+ to match a prefix of the name. This 'feature' was unintentional, but is
+ being retained for compatibility.
+
+ This means that a regex that matches a directory name can't be used directly
+ to match against a file name and test whether the file should be excluded
+ because it matches the directory.
+
+ The pattern 'photos' will match directory names 'photos' and 'photos2',
+ and should exclude files 'photos/kitten.jpg', and 'photos2/puppy.jpg'.
+ It should not exclude 'photos.txt', because there is no directory name
+ that matches.
+
+ On the other hand, the pattern 'photos$' should match 'photos/kitten.jpg',
+ but not 'photos2/puppy.jpg', nor 'photos.txt'
+
+ If the original regex is valid, there are only two cases to consider:
+ either the regex ends in '$' or does not.
+ """
+ if dir_regex.endswith('$'):
+ return dir_regex[:-1] + r'/'
+ else:
+ return dir_regex + r'.*?/'
+
+
class ScanPoliciesManager(object):
"""
Policy object used when scanning folders for syncing, used to decide
which files to include in the list of files to be synced.
+
+ Code that scans through files should at least use should_exclude_file()
+ to decide whether each file should be included; it will check include/exclude
+ patterns for file names, as well as patterns for excluding directories.
+
+ Code that scans may optionally use should_exclude_directory() to test whether
+ it can skip a directory completely and not bother listing the files and
+ sub-directories in it.
"""
def __init__(
@@ -40,6 +75,9 @@ class ScanPoliciesManager(object):
include_file_regexes=tuple(),
):
self._exclude_dir_set = RegexSet(exclude_dir_regexes)
+ self._exclude_file_because_of_dir_set = RegexSet(
+ map(convert_dir_regex_to_dir_prefix_regex, exclude_dir_regexes)
+ )
self._exclude_file_set = RegexSet(exclude_file_regexes)
self._include_file_set = RegexSet(include_file_regexes)
@@ -51,8 +89,12 @@ class ScanPoliciesManager(object):
being scanned.
:return: True iff excluded.
"""
- return self._exclude_file_set.matches(file_path) and \
- not self._include_file_set.matches(file_path)
+ exclude_because_of_dir = self._exclude_file_because_of_dir_set.matches(file_path)
+ exclude_because_of_file = (
+ self._exclude_file_set.matches(file_path) and
+ not self._include_file_set.matches(file_path)
+ )
+ return exclude_because_of_dir or exclude_because_of_file
def should_exclude_directory(self, dir_path):
"""
--excludeDirRegex does not work when source is B2
The new filtering that lets you exclude an entire directory works in the `LocalFolder` class, but not the `B2Folder` class.
I think there are two possible approaches to fixing it: (1) change B2Folder to simulate the existence of directories, and check them for exclusion, or (2) extend `ScanPoliciesManager.should_exclude_file` to also test whether any of the directories in the path are excluded. I like #2, but I think it would need optimization to avoid checking every parent directory of every file.
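A quick check of the prefix-regex conversion against the cases named in its docstring, reusing the helper from the patch:
```python
import re

def convert_dir_regex_to_dir_prefix_regex(dir_regex):
    if dir_regex.endswith('$'):
        return dir_regex[:-1] + r'/'
    return dir_regex + r'.*?/'

cases = [('photos', 'photos/kitten.jpg'),   # True
         ('photos', 'photos2/puppy.jpg'),   # True (prefix matching retained)
         ('photos', 'photos.txt'),          # False (no matching directory)
         ('photos$', 'photos2/puppy.jpg')]  # False (anchored pattern)
for dir_regex, path in cases:
    file_regex = convert_dir_regex_to_dir_prefix_regex(dir_regex)
    print(dir_regex, path, re.match(file_regex, path) is not None)
```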
vitaly-krugl: @lukebakken, I haven't forgotten - will catch up in the next couple of days.
lukebakken: @vitaly-krugl no hurry at all! Thanks again.
lukebakken: @vitaly-krugl the `queue.declare` method never makes it to RabbitMQ. An `AssertionError` is thrown [here](https://github.com/pika/pika/blob/master/pika/spec.py#L1003-L1004) which gums up the works when the `with` clause tries to exit.
vitaly-krugl: Would this alone punch at the heart of the problem [here](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/channel.py#L1382)?
```
try:
self._send_method(method)
except Exception:
self._blocking = None
raise
```
lukebakken: I'll try that out.
vitaly-krugl: Don't try it out just yet, I missed something there (and the fix in the PR did, too, I think)
vitaly-krugl: The [if acceptable_replies:](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/channel.py#L1361) block sets `self._blocking` and also registers a number of callbacks. If `self._send_method(method)` raises (as in this case), we really don't want any of those registered callbacks to remain there either.
Since a successful `self._send_method(method)` call will ultimately just enqueue some data on the output write buffer, it should be possible to move the [if acceptable_replies:](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/channel.py#L1361) block after [self._send_method(method)](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/channel.py#L1382).
Furthermore, to ensure that an incomplete message doesn't get placed in the output buffer (due to marshaling failure of one of its subframes), [Connection._send_message()](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/connection.py#L2273) needs to be modified to pre-marshal all of its parts and then append them to the output frame buffer only after all marshaling is done, updating the stats and finally calling `self._flush_outbound()` and `self._detect_backpressure()` like [Connection._send_frame()](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/connection.py#L2251-L2257). To this end, `Connection._send_message()` and `Connection._send_frame()` should share a method (e.g., `Connection._output_marshaled_frame()`) that updates `self.bytes_sent` and `self.frames_sent` and appends the marshaled frame data to `self.outbound_buffer`.
lukebakken: @vitaly-krugl - ready for re-review. Thanks!
lukebakken: @vitaly-krugl - I have merged in the tests you provided and this is ready for another review. Thanks!
vitaly-krugl: @lukebakken, I renamed this PR "Request marshaling error should not corrupt a channel", which reflects issues #990 and #912 more accurately.
vitaly-krugl: On broker's Channel.Close, the draining is necessary because AMQP says to
ignore all incoming requests after the channel is closed except
Channel.Close. So, draining in that case helps break the gridlock.
However, in the case where the client is closing the channel with some blocking
requests still pending normally, we have a perfectly healthy channel and
nothing special is needed. The normal course of events will see it through.
On Tue, Apr 10, 2018, 5:00 AM Luke Bakken <[email protected]> wrote:
> *@lukebakken* commented on this pull request.
> ------------------------------
>
> In pika/channel.py
> <https://github.com/pika/pika/pull/991#discussion_r180392933>:
>
> > @@ -1327,9 +1327,10 @@ def _on_synchronous_complete(self, _method_frame_unused):
> while self._blocked and self._blocking is None:
> self._rpc(*self._blocked.popleft())
>
> - def _drain_blocked_methods_on_remote_close(self):
>
> If we think we need an "emergency channel-close" method that purges
> queued-up requests (I don't think we do
>
> Draining blocked methods on a broker-initiated close was introduced in
> #957 <https://github.com/pika/pika/pull/957> - please check that PR out
> again. I still think it's necessary.
vitaly-krugl: @lukebakken, is this PR ready for re-review?
vitaly-krugl: I think I might not have ended that review ??
On Mon, Apr 16, 2018, 6:22 AM Luke Bakken <[email protected]> wrote:
> @vitaly-krugl <https://github.com/vitaly-krugl> if I re-select your name
> in the "Reviewers" dropdown, the status icon changes back to an orange disk
> ... do you not get a new email saying I re-requested a review? I assumed
> that you did. If you don't get an email, I can @-mention you in a comment.
> Thanks for the re-re-reviews 😄
lukebakken: @vitaly-krugl thanks! Sorry I missed the previous comment about that test.
lukebakken: @vitaly-krugl - changes made. I'll merge this once builds complete. Thanks!
diff --git a/pika/channel.py b/pika/channel.py
index fb67a0d..282f53c 100644
--- a/pika/channel.py
+++ b/pika/channel.py
@@ -1347,7 +1347,7 @@ class Channel(object):
sent, and thus its completion callback would never be called.
"""
- LOGGER.debug('Draining %i blocked frames due to remote Channel.Close',
+ LOGGER.debug('Draining %i blocked frames due to broker-requested Channel.Close',
len(self._blocked))
while self._blocked:
method = self._blocked.popleft()[0]
@@ -1408,6 +1408,12 @@ class Channel(object):
self._blocked.append([method, callback, acceptable_replies])
return
+ # Note: _send_method can throw exceptions if there are framing errors
+ # or invalid data passed in. Call it here to prevent self._blocking
+ # from being set if an exception is thrown. This also prevents
+ # acceptable_replies registering callbacks when exceptions are thrown
+ self._send_method(method)
+
# If acceptable replies are set, add callbacks
if acceptable_replies:
# Block until a response frame is received for synchronous frames
@@ -1430,8 +1436,6 @@ class Channel(object):
self.callbacks.add(self.channel_number, reply, callback,
arguments=arguments)
- self._send_method(method)
-
def _raise_if_not_open(self):
"""If channel is not in the OPEN state, raises ChannelClosed with
`reply_code` and `reply_text` corresponding to current state. If channel
diff --git a/pika/connection.py b/pika/connection.py
index be2b1bc..271b198 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -2306,11 +2306,7 @@ class Connection(pika.compat.AbstractBase):
'Attempted to send a frame on closed connection.')
marshaled_frame = frame_value.marshal()
- self.bytes_sent += len(marshaled_frame)
- self.frames_sent += 1
- self._adapter_emit_data(marshaled_frame)
- if self.params.backpressure_detection:
- self._detect_backpressure()
+ self._output_marshaled_frames([marshaled_frame])
def _send_method(self, channel_number, method, content=None):
"""Constructs a RPC method frame and then sends it to the broker.
@@ -2336,8 +2332,14 @@ class Connection(pika.compat.AbstractBase):
"""
length = len(content[1])
- self._send_frame(frame.Method(channel_number, method_frame))
- self._send_frame(frame.Header(channel_number, length, content[0]))
+ marshaled_body_frames = []
+
+ # Note: we construct the Method, Header and Content objects, marshal them
+ # *then* output in case the marshaling operation throws an exception
+ frame_method = frame.Method(channel_number, method_frame)
+ frame_header = frame.Header(channel_number, length, content[0])
+ marshaled_body_frames.append(frame_method.marshal())
+ marshaled_body_frames.append(frame_header.marshal())
if content[1]:
chunks = int(math.ceil(float(length) / self._body_max_length))
@@ -2346,7 +2348,10 @@ class Connection(pika.compat.AbstractBase):
end = start + self._body_max_length
if end > length:
end = length
- self._send_frame(frame.Body(channel_number, content[1][start:end]))
+ frame_body = frame.Body(channel_number, content[1][start:end])
+ marshaled_body_frames.append(frame_body.marshal())
+
+ self._output_marshaled_frames(marshaled_body_frames)
def _set_connection_state(self, connection_state):
"""Set the connection state.
@@ -2382,3 +2387,16 @@ class Connection(pika.compat.AbstractBase):
"""
self._frame_buffer = self._frame_buffer[byte_count:]
self.bytes_received += byte_count
+
+ def _output_marshaled_frames(self, marshaled_frames):
+ """Output list of marshaled frames to buffer and update stats
+
+ :param list marshaled_frames: A list of frames marshaled to bytes
+
+ """
+ for marshaled_frame in marshaled_frames:
+ self.bytes_sent += len(marshaled_frame)
+ self.frames_sent += 1
+ self._adapter_emit_data(marshaled_frame)
+ if self.params.backpressure_detection:
+ self._detect_backpressure()
BlockingChannel.queue_declare hanging on non-string queue parameters
Under Python 3.6.4 and Pika 0.11.2, the `BlockingChannel.queue_declare` method hangs when setting its `queue` parameter to a value that is not of `str` type (e.g., `int`, `bool`, `list`, `dict`, `tuple`, `NoneType`).
Input:
```
$ python3 <<EOF
import pika
with pika.BlockingConnection() as connection:
channel = connection.channel()
channel.queue_declare(queue=[1, 2, 3])
EOF
```
pika/pika
diff --git a/tests/acceptance/async_adapter_tests.py b/tests/acceptance/async_adapter_tests.py
index 2a51ae1..967e109 100644
--- a/tests/acceptance/async_adapter_tests.py
+++ b/tests/acceptance/async_adapter_tests.py
@@ -625,8 +625,9 @@ class TestExchangeRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters):
raise AssertionError("Should not have received an Exchange.DeclareOk")
-class TestPassiveExchangeDeclareWithConcurrentClose(AsyncTestCase, AsyncAdapters):
- DESCRIPTION = "should close channel: declare passive exchange with close"
+class TestNoDeadlockWhenClosingChannelWithPendingBlockedRequestsAndConcurrentChannelCloseFromBroker(
+ AsyncTestCase, AsyncAdapters):
+ DESCRIPTION = "No deadlock when closing a channel with pending blocked requests and concurrent Channel.Close from broker."
# To observe the behavior that this is testing, comment out this line
# in pika/channel.py - _on_close:
@@ -636,10 +637,12 @@ class TestPassiveExchangeDeclareWithConcurrentClose(AsyncTestCase, AsyncAdapters
# With the above line commented out, this test will hang
def begin(self, channel):
- self.name = self.__class__.__name__ + ':' + uuid.uuid1().hex
+ base_exch_name = self.__class__.__name__ + ':' + uuid.uuid1().hex
self.channel.add_on_close_callback(self.on_channel_closed)
for i in range(0, 99):
- exch_name = self.name + ':' + str(i)
+ # Passively declare a non-existent exchange to force Channel.Close
+ # from broker
+ exch_name = base_exch_name + ':' + str(i)
cb = functools.partial(self.on_bad_result, exch_name)
channel.exchange_declare(exch_name,
exchange_type='direct',
@@ -648,15 +651,49 @@ class TestPassiveExchangeDeclareWithConcurrentClose(AsyncTestCase, AsyncAdapters
channel.close()
def on_channel_closed(self, channel, reply_code, reply_text):
+ # The close is expected because the requested exchange doesn't exist
self.stop()
def on_bad_result(self, exch_name, frame):
- self.channel.exchange_delete(exch_name)
- raise AssertionError("Should not have received an Exchange.DeclareOk")
+ self.fail("Should not have received an Exchange.DeclareOk")
-class TestQueueDeclareAndDelete(AsyncTestCase, AsyncAdapters):
- DESCRIPTION = "Create and delete a queue"
+class TestClosingAChannelPermitsBlockedRequestToComplete(AsyncTestCase,
+ AsyncAdapters):
+ DESCRIPTION = "Closing a channel permits blocked requests to complete."
+
+ def begin(self, channel):
+ self._queue_deleted = False
+
+ channel.add_on_close_callback(self.on_channel_closed)
+
+ q_name = self.__class__.__name__ + ':' + uuid.uuid1().hex
+ # NOTE we pass callback to make it a blocking request
+ channel.queue_declare(q_name,
+ exclusive=True,
+ callback=lambda _frame: None)
+
+ self.assertIsNotNone(channel._blocking)
+
+ # The Queue.Delete should block on completion of Queue.Declare
+ channel.queue_delete(q_name, callback=self.on_queue_deleted)
+ self.assertTrue(channel._blocked)
+
+ # This Channel.Close should allow the blocked Queue.Delete to complete
+ # Before closing the channel
+ channel.close()
+
+ def on_queue_deleted(self, _frame):
+ # Getting this callback shows that the blocked request was processed
+ self._queue_deleted = True
+
+ def on_channel_closed(self, _channel, _reply_code, _reply_text):
+ self.assertTrue(self._queue_deleted)
+ self.stop()
+
+
+class TestQueueUnnamedDeclareAndDelete(AsyncTestCase, AsyncAdapters):
+ DESCRIPTION = "Create and delete an unnamed queue"
def begin(self, channel):
channel.queue_declare(queue='',
@@ -673,11 +710,11 @@ class TestQueueDeclareAndDelete(AsyncTestCase, AsyncAdapters):
def on_queue_delete(self, frame):
self.assertIsInstance(frame.method, spec.Queue.DeleteOk)
+ # NOTE: with event loops that suppress exceptions from callbacks
self.stop()
-
-class TestQueueNameDeclareAndDelete(AsyncTestCase, AsyncAdapters):
+class TestQueueNamedDeclareAndDelete(AsyncTestCase, AsyncAdapters):
DESCRIPTION = "Create and delete a named queue"
def begin(self, channel):
@@ -701,7 +738,6 @@ class TestQueueNameDeclareAndDelete(AsyncTestCase, AsyncAdapters):
self.stop()
-
class TestQueueRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters):
DESCRIPTION = "Should close chan: re-declared queue w/ diff params"
@@ -745,7 +781,6 @@ class TestTX1_Select(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
self.stop()
-
class TestTX2_Commit(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
DESCRIPTION = "Start a transaction, and commit it"
diff --git a/tests/acceptance/blocking_adapter_test.py b/tests/acceptance/blocking_adapter_test.py
index d79ded2..d0ed48e 100644
--- a/tests/acceptance/blocking_adapter_test.py
+++ b/tests/acceptance/blocking_adapter_test.py
@@ -50,7 +50,6 @@ def setUpModule():
logging.basicConfig(level=logging.DEBUG)
-#@unittest.skip('SKIPPING WHILE DEBUGGING SOME CHANGES. DO NOT MERGE LIKE THIS')
class BlockingTestCaseBase(unittest.TestCase):
TIMEOUT = DEFAULT_TIMEOUT
@@ -355,6 +354,16 @@ class TestCreateAndCloseConnectionWithChannelAndConsumer(BlockingTestCaseBase):
self.assertFalse(ch._impl._consumers)
+class TestUsingInvalidQueueArgument(BlockingTestCaseBase):
+ def test(self):
+ """BlockingConnection raises expected exception when invalid queue parameter is used
+ """
+ connection = self._connect()
+ ch = connection.channel()
+ with self.assertRaises(AssertionError):
+ ch.queue_declare(queue=[1, 2, 3])
+
+
class TestSuddenBrokerDisconnectBeforeChannel(BlockingTestCaseBase):
def test(self):
diff --git a/tests/unit/channel_tests.py b/tests/unit/channel_tests.py
index 10e594e..dc353ef 100644
--- a/tests/unit/channel_tests.py
+++ b/tests/unit/channel_tests.py
@@ -1587,3 +1587,18 @@ class ChannelTests(unittest.TestCase):
self.assertRaises(TypeError,
self.obj._validate_rpc_completion_callback,
'foo')
+
+ def test_no_side_effects_from_send_method_error(self):
+ self.obj._set_state(self.obj.OPEN)
+
+ self.assertIsNone(self.obj._blocking)
+
+ with mock.patch.object(self.obj.callbacks, 'add') as cb_add_mock:
+ with mock.patch.object(self.obj, '_send_method',
+ side_effect=TypeError) as send_method_mock:
+ with self.assertRaises(TypeError):
+ self.obj.queue_delete('', callback=lambda _frame: None)
+
+ self.assertEqual(send_method_mock.call_count, 1)
+ self.assertIsNone(self.obj._blocking)
+ self.assertEqual(cb_add_mock.call_count, 0)
diff --git a/tests/unit/connection_tests.py b/tests/unit/connection_tests.py
index 19df873..04fd543 100644
--- a/tests/unit/connection_tests.py
+++ b/tests/unit/connection_tests.py
@@ -983,3 +983,31 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
# Make sure _detect_backpressure doesn't throw
self.connection._detect_backpressure()
+
+
+ def test_no_side_effects_from_message_marshal_error(self):
+ # Verify that frame buffer is empty on entry
+ self.assertEqual(b'', self.connection._frame_buffer)
+
+ # Use Basic.Publish with invalid body to trigger marshalling error
+ method = spec.Basic.Publish()
+ properties = spec.BasicProperties()
+ # Verify that marshalling of method and header won't trigger error
+ frame.Method(1, method).marshal()
+ frame.Header(1, body_size=10, props=properties).marshal()
+ # Create bogus body that should trigger an error during marshalling
+ body = [1,2,3,4]
+ # Verify that frame body can be created using the bogus body, but
+ # that marshalling will fail
+ frame.Body(1, body)
+ with self.assertRaises(TypeError):
+ frame.Body(1, body).marshal()
+
+ # Now, attempt to send the method with the bogus body
+ with self.assertRaises(TypeError):
+ self.connection._send_method(channel_number=1,
+ method=method,
+ content=(properties, body))
+
+ # Now make sure that nothing is enqueued on frame buffer
+ self.assertEqual(b'', self.connection._frame_buffer)
diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py
index 9f228f5c5..21ecbc0ee 100644
--- a/nipype/utils/nipype2boutiques.py
+++ b/nipype/utils/nipype2boutiques.py
@@ -2,7 +2,7 @@
from __future__ import (print_function, division, unicode_literals,
absolute_import)
-from builtins import str, open
+from builtins import str, open, bytes
# This tool exports a Nipype interface in the Boutiques (https://github.com/boutiques) JSON format.
# Boutiques tools can be imported in CBRAIN (https://github.com/aces/cbrain) among other platforms.
#
@@ -40,10 +40,12 @@ def generate_boutiques_descriptor(
raise Exception("Undefined module.")
# Retrieves Nipype interface
- if isinstance(module, str):
+ if isinstance(module, (str, bytes)):
import_module(module)
module_name = str(module)
module = sys.modules[module]
+ else:
+ module_name = str(module.__name__)
interface = getattr(module, interface_name)()
inputs = interface.input_spec()
@@ -249,7 +251,7 @@ def create_tempfile():
Creates a temp file and returns its name.
'''
fileTemp = tempfile.NamedTemporaryFile(delete=False)
- fileTemp.write("hello")
+ fileTemp.write(b"hello")
fileTemp.close()
return fileTemp.name
@@ -283,6 +285,8 @@ def must_generate_value(name, type, ignored_template_inputs, spec_info, spec,
# Best guess to detect string restrictions...
if "' or '" in spec_info:
return False
+ if spec.default or spec.default_value():
+ return False
if not ignored_template_inputs:
return True
return not (name in ignored_template_inputs)
UnboundLocalError: local variable 'module_name' referenced before assignment
### Summary
I discovered `nipypecli` for myself and decided to give it a try, composing the cmdline invocation by following the errors it was spitting out at me, and stopping when an error didn't give a hint about what I could have specified incorrectly:
```
$> nipypecli convert boutiques -m nipype.interfaces.ants.registration -i ANTS -o test
Traceback (most recent call last):
File "/usr/bin/nipypecli", line 11, in <module>
load_entry_point('nipype==1.0.1', 'console_scripts', 'nipypecli')()
File "/usr/lib/python2.7/dist-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/lib/python2.7/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python2.7/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python2.7/dist-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python2.7/dist-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/nipype/scripts/cli.py", line 254, in boutiques
verbose, ignore_template_numbers)
File "/usr/lib/python2.7/dist-packages/nipype/utils/nipype2boutiques.py", line 56, in generate_boutiques_descriptor
'command-line'] = "nipype_cmd " + module_name + " " + interface_name + " "
UnboundLocalError: local variable 'module_name' referenced before assignment
```
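A minimal sketch of the fixed branching (simplified from the patch; the helper name is made up). The key point is that `module_name` is now assigned on both paths:
```python
import sys
from importlib import import_module

def resolve_module(module):
    if isinstance(module, str):
        import_module(module)
        return str(module), sys.modules[module]
    # previously this branch never assigned module_name -> UnboundLocalError
    return str(module.__name__), module

print(resolve_module('json')[0])  # 'json'
import json
print(resolve_module(json)[0])    # 'json'
```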
diff --git a/esrally/config.py b/esrally/config.py
index a78ebf77..b8e0d79d 100644
--- a/esrally/config.py
+++ b/esrally/config.py
@@ -447,6 +447,7 @@ class ConfigFactory:
# the Elasticsearch directory is just the last path component (relative to the source root directory)
config["source"]["elasticsearch.src.subdir"] = io.basename(source_dir)
+ if gradle_bin:
config["build"] = {}
config["build"]["gradle.bin"] = gradle_bin
diff --git a/esrally/track/loader.py b/esrally/track/loader.py
index 513b1e4f..d04de55e 100644
--- a/esrally/track/loader.py
+++ b/esrally/track/loader.py
@@ -757,9 +757,10 @@ class TrackSpecificationReader:
Creates a track instances based on its parsed JSON description.
"""
- def __init__(self, override_auto_manage_indices=None, source=io.FileSource):
+ def __init__(self, override_auto_manage_indices=None, track_params=None, source=io.FileSource):
self.name = None
self.override_auto_manage_indices = override_auto_manage_indices
+ self.track_params = track_params if track_params else {}
self.source = source
self.index_op_type_warning_issued = False
@@ -770,7 +771,7 @@ class TrackSpecificationReader:
meta_data = self._r(track_specification, "meta", mandatory=False)
indices = [self._create_index(idx, mapping_dir)
for idx in self._r(track_specification, "indices", mandatory=False, default_value=[])]
- templates = [self._create_template(tpl, mapping_dir)
+ templates = [self._create_index_template(tpl, mapping_dir)
for tpl in self._r(track_specification, "templates", mandatory=False, default_value=[])]
corpora = self._create_corpora(self._r(track_specification, "corpora", mandatory=False, default_value=[]), indices)
# TODO: Remove this in Rally 0.10.0
@@ -816,7 +817,7 @@ class TrackSpecificationReader:
body_file = self._r(index_spec, "body", mandatory=False)
if body_file:
with self.source(os.path.join(mapping_dir, body_file), "rt") as f:
- body = json.load(f)
+ body = self._load_template(f.read(), index_name)
else:
body = None
@@ -832,15 +833,25 @@ class TrackSpecificationReader:
return track.Index(name=index_name, body=body, auto_managed=auto_managed, types=types)
- def _create_template(self, tpl_spec, mapping_dir):
+ def _create_index_template(self, tpl_spec, mapping_dir):
name = self._r(tpl_spec, "name")
index_pattern = self._r(tpl_spec, "index-pattern")
delete_matching_indices = self._r(tpl_spec, "delete-matching-indices", mandatory=False, default_value=True)
template_file = os.path.join(mapping_dir, self._r(tpl_spec, "template"))
with self.source(template_file, "rt") as f:
- template_content = json.load(f)
+ template_content = self._load_template(f.read(), name)
return track.IndexTemplate(name, index_pattern, template_content, delete_matching_indices)
+ def _load_template(self, contents, description):
+ try:
+ rendered = render_template(loader=jinja2.DictLoader({"default": contents}),
+ template_name="default",
+ template_vars=self.track_params)
+ return json.loads(rendered)
+ except (json.JSONDecodeError, jinja2.exceptions.TemplateError) as e:
+ logger.exception("Could not load file template for %s." % description)
+ raise TrackSyntaxError("Could not load file template for '%s'" % description, str(e))
+
def _create_corpora(self, corpora_specs, indices):
document_corpora = []
known_corpora_names = set()
Allow to use track parameters also in index / template definitions
Currently, index definitions are read as-is. However, it can be useful to allow track parameters in index definition files as well. For consistency, we should allow the same for index templates.
Technically, this means that we will treat index definition files also as Jinja templates.
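A minimal illustration of that approach: render the file contents as a Jinja2 template with the track parameters, then JSON-parse the result (template body and parameter name are made up):
```python
import json
import jinja2

contents = '{"settings": {"number_of_shards": {{ shard_count | default(5) }}}}'
track_params = {"shard_count": 1}

rendered = jinja2.Template(contents).render(track_params)
print(json.loads(rendered))  # {'settings': {'number_of_shards': 1}}
```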
avirshup: Here's my stab at a fix for #219. Please let me know if this seems reasonable and/or if there are any changes that should be made.
diff --git a/setuptools_scm/hg.py b/setuptools_scm/hg.py
index 9322fb3..0ba1774 100644
--- a/setuptools_scm/hg.py
+++ b/setuptools_scm/hg.py
@@ -8,8 +8,14 @@ FILES_COMMAND = 'hg locate -I .'
def _hg_tagdist_normalize_tagcommit(root, tag, dist, node):
dirty = node.endswith('+')
node = 'h' + node.strip('+')
- revset = ("(branch(.) and tag({tag!r})::. and file('re:^(?!\.hgtags).*$')"
- " - tag({tag!r}))").format(tag=tag)
+
+ # Detect changes since the specified tag
+ revset = ("(branch(.)" # look for revisions in this branch only
+ " and tag({tag!r})::." # after the last tag
+ # ignore commits that only modify .hgtags and nothing else:
+ " and (merge() or file('re:^(?!\.hgtags).*$'))"
+ " and not tag({tag!r}))" # ignore the tagged commit itself
+ ).format(tag=tag)
if tag != '0.0':
commits = do(['hg', 'log', '-r', revset, '--template', '{node|short}'],
root)
Regression: hg repos with setuptools_scm>=1.15 are missing dev version bumps
`setuptools_scm` 1.15.* is reporting incorrect version numbers for my hg repositories.
Specifically, a merge commit that follows a tag commit does not seem to trigger the version bump like I would expect. In the screenshot below, for instance, I would expect the tip to be version `1.1.dev[N]+[sha]`. However, versions 1.15.* report the tip's version as `1.0`.
To reproduce:
```bash
mkdir hgmergetest && cd hgmergetest && hg init
# create initial commit and tag it
touch a && hg add a && hg commit -m "initial commit"
hg tag 1.0
# create a branch
hg branch branch1
touch b && hg add b && hg commit -m "create branch1"
# merge the branch into default
hg update default && hg merge branch1 && hg commit -m "merge branch1 into default"
```
Expected behavior (setuptools_scm 1.13 and 1.14):
```bash
> pip install "setuptools_scm<1.15"
> python -m setuptools_scm
Guessed Version 1.1.dev3+n8dce1535e70a
```
Behavior with `1.15.*`:
```bash
> pip install setuptools_scm==1.15.7
> python -m setuptools_scm
Guessed Version 1.0
```
[screenshot of the repository history omitted]
pypa/setuptools_scm
diff --git a/testing/test_mercurial.py b/testing/test_mercurial.py
index 1fe6841..1d91444 100644
--- a/testing/test_mercurial.py
+++ b/testing/test_mercurial.py
@@ -108,3 +108,47 @@ def test_version_in_merge(wd):
def test_parse_no_worktree(tmpdir):
ret = parse(str(tmpdir))
assert ret is None
+
+
[email protected]
+def version_1_0(wd):
+ wd('hg branch default')
+ wd.commit_testfile()
+ wd('hg tag 1.0 -u test -d "0 0"')
+ return wd
+
+
[email protected]
+def pre_merge_commit_after_tag(wd, version_1_0):
+ wd('hg branch testbranch')
+ wd.write('branchfile', 'branchtext')
+ wd(wd.add_command)
+ wd.commit()
+ wd('hg update default')
+ wd('hg merge testbranch')
+ return wd
+
+
[email protected]("pre_merge_commit_after_tag")
+def test_version_bump_before_merge_commit(wd):
+ assert wd.version.startswith('1.1.dev1+')
+
+
[email protected](219)
[email protected]("pre_merge_commit_after_tag")
+def test_version_bump_from_merge_commit(wd):
+ wd.commit()
+ assert wd.version.startswith('1.1.dev3+') # issue 219
+
+
[email protected]("version_1_0")
+def test_version_bump_from_commit_including_hgtag_mods(wd):
+ """ Test the case where a commit includes changes to .hgtags and other files
+ """
+ with wd.cwd.join('.hgtags').open('a') as tagfile:
+ tagfile.write('0 0\n')
+ wd.write('branchfile', 'branchtext')
+ wd(wd.add_command)
+ assert wd.version.startswith('1.1.dev1+') # bump from dirty version
+ wd.commit() # commits both the testfile _and_ .hgtags
+ assert wd.version.startswith('1.1.dev2+')
diff --git a/samples/download_view_image.py b/samples/download_view_image.py
index 2da2320..b95a862 100644
--- a/samples/download_view_image.py
+++ b/samples/download_view_image.py
@@ -43,7 +43,7 @@ def main():
tableau_auth = TSC.TableauAuth(args.username, password, site_id=site_id)
server = TSC.Server(args.server)
# The new endpoint was introduced in Version 2.5
- server.version = 2.5
+ server.version = "2.5"
with server.auth.sign_in(tableau_auth):
# Step 2: Query for the view that we want an image of
diff --git a/tableauserverclient/server/endpoint/endpoint.py b/tableauserverclient/server/endpoint/endpoint.py
index deaa94a..e78b2e0 100644
--- a/tableauserverclient/server/endpoint/endpoint.py
+++ b/tableauserverclient/server/endpoint/endpoint.py
@@ -27,6 +27,17 @@ class Endpoint(object):
return headers
+ @staticmethod
+ def _safe_to_log(server_response):
+ '''Checks if the server_response content is not xml (e.g. binary image or zip)
+ and replaces it with a constant
+ '''
+ ALLOWED_CONTENT_TYPES = ('application/xml',)
+ if server_response.headers.get('Content-Type', None) not in ALLOWED_CONTENT_TYPES:
+ return '[Truncated File Contents]'
+ else:
+ return server_response.content
+
def _make_request(self, method, url, content=None, request_object=None,
auth_token=None, content_type=None, parameters=None):
if request_object is not None:
@@ -50,7 +61,7 @@ class Endpoint(object):
return server_response
def _check_status(self, server_response):
- logger.debug(server_response.content)
+ logger.debug(self._safe_to_log(server_response))
if server_response.status_code not in Success_codes:
raise ServerResponseError.from_response(server_response.content, self.parent_srv.namespace)
This log line is overly chatty
https://github.com/tableau/server-client-python/blob/608aa7694d0560ea3c8c37b10127b11207e56e8d/tableauserverclient/server/endpoint/endpoint.py#L53
When using server-client-python to download workbooks or data sources with log_level=Debug, this log line ends up blowing up your logs: it outputs the hexadecimal representation of the entire file you're downloading, which is not very helpful and explodes your log size. Can we remove this line, or only log the response contents when the endpoint is not being used to download a file?
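A minimal, standalone sketch of the content-type guard the patch above adds; `FakeResponse` is an illustrative stand-in for the real response object, not the library's type:
```python
# Only log response bodies that are XML; replace binary payloads with a marker.
ALLOWED_CONTENT_TYPES = ('application/xml',)

def safe_to_log(response):
    if response.headers.get('Content-Type', None) not in ALLOWED_CONTENT_TYPES:
        return '[Truncated File Contents]'
    return response.content

class FakeResponse:
    """Illustrative stand-in for an HTTP response object."""
    def __init__(self, content_type, content):
        self.headers = {'Content-Type': content_type}
        self.content = content

print(safe_to_log(FakeResponse('application/xml', b'<tsResponse/>')))      # logged as-is
print(safe_to_log(FakeResponse('application/octet-stream', b'\x89PNG')))   # '[Truncated File Contents]'
```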
darcymason: I had a quick look at this and it looked fine, but then I tried commenting out the 'continue' lines to make the test fail, and got some strange warnings from values.py about unexpected length.
I've been travelling and haven't had a chance to try again or investigate further. Will look into it when I can, or perhaps someone else can give it a try -- I may have set something up incorrectly.
mrbean-bremen: Hm, if I do the same (e.g. replace `continue` with `pass`) I get:
```
def test_write_removes_grouplength(self):
ds = dcmread(color_pl_name)
assert 0x00080000 in ds
ds.save_as(self.file_out, write_like_original=True)
self.file_out.seek(0)
ds = dcmread(self.file_out)
# group length has been removed
> assert 0x00080000 not in ds
E AssertionError: assert 524288 not in (0008, 0000) Group Length UL: 480\n(0008, 0008) Image Type CS: ['ORIGIN...Group Length UL: 92172\n(7fe0, 0010) Pixel Data OW: Array of 92160 bytes
```
which looks ok to me.
darcymason: Okay, I finally cracked this - it had nothing to do with this latest code, but I couldn't leave the mystery. Well, it is still a little bit of a mystery.
It went away if I removed the other classes from test_filewriter.py. The second clue was that the errors and warnings appeared twice as often as expected. Finally I noticed they were coming both from class WriteFileTests and from ScratchWriteDateTimeTests, and then that the latter is *derived* from the former. Changing it to subclass unittest.TestCase made my issue go away, and the tests run normally with the `continue` line put back as well. I haven't tried to figure out exactly why it causes trouble in both classes when one is derived from the other, but it certainly is the source. I suspect it is something to do with the temp file not being reset, and maybe the order of events depends on platform (I was testing on Windows, Python 3.6.4).
So... @mrbean-bremen, if you don't mind updating that one line, then I'd be happy to merge this.
scaramallion: It looks like the change was made so that the `WriteFileTests` tests are run again but with `config.datetime_conversion = True`
darcymason: Yes, I think you are right; I remember that now that you bring it up. But somehow that is not working out when the new test fails. Well, it shouldn't fail, of course, so maybe it is a moot point, but the test code does seem a bit fragile. Personally I like the 'test first' philosophy, where you write the new unit test (so that it fails) before fixing the main code.
darcymason: ... I'll merge this and add a separate issue for the test code problem.
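For anyone puzzled by the double runs described above, a minimal sketch (class names here are illustrative) of the unittest behavior involved: a TestCase that subclasses another TestCase re-collects and re-runs all inherited test methods, so shared fixtures such as temp files get exercised twice.
```python
import unittest

class BaseTests(unittest.TestCase):
    def test_shared(self):
        # Collected and run once for BaseTests...
        self.assertTrue(True)

class DerivedTests(BaseTests):
    # ...and again here, because the method is inherited.
    # Any fixture BaseTests sets up (e.g. a temp file) is used twice.
    pass

if __name__ == '__main__':
    unittest.main(verbosity=2)  # output shows test_shared executing twice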
diff --git a/pydicom/filewriter.py b/pydicom/filewriter.py
index 01d9a4911..e283da86f 100644
--- a/pydicom/filewriter.py
+++ b/pydicom/filewriter.py
@@ -458,6 +458,9 @@ def write_dataset(fp, dataset, parent_encoding=default_encoding):
tags = sorted(dataset.keys())
for tag in tags:
+ # do not write retired Group Length (see PS3.5, 7.2)
+ if tag.element == 0 and tag.group > 6:
+ continue
with tag_in_exception(tag):
# write_data_element(fp, dataset.get_item(tag), dataset_encoding)
# XXX for writing raw tags without converting to DataElement
Update filewriter group length fields
_From [[email protected]](https://code.google.com/u/[email protected]/) on December 10, 2008 22:48:30_
It looks like the filewriter module does not recalculate group length fields except
for the file meta info section. This should be done for all groups.
_Original issue: http://code.google.com/p/pydicom/issues/detail?id=30_
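As background for the patch above: a DICOM tag packs a 16-bit group and a 16-bit element into one 32-bit value, and a group length element is any tag whose element is 0x0000. A rough sketch of the skip condition, using 0x00080000 from the test as the worked example:
```python
# Decompose a 32-bit DICOM tag and flag retired group length elements.
def is_retired_group_length(tag):
    group = (tag >> 16) & 0xFFFF
    element = tag & 0xFFFF
    # Group lengths are retired per PS3.5, 7.2; low groups (command,
    # file meta, directory) are handled elsewhere, hence group > 6.
    return element == 0 and group > 6

print(hex(0x00080000 >> 16))                 # 0x8 -> group 0x0008
print(is_retired_group_length(0x00080000))   # True: skipped when writing
print(is_retired_group_length(0x00080008))   # False: Image Type, written normally
```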
pydicom/pydicom
diff --git a/pydicom/tests/test_filewriter.py b/pydicom/tests/test_filewriter.py
index f2bac9a81..362704140 100644
--- a/pydicom/tests/test_filewriter.py
+++ b/pydicom/tests/test_filewriter.py
@@ -39,6 +39,7 @@ ct_name = get_testdata_files("CT_small.dcm")[0]
mr_name = get_testdata_files("MR_small.dcm")[0]
jpeg_name = get_testdata_files("JPEG2000.dcm")[0]
no_ts = get_testdata_files("meta_missing_tsyntax.dcm")[0]
+color_pl_name = get_testdata_files("color-pl.dcm")[0]
datetime_name = mr_name
unicode_name = get_charset_files("chrH31.dcm")[0]
@@ -194,6 +195,15 @@ class WriteFileTests(unittest.TestCase):
ds = dcmread(fp, force=True)
assert ds[0xFFFFFFFF].value == b'123456'
+ def test_write_removes_grouplength(self):
+ ds = dcmread(color_pl_name)
+ assert 0x00080000 in ds
+ ds.save_as(self.file_out, write_like_original=True)
+ self.file_out.seek(0)
+ ds = dcmread(self.file_out)
+ # group length has been removed
+ assert 0x00080000 not in ds
+
class ScratchWriteDateTimeTests(WriteFileTests):
"""Write and reread simple or multi-value DA/DT/TM data elements"""
diff --git a/cfn_flip/yaml_dumper.py b/cfn_flip/yaml_dumper.py
index 85b287d..2a3a764 100644
--- a/cfn_flip/yaml_dumper.py
+++ b/cfn_flip/yaml_dumper.py
@@ -15,7 +15,9 @@ See the License for the specific language governing permissions and limitations
from cfn_clean.yaml_dumper import CleanCfnYamlDumper
from cfn_tools.odict import ODict
from cfn_tools.yaml_dumper import CfnYamlDumper
+import six
+TAG_STR = "tag:yaml.org,2002:str"
TAG_MAP = "tag:yaml.org,2002:map"
CONVERTED_SUFFIXES = ["Ref", "Condition"]
@@ -46,6 +48,13 @@ class LongCleanDumper(CleanCfnYamlDumper):
"""
+def string_representer(dumper, value):
+ if value.startswith("0"):
+ return dumper.represent_scalar(TAG_STR, value, style="'")
+
+ return dumper.represent_scalar(TAG_STR, value)
+
+
def fn_representer(dumper, fn_name, value):
tag = "!{}".format(fn_name)
@@ -82,6 +91,7 @@ def map_representer(dumper, value):
# Customise our dumpers
Dumper.add_representer(ODict, map_representer)
+Dumper.add_representer(six.text_type, string_representer)
CleanDumper.add_representer(ODict, map_representer)
Inconsistent conversion of strings from json to yaml
I am converting a document from JSON to YAML as part of a CloudFormation template, and am noticing an odd inconsistency: some IDs that are marked as strings come out quoted as strings, and others do not.
Here's a json snippet I'm working with right now which are the mappings for some of the Generic Elastic Load Balancer ID's for AWS:
``` "Mappings": {
"Regions": {
"us-east-1": {
"ELBID": "127311923021",
"Name": "ue1"
},
"us-east-2": {
"ELBID": "033677994240",
"Name": "ue2"
},
"us-west-1": {
"ELBID": "027434742980",
"Name": "uw1"
},
"us-west-2": {
"ELBID": "797873946194",
"Name": "uw2"
}
}
}
```
And This is the resulting yaml I'm getting after calling to_yaml:
```
Mappings:
Regions:
us-east-1:
ELBID: '127311923021'
Name: ue1
us-east-2:
ELBID: 033677994240
Name: ue2
us-west-1:
ELBID: 027434742980
Name: uw1
us-west-2:
ELBID: '797873946194'
Name: uw2
```
Strangely enough, the values beginning with 0 lose their quoting, while the ones beginning with other digits keep it. I'm not sure what the expected behavior should be in this case (either all quoted or none), but having it half and half is inconsistent, and I believe it is a bug.
Currently I'm having errors using this YAML with sceptre/CloudFormation because some of the Elastic Load Balancer IDs are not strings.
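A minimal standalone sketch of the representer approach the patch above takes: register a custom string representer so leading-zero strings are always single-quoted, rather than relying on PyYAML's default resolution (which only quotes strings that would otherwise parse as numbers):
```python
import yaml

TAG_STR = "tag:yaml.org,2002:str"

def string_representer(dumper, value):
    # Force quotes on leading-zero strings so loaders cannot reinterpret
    # them (e.g. as octal) or silently drop the leading zero.
    if value.startswith("0"):
        return dumper.represent_scalar(TAG_STR, value, style="'")
    return dumper.represent_scalar(TAG_STR, value)

yaml.add_representer(str, string_representer)

print(yaml.dump({"ELBID": "033677994240"}), end="")  # ELBID: '033677994240'
print(yaml.dump({"ELBID": "797873946194"}), end="")  # ELBID: '797873946194'
```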
awslabs/aws-cfn-template-flip
diff --git a/tests/test_flip.py b/tests/test_flip.py
index c479a20..5ac0cee 100644
--- a/tests/test_flip.py
+++ b/tests/test_flip.py
@@ -502,5 +502,39 @@ def test_get_dumper():
When invoking get_dumper use clean_up & long_form
:return: LongCleanDumper
"""
+
resp = cfn_flip.get_dumper(clean_up=True, long_form=True)
assert resp == cfn_flip.yaml_dumper.LongCleanDumper
+
+
+def test_quoted_digits():
+ """
+ Any value that is composed entirely of digits
+ should be quoted for safety.
+ CloudFormation is happy for numbers to appear as strings.
+ But the opposite (e.g. account numbers as numbers) can cause issues
+ See https://github.com/awslabs/aws-cfn-template-flip/issues/41
+ """
+
+ value = dump_json(ODict((
+ ("int", 123456),
+ ("float", 123.456),
+ ("oct", "0123456"),
+ ("bad-oct", "012345678"),
+ ("safe-oct", "0o123456"),
+ ("string", "abcdef"),
+ )))
+
+ expected = "\n".join((
+ "int: 123456",
+ "float: 123.456",
+ "oct: '0123456'",
+ "bad-oct: '012345678'",
+ "safe-oct: '0o123456'",
+ "string: abcdef",
+ ""
+ ))
+
+ actual = cfn_flip.to_yaml(value)
+
+ assert actual == expected
diff --git a/pydatajson/ckan_utils.py b/pydatajson/ckan_utils.py
index f71570d..9724f44 100644
--- a/pydatajson/ckan_utils.py
+++ b/pydatajson/ckan_utils.py
@@ -1,6 +1,8 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
+import re
+import sys
from datetime import time
from dateutil import parser, tz
from .helpers import title_to_name
@@ -14,7 +16,7 @@ def append_attribute_to_extra(package, dataset, attribute, serialize=False):
package['extras'].append({'key': attribute, 'value': value})
-def map_dataset_to_package(dataset, owner_org, theme_taxonomy, catalog_id=None,
+def map_dataset_to_package(catalog, dataset, owner_org, catalog_id=None,
demote_superThemes=True, demote_themes=True):
package = dict()
package['extras'] = []
@@ -66,7 +68,8 @@ def map_dataset_to_package(dataset, owner_org, theme_taxonomy, catalog_id=None,
if themes and demote_themes:
package['tags'] = package.get('tags', [])
for theme in themes:
- label = next(x['label'] for x in theme_taxonomy if x['id'] == theme)
+ label = catalog.get_theme(identifier=theme)['label']
+ label = re.sub(r'[^\wá-úÁ-ÚñÑ .-]+', '', label, flags=re.UNICODE)
package['tags'].append({'name': label})
else:
package['groups'] = package.get('groups', []) + [{'name': title_to_name(theme, decode=False)}
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index 2c31623..9573040 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -29,9 +29,8 @@ def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier, portal_u
"""
dataset = catalog.get_dataset(dataset_origin_identifier)
ckan_portal = RemoteCKAN(portal_url, apikey=apikey)
- theme_taxonomy = catalog.themes
- package = map_dataset_to_package(dataset, owner_org, theme_taxonomy, catalog_id,
+ package = map_dataset_to_package(catalog, dataset, owner_org, catalog_id,
demote_superThemes, demote_themes)
# Get license id
diff --git a/pydatajson/search.py b/pydatajson/search.py
index 0ab6391..1c3d25a 100644
--- a/pydatajson/search.py
+++ b/pydatajson/search.py
@@ -280,7 +280,7 @@ def get_theme(catalog, identifier=None, label=None):
# filtra por id (preferentemente) o label
if identifier:
- filtered_themes = [theme for theme in themes if theme["id"] == identifier]
+ filtered_themes = [theme for theme in themes if theme["id"].lower() == identifier.lower()]
if len(filtered_themes) > 1:
raise ThemeIdRepeated([x["id"] for x in filtered_themes])
Make push_dataset_to_ckan()'s handling of themes more robust
This issue refers to monitoreo-apertura issue 24:
https://github.com/datosgobar/monitoreo-apertura/issues/24
I'm opening it here because I think this repo is the more appropriate place to discuss it.
--------------------------------------------------------------------------------------------------
**Context**
An attempt was made to federate the time-series dataset to the andino.datos.gob.ar instance, as follows:
```
catalog_modernizacion = DataJson("http://infra.datos.gob.ar/catalog/modernizacion/data.json")
catalog_modernizacion.push_dataset_to_ckan(
"modernizacion", "ministerio-de-modernizacion", "7",
portal_url, apikey
)
```
and it gave this response:
```
---------------------------------------------------------------------------
StopIteration Traceback (most recent call last)
<ipython-input-8-b4c2dae3c9ec> in <module>()
1 catalog_modernizacion.push_dataset_to_ckan(
2 "modernizacion", "ministerio-de-modernizacion", "7",
----> 3 portal_url, apikey
4 )
/Users/abenassi/github/pydatajson/pydatajson/federation.pyc in push_dataset_to_ckan(catalog, catalog_id, owner_org, dataset_origin_identifier, portal_url, apikey)
49 theme_taxonomy = catalog.themes
50 for theme in themes:
---> 51 label = next(x['label'] for x in theme_taxonomy if x['id'] == theme)
52 package['tags'].append({'name': label})
53
StopIteration:
```
**Proposal**
Investigate what caused the operation to halt. The problem may be that it requires creating "themes" that the target instance does not have. If so, the best approach would be to build automatic creation of missing themes into the federation operation.
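A minimal sketch of the two changes the patch applies, with a made-up taxonomy standing in for the catalog's real `data.json`: look up themes case-insensitively instead of failing with `StopIteration`, and strip characters CKAN would reject from the label before using it as a tag:
```python
import re

# Illustrative taxonomy; the real one comes from the catalog's data.json.
THEMES = [
    {"id": "compras", "label": "Adquisición"},
    {"id": "proveedores", "label": "Proveeduría"},
]

def get_theme(identifier):
    # Case-insensitive id match, mirroring the change in search.py.
    matches = [t for t in THEMES if t["id"].lower() == identifier.lower()]
    return matches[0] if matches else None

theme = get_theme("Compras")  # would have raised StopIteration before
label = re.sub(r'[^\wá-úÁ-ÚñÑ .-]+', '', theme["label"], flags=re.UNICODE)
print(label)  # Adquisición
```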
datosgobar/pydatajson
diff --git a/tests/samples/full_data.json b/tests/samples/full_data.json
index b349578..e80673c 100644
--- a/tests/samples/full_data.json
+++ b/tests/samples/full_data.json
@@ -193,7 +193,7 @@
"id": "convocatorias"
},
{
- "label": "Compras",
+ "label": "Adquisición",
"description": "Datasets sobre compras realizadas.",
"id": "compras"
},
@@ -213,7 +213,7 @@
"id": "normativa"
},
{
- "label": "Proveedores",
+ "label": "Proveeduría",
"description": "Datasets sobre proveedores del Estado.",
"id": "proveedores"
}
diff --git a/tests/test_ckan_utils.py b/tests/test_ckan_utils.py
index 3f20984..f90406e 100644
--- a/tests/test_ckan_utils.py
+++ b/tests/test_ckan_utils.py
@@ -1,6 +1,10 @@
+# -*- coding: utf-8 -*-
+
import unittest
import os
import json
+import re
+import sys
from dateutil import parser, tz
from .context import pydatajson
from pydatajson.ckan_utils import map_dataset_to_package, map_distributions_to_resources, convert_iso_string_to_utc
@@ -23,15 +27,15 @@ class DatasetConversionTestCase(unittest.TestCase):
cls.distributions = cls.dataset['distribution']
def test_catalog_id_is_prepended_to_dataset_id_if_passed(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
self.assertEqual(self.catalog_id + '_' + self.dataset_id, package['id'])
def test_dataset_id_is_preserved_if_catlog_id_is_not_passed(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner')
self.assertEqual(self.dataset_id, package['id'])
def test_replicated_plain_attributes_are_corrext(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
plain_replicated_attributes = [('title', 'title'),
('notes', 'description'),
('url', 'landingPage')]
@@ -40,7 +44,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertEqual('owner', package['owner_org'])
def test_dataset_nested_replicated_attributes_stay_the_same(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
contact_point_nested = [('maintainer', 'fn'),
('maintainer_email', 'hasEmail')]
for fst, snd in contact_point_nested:
@@ -51,7 +55,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertEqual(self.dataset.get('publisher').get(snd), package.get(fst))
def test_dataset_array_attributes_are_correct(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
groups = [group['name'] for group in package.get('groups', [])]
super_themes = [title_to_name(s_theme.lower()) for s_theme in self.dataset.get('superTheme')]
try:
@@ -65,7 +69,8 @@ class DatasetConversionTestCase(unittest.TestCase):
themes = self.dataset.get('theme', [])
theme_labels = []
for theme in themes:
- label = next(x['label'] for x in self.catalog.themes if x['id'] == theme)
+ label = self.catalog.get_theme(identifier=theme)['label']
+ label = re.sub(r'[^\w .-]+', '', label, flags=re.UNICODE)
theme_labels.append(label)
try:
@@ -74,7 +79,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertCountEqual(keywords + theme_labels, tags)
def test_themes_are_preserved_if_not_demoted(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes,
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner',
catalog_id=self.catalog_id, demote_themes=False)
groups = [group['name'] for group in package.get('groups', [])]
super_themes = [title_to_name(s_theme.lower()) for s_theme in self.dataset.get('superTheme')]
@@ -92,7 +97,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertCountEqual(keywords, tags)
def test_superThemes_dont_impact_groups_if_not_demoted(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes,
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner',
catalog_id=self.catalog_id, demote_superThemes=False)
groups = [group['name'] for group in package.get('groups', [])]
tags = [tag['name'] for tag in package['tags']]
@@ -100,7 +105,8 @@ class DatasetConversionTestCase(unittest.TestCase):
themes = self.dataset.get('theme', [])
theme_labels = []
for theme in themes:
- label = next(x['label'] for x in self.catalog.themes if x['id'] == theme)
+ label = self.catalog.get_theme(identifier=theme)['label']
+ label = re.sub(r'[^\wá-úÁ-ÚñÑ .-]+', '', label, flags=re.UNICODE)
theme_labels.append(label)
try:
self.assertItemsEqual([], groups)
@@ -112,7 +118,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertCountEqual(keywords + theme_labels, tags)
def test_preserve_themes_and_superThemes(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes,
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner',
self.catalog_id, False, False)
groups = [group['name'] for group in package.get('groups', [])]
tags = [tag['name'] for tag in package['tags']]
@@ -128,7 +134,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertCountEqual(keywords, tags)
def test_dataset_extra_attributes_are_correct(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
# extras are included in dataset
if package['extras']:
for extra in package['extras']:
@@ -144,7 +150,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertEqual(dataset_value, extra_value)
def test_dataset_extra_attributes_are_complete(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
# dataset attributes are included in extras
extra_attrs = ['issued', 'modified', 'accrualPeriodicity', 'temporal', 'language', 'spatial', 'superTheme']
for key in extra_attrs:
diff --git a/tests/test_federation.py b/tests/test_federation.py
index e4a1d2e..e6804b9 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -1,6 +1,9 @@
+# -*- coding: utf-8 -*-
+
import unittest
import os
import re
+import sys
try:
from mock import patch, MagicMock
except ImportError:
@@ -83,10 +86,10 @@ class PushDatasetTestCase(unittest.TestCase):
@patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_tags_are_passed_correctly(self, mock_portal):
themes = self.dataset['theme']
- theme_taxonomy = self.catalog.themes
keywords = [kw for kw in self.dataset['keyword']]
for theme in themes:
- label = next(x['label'] for x in theme_taxonomy if x['id'] == theme)
+ label = self.catalog.get_theme(identifier=theme)['label']
+ label = re.sub(r'[^\w .-]+', '', label, flags=re.UNICODE)
keywords.append(label)
def mock_call_action(action, data_dict=None):
diff --git a/odml/doc.py b/odml/doc.py
index 8d75f2b..ea15912 100644
--- a/odml/doc.py
+++ b/odml/doc.py
@@ -34,10 +34,13 @@ class BaseDocument(base.sectionable, Document):
print(e)
self._id = str(uuid.uuid4())
self._author = author
- self._date = date # date must be a datetime
self._version = version
self._repository = repository
+ # Make sure date is properly parsed into a datetime object
+ self._date = None
+ self.date = date
+
@property
def id(self):
"""
diff --git a/odml/dtypes.py b/odml/dtypes.py
index 39d1e8d..e86bf8d 100644
--- a/odml/dtypes.py
+++ b/odml/dtypes.py
@@ -1,5 +1,5 @@
import sys
-import datetime
+import datetime as dt
from enum import Enum
self = sys.modules[__name__].__dict__
@@ -12,6 +12,10 @@ try:
except NameError:
unicode = str
+FORMAT_DATE = "%Y-%m-%d"
+FORMAT_DATETIME = "%Y-%m-%d %H:%M:%S"
+FORMAT_TIME = "%H:%M:%S"
+
class DType(str, Enum):
string = 'string'
@@ -44,11 +48,11 @@ def default_values(dtype):
return default_dtype_value[dtype]
if dtype == 'datetime':
- return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+ return dt.datetime.now().replace(microsecond=0)
if dtype == 'date':
- return datetime.datetime.now().strftime('%Y-%m-%d')
+ return dt.datetime.now().date()
if dtype == 'time':
- return datetime.datetime.now().strftime('%H:%M:%S')
+ return dt.datetime.now().replace(microsecond=0).time()
return '' # Maybe return None ?
@@ -65,9 +69,9 @@ def infer_dtype(value):
if dtype == 'string' and '\n' in value:
dtype = 'text'
return dtype
- else:
- # If unable to infer a dtype of given value, return defalt as *string*
- return 'string'
+
+ # If unable to infer a dtype of given value, return default as *string*
+ return 'string'
def valid_type(dtype):
@@ -109,14 +113,15 @@ def set(value, dtype=None):
if isinstance(value, str):
return str_set(value)
else:
- if type(value) in (str, unicode):
+ if isinstance(value, (str, unicode)):
return str_set(value)
return self.get(dtype + "_set", str_set)(value)
def int_get(string):
- if not string:
- return 0
+ if string is None or string == "":
+ return default_values("int")
+
try:
return int(string)
except ValueError:
@@ -125,14 +130,20 @@ def int_get(string):
def float_get(string):
- if not string:
- return 0.0
+ if string is None or string == "":
+ return default_values("float")
+
return float(string)
def str_get(string):
+ # Do not stringify empty list or dict but make sure boolean False gets through.
+ if string in [None, "", [], {}]:
+ return default_values("string")
+
if sys.version_info < (3, 0):
return unicode(string)
+
return str(string)
@@ -144,71 +155,65 @@ string_set = str_get
def time_get(string):
- if not string:
- return None
- if type(string) is datetime.time:
- return datetime.datetime.strptime(string.strftime('%H:%M:%S'),
- '%H:%M:%S').time()
- else:
- return datetime.datetime.strptime(string, '%H:%M:%S').time()
+ if string is None or string == "":
+ return default_values("time")
+ if isinstance(string, dt.time):
+ return dt.datetime.strptime(string.strftime(FORMAT_TIME), FORMAT_TIME).time()
-def time_set(value):
- if not value:
- return None
- if type(value) is datetime.time:
- return value.strftime("%H:%M:%S")
- return value.isoformat()
+ return dt.datetime.strptime(string, FORMAT_TIME).time()
+
+
+time_set = time_get
def date_get(string):
- if not string:
- return None
- if type(string) is datetime.date:
- return datetime.datetime.strptime(string.isoformat(),
- '%Y-%m-%d').date()
- else:
- return datetime.datetime.strptime(string, '%Y-%m-%d').date()
+ if string is None or string == "":
+ return default_values("date")
+
+ if isinstance(string, dt.date):
+ return dt.datetime.strptime(string.isoformat(), FORMAT_DATE).date()
+
+ return dt.datetime.strptime(string, FORMAT_DATE).date()
-date_set = time_set
+date_set = date_get
def datetime_get(string):
- if not string:
- return None
- if type(string) is datetime.datetime:
- return datetime.datetime.strptime(string.strftime('%Y-%m-%d %H:%M:%S'),
- '%Y-%m-%d %H:%M:%S')
- else:
- return datetime.datetime.strptime(string, '%Y-%m-%d %H:%M:%S')
+ if string is None or string == "":
+ return default_values("datetime")
+ if isinstance(string, dt.datetime):
+ return dt.datetime.strptime(string.strftime(FORMAT_DATETIME), FORMAT_DATETIME)
-def datetime_set(value):
- if not value:
- return None
- if type(value) is datetime.datetime:
- return value.strftime('%Y-%m-%d %H:%M:%S')
- else:
- return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
+ return dt.datetime.strptime(string, FORMAT_DATETIME)
+
+
+datetime_set = datetime_get
def boolean_get(string):
- if string is None:
- return None
- if type(string) in (unicode, str):
+ if string in [None, "", [], {}]:
+ return default_values("boolean")
+
+ if isinstance(string, (unicode, str)):
string = string.lower()
+
truth = ["true", "1", True, "t"] # be kind, spec only accepts True / False
if string in truth:
return True
+
false = ["false", "0", False, "f"]
if string in false:
return False
+
# disallow any values that cannot be interpreted as boolean.
raise ValueError
# Alias boolean_set to boolean_get. Both perform same function.
+
boolean_set = boolean_get
bool_get = boolean_get
bool_set = boolean_set
diff --git a/odml/tools/dict_parser.py b/odml/tools/dict_parser.py
index 0d2cdf0..3dd95a9 100644
--- a/odml/tools/dict_parser.py
+++ b/odml/tools/dict_parser.py
@@ -83,7 +83,13 @@ class DictWriter:
if isinstance(tag, tuple):
prop_dict[attr] = list(tag)
elif (tag == []) or tag: # Even if 'value' is empty, allow '[]'
- prop_dict[attr] = tag
+ # Custom odML tuples require special handling
+ # for saving to and loading from file.
+ if attr == "value" and prop.dtype and \
+ prop.dtype.endswith("-tuple") and len(prop.value) > 0:
+ prop_dict["value"] = "(%s)" % ";".join(prop.value[0])
+ else:
+ prop_dict[attr] = tag
props_seq.append(prop_dict)
@@ -178,17 +184,13 @@ class DictReader:
for _property in props_list:
prop_attrs = {}
- values = []
for i in _property:
attr = self.is_valid_attribute(i, odmlfmt.Property)
- if attr == 'value':
- values = _property['value']
if attr:
prop_attrs[attr] = _property[attr]
prop = odmlfmt.Property.create(**prop_attrs)
- prop.value = values
odml_props.append(prop)
return odml_props
diff --git a/odml/tools/odmlparser.py b/odml/tools/odmlparser.py
index 1caa7fa..641a52a 100644
--- a/odml/tools/odmlparser.py
+++ b/odml/tools/odmlparser.py
@@ -5,6 +5,7 @@ A generic odML parsing module.
Parses odML files and documents.
"""
+import datetime
import json
import yaml
@@ -67,11 +68,21 @@ class ODMLWriter:
if self.parser == 'YAML':
string_doc = yaml.dump(odml_output, default_flow_style=False)
elif self.parser == 'JSON':
- string_doc = json.dumps(odml_output, indent=4)
+ string_doc = json.dumps(odml_output, indent=4,
+ cls=JSONDateTimeSerializer)
return string_doc
+# Required to serialize datetime values with JSON.
+class JSONDateTimeSerializer(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, (datetime.datetime, datetime.date, datetime.time)):
+ return str(o)
+
+ return json.JSONEncoder.default(self, o)
+
+
class ODMLReader:
"""
A reader to parse odML files or strings into odml documents,
diff --git a/odml/tools/xmlparser.py b/odml/tools/xmlparser.py
index a0d48c1..f2ea862 100644
--- a/odml/tools/xmlparser.py
+++ b/odml/tools/xmlparser.py
@@ -83,7 +83,11 @@ class XMLWriter:
if val is None:
continue
if isinstance(fmt, format.Property.__class__) and k == "value":
- ele = E(k, to_csv(val))
+ # Custom odML tuples require special handling for saving to and loading from file.
+ if e.dtype and e.dtype.endswith("-tuple") and len(val) > 0:
+ ele = E(k, "(%s)" % ";".join(val[0]))
+ else:
+ ele = E(k, to_csv(val))
cur.append(ele)
else:
if isinstance(val, list):
[dtype] Return defined default values
Currently the "get" methods in `dtype.py` return magic numbers as default values. Make them return the already defined default values instead.
G-Node/python-odml
diff --git a/test/test_dtypes.py b/test/test_dtypes.py
index 6e90e5e..bbc3d35 100644
--- a/test/test_dtypes.py
+++ b/test/test_dtypes.py
@@ -1,8 +1,7 @@
+import datetime
import unittest
import odml.dtypes as typ
-import odml
-import datetime
class TestTypes(unittest.TestCase):
@@ -11,42 +10,150 @@ class TestTypes(unittest.TestCase):
pass
def test_date(self):
+ self.assertIsInstance(typ.date_get(None), datetime.date)
+ self.assertIsInstance(typ.date_get(""), datetime.date)
+
+ re = "^[0-9]{4}-(0[1-9]|1[0-2])-([0-2][0-9]|3[0-1])$"
+ self.assertRegexpMatches(typ.date_get(None).strftime(typ.FORMAT_DATE), re)
+ self.assertRegexpMatches(typ.date_get("").strftime(typ.FORMAT_DATE), re)
+
date = datetime.date(2011, 12, 1)
date_string = '2011-12-01'
self.assertEqual(date, typ.date_get(date_string))
- self.assertEqual(typ.date_set(date), date_string)
+ self.assertEqual(date, typ.date_get(date))
+
+ with self.assertRaises(TypeError):
+ _ = typ.date_get([])
+ with self.assertRaises(TypeError):
+ _ = typ.date_get({})
+ with self.assertRaises(TypeError):
+ _ = typ.date_get(False)
+
+ # Test fail on datetime.datetime
+ with self.assertRaises(ValueError):
+ _ = typ.date_get(datetime.datetime.now())
+
+ # Test fail on datetime.time
+ with self.assertRaises(TypeError):
+ _ = typ.date_get(datetime.datetime.now().time())
+
+ # Test fail on invalid string format
+ with self.assertRaises(ValueError):
+ _ = typ.date_get("11.11.1111")
def test_time(self):
+ self.assertIsInstance(typ.time_get(None), datetime.time)
+ self.assertIsInstance(typ.time_get(""), datetime.time)
+
+ re = "^[0-5][0-9]:[0-5][0-9]:[0-5][0-9]$"
+ self.assertRegexpMatches(typ.time_get(None).strftime(typ.FORMAT_TIME), re)
+ self.assertRegexpMatches(typ.time_get("").strftime(typ.FORMAT_TIME), re)
+
time = datetime.time(12, 34, 56)
time_string = '12:34:56'
self.assertEqual(time, typ.time_get(time_string))
- self.assertEqual(typ.time_set(time), time_string)
+ self.assertEqual(time, typ.time_get(time))
+
+ with self.assertRaises(TypeError):
+ _ = typ.time_get([])
+ with self.assertRaises(TypeError):
+ _ = typ.time_get({})
+ with self.assertRaises(TypeError):
+ _ = typ.time_get(False)
+
+ # Test fail on datetime.datetime
+ with self.assertRaises(TypeError):
+ _ = typ.time_get(datetime.datetime.now())
+
+ # Test fail on datetime.date
+ with self.assertRaises(TypeError):
+ _ = typ.time_get(datetime.datetime.now().date())
+
+ # Test fail on invalid string format
+ with self.assertRaises(ValueError):
+ _ = typ.time_get("11-11-11")
def test_datetime(self):
+ self.assertIsInstance(typ.datetime_get(None), datetime.datetime)
+ self.assertIsInstance(typ.datetime_get(""), datetime.datetime)
+
+ re = "^[0-9]{4}-(0[1-9]|1[0-2])-([0-2][0-9]|3[0-1]) " \
+ "[0-5][0-9]:[0-5][0-9]:[0-5][0-9]$"
+ self.assertRegexpMatches(typ.datetime_get(None).strftime(typ.FORMAT_DATETIME), re)
+ self.assertRegexpMatches(typ.datetime_get("").strftime(typ.FORMAT_DATETIME), re)
+
date = datetime.datetime(2011, 12, 1, 12, 34, 56)
date_string = '2011-12-01 12:34:56'
self.assertEqual(date, typ.datetime_get(date_string))
- self.assertEqual(typ.datetime_set(date), date_string)
+ self.assertEqual(date, typ.datetime_get(date))
+
+ with self.assertRaises(TypeError):
+ _ = typ.datetime_get([])
+ with self.assertRaises(TypeError):
+ _ = typ.datetime_get({})
+ with self.assertRaises(TypeError):
+ _ = typ.datetime_get(False)
+
+ # Test fail on datetime.time
+ with self.assertRaises(TypeError):
+ _ = typ.datetime_get(datetime.datetime.now().time())
+
+ # Test fail on datetime.date
+ with self.assertRaises(TypeError):
+ _ = typ.datetime_get(datetime.datetime.now().date())
+
+ # Test fail on invalid string format
+ with self.assertRaises(ValueError):
+ _ = typ.datetime_get("11.11.1111 12:12:12")
def test_int(self):
- p = odml.Property("test", value="123456789012345678901", dtype="int")
- self.assertEqual(p.value[0], 123456789012345678901)
- p = odml.Property("test", value="-123456789012345678901", dtype="int")
- self.assertEqual(p.value[0], -123456789012345678901)
- p = odml.Property("test", value="123.45", dtype="int")
- self.assertEqual(p.value[0], 123)
+ self.assertEqual(typ.default_values("int"), typ.int_get(None))
+ self.assertEqual(typ.default_values("int"), typ.int_get(""))
+
+ self.assertIsInstance(typ.int_get(11), int)
+ self.assertIsInstance(typ.int_get(1.1), int)
+ self.assertIsInstance(typ.int_get("11"), int)
+ self.assertEqual(typ.int_get("123456789012345678901"), 123456789012345678901)
+ self.assertEqual(typ.int_get("-123456789012345678901"), -123456789012345678901)
+ self.assertEqual(typ.int_get("123.45"), 123)
+
+ with self.assertRaises(TypeError):
+ _ = typ.int_get([])
+ with self.assertRaises(TypeError):
+ _ = typ.int_get({})
+ with self.assertRaises(ValueError):
+ _ = typ.int_get("fail")
+
+ def test_float(self):
+ self.assertEqual(typ.default_values("float"), typ.float_get(None))
+ self.assertEqual(typ.default_values("float"), typ.float_get(""))
+
+ self.assertIsInstance(typ.float_get(1), float)
+ self.assertIsInstance(typ.float_get("1.1"), float)
+ self.assertEqual(typ.float_get(123.45), 123.45)
+
+ with self.assertRaises(TypeError):
+ _ = typ.float_get([])
+ with self.assertRaises(TypeError):
+ _ = typ.float_get({})
+ with self.assertRaises(ValueError):
+ _ = typ.float_get("fail")
def test_str(self):
- s = odml.Property(name='Name', value='Sherin')
- self.assertEqual(s.value[0], 'Sherin')
- self.assertEqual(s.dtype, 'string')
+ self.assertEqual(typ.default_values("string"), typ.str_get(None))
+ self.assertEqual(typ.default_values("string"), typ.str_get(""))
+ self.assertEqual(typ.default_values("string"), typ.str_get([]))
+ self.assertEqual(typ.default_values("string"), typ.str_get({}))
- s.value = 'Jerin'
- self.assertEqual(s.value[0], 'Jerin')
- self.assertEqual(s.dtype, 'string')
+ # Make sure boolean values are properly converted to string.
+ self.assertEqual(typ.str_get(False), 'False')
+ self.assertEqual(typ.str_get(True), 'True')
def test_bool(self):
- self.assertEqual(None, typ.boolean_get(None))
+ self.assertEqual(typ.default_values("boolean"), typ.boolean_get(None))
+ self.assertEqual(typ.default_values("boolean"), typ.boolean_get(""))
+ self.assertEqual(typ.default_values("boolean"), typ.boolean_get([]))
+ self.assertEqual(typ.default_values("boolean"), typ.boolean_get({}))
true_values = [True, "TRUE", "true", "T", "t", "1", 1]
for val in true_values:
@@ -64,18 +171,17 @@ class TestTypes(unittest.TestCase):
typ.boolean_get(2.1)
def test_tuple(self):
- # Success test
- t = odml.Property(name="Location", value='(39.12; 67.19)', dtype='2-tuple')
- tuple_value = t.value[0] # As the formed tuple is a list of list
- self.assertEqual(tuple_value[0], '39.12')
- self.assertEqual(tuple_value[1], '67.19')
+ self.assertIs(typ.tuple_get(""), None)
+ self.assertIs(typ.tuple_get(None), None)
- # Failure test. More tuple values then specified.
- with self.assertRaises(ValueError):
- t = odml.Property(name="Public-Key", value='(5689; 1254; 687)',
- dtype='2-tuple')
+ self.assertEqual(typ.tuple_get("(39.12; 67.19)"), ["39.12", "67.19"])
+
+ # Test fail on missing parenthesis.
+ with self.assertRaises(AssertionError):
+ _ = typ.tuple_get("fail")
+ # Test fail on mismatching element count and count number.
+ with self.assertRaises(AssertionError):
+ _ = typ.tuple_get("(1; 2; 3)", 2)
def test_dtype_none(self):
- t = odml.Property(name="Record", value={'name': 'Marie'})
- self.assertEqual(t.dtype, 'string')
- self.assertEqual(t.value[0], "{'name': 'Marie'}")
+ self.assertEqual(typ.get({'name': 'Marie'}), "{'name': 'Marie'}")
diff --git a/test/test_infer_type.py b/test/test_infer_type.py
index 8909f85..7f27bc4 100644
--- a/test/test_infer_type.py
+++ b/test/test_infer_type.py
@@ -11,51 +11,51 @@ class TestInferType(unittest.TestCase):
p = Property("test", value="somestring")
assert(p.dtype == "string")
if sys.version_info < (3, 0):
- assert (type(p.value[0]) == unicode)
+ assert isinstance(p.value[0], unicode)
else:
- assert (type(p.value[0]) == str)
+ assert isinstance(p.value[0], str)
def test_text(self):
p = Property("test", value="some\nstring")
assert(p.dtype == "text")
if sys.version_info < (3, 0):
- assert (type(p.value[0]) == unicode)
+ assert isinstance(p.value[0], unicode)
else:
- assert (type(p.value[0]) == str)
+ assert isinstance(p.value[0], str)
def test_int(self):
p = Property("test", value=111)
assert(p.dtype == "int")
- assert(type(p.value[0]) == int)
+ assert isinstance(p.value[0], int)
def test_float(self):
p = Property("test", value=3.14)
assert(p.dtype == "float")
- assert(type(p.value[0]) == float)
+ assert isinstance(p.value[0], float)
def test_datetime(self):
p = Property("test", value=dt.now())
assert(p.dtype == "datetime")
- assert(type(p.value[0]) == dt)
+ assert isinstance(p.value[0], dt)
def test_date(self):
p = Property("test", dt.now().date())
assert(p.dtype == "date")
- assert(type(p.value[0]) == date)
+ assert isinstance(p.value[0], date)
def test_time(self):
p = Property("test", value=dt.now().time())
assert(p.dtype == "time")
- assert(type(p.value[0]) == time)
+ assert isinstance(p.value[0], time)
def test_boolean(self):
p = Property("test", True)
assert(p.dtype == "boolean")
- assert(type(p.value[0]) == bool)
+ assert isinstance(p.value[0], bool)
p = Property("test", False)
assert(p.dtype == "boolean")
- assert(type(p.value[0]) == bool)
+ assert isinstance(p.value[0], bool)
def test_read_write(self):
doc = Document("author")
@@ -79,37 +79,37 @@ class TestInferType(unittest.TestCase):
p = new_sec.properties["strprop"]
assert(p.dtype == "string")
if sys.version_info < (3, 0):
- assert(type(p.value[0]) == unicode)
+ assert isinstance(p.value[0], unicode)
else:
- assert(type(p.value[0]) == str)
+ assert isinstance(p.value[0], str)
p = new_sec.properties["txtprop"]
assert(p.dtype == "text")
if sys.version_info < (3, 0):
- assert(type(p.value[0]) == unicode)
+ assert isinstance(p.value[0], unicode)
else:
- assert(type(p.value[0]) == str)
+ assert isinstance(p.value[0], str)
p = new_sec.properties["intprop"]
assert(p.dtype == "int")
- assert(type(p.value[0]) == int)
+ assert isinstance(p.value[0], int)
p = new_sec.properties["floatprop"]
assert(p.dtype == "float")
- assert(type(p.value[0]) == float)
+ assert isinstance(p.value[0], float)
p = new_sec.properties["datetimeprop"]
assert(p.dtype == "datetime")
- assert(type(p.value[0]) == dt)
+ assert isinstance(p.value[0], dt)
p = new_sec.properties["dateprop"]
assert(p.dtype == "date")
- assert(type(p.value[0]) == date)
+ assert isinstance(p.value[0], date)
p = new_sec.properties["timeprop"]
assert(p.dtype == "time")
- assert(type(p.value[0]) == time)
+ assert isinstance(p.value[0], time)
p = new_sec.properties["boolprop"]
assert(p.dtype == "boolean")
- assert(type(p.value[0]) == bool)
+ assert isinstance(p.value[0], bool)
diff --git a/test/test_property.py b/test/test_property.py
index c122f97..cbcaade 100644
--- a/test/test_property.py
+++ b/test/test_property.py
@@ -73,6 +73,24 @@ class TestProperty(unittest.TestCase):
p6 = Property("test", {"name": "Marie", "name":"Johanna"})
self.assertEqual(len(p6), 1)
+ # Test tuple dtype value.
+ t = Property(name="Location", value='(39.12; 67.19)', dtype='2-tuple')
+ tuple_value = t.value[0] # As the formed tuple is a list of list
+ self.assertEqual(tuple_value[0], '39.12')
+ self.assertEqual(tuple_value[1], '67.19')
+
+ # Test invalid tuple length
+ with self.assertRaises(ValueError):
+ _ = Property(name="Public-Key", value='(5689; 1254; 687)', dtype='2-tuple')
+
+ # Test missing tuple length.
+ with self.assertRaises(ValueError):
+ _ = Property(name="Public-Key", value='(5689; 1254; 687)', dtype='-tuple')
+
+ # Test invalid tuple format.
+ with self.assertRaises(ValueError):
+ _ = Property(name="Public-Key", value='5689; 1254; 687', dtype='3-tuple')
+
def test_get_set_value(self):
values = [1, 2, 3, 4, 5]
p = Property("property", value=values)
diff --git a/test/test_samplefile.py b/test/test_samplefile.py
index 92ae8ec..3bd5ec6 100644
--- a/test/test_samplefile.py
+++ b/test/test_samplefile.py
@@ -197,7 +197,7 @@ class AttributeTest(unittest.TestCase):
def test_conversion_int_to_float(self):
p = odml.Property("test", "1", dtype="int")
self.assertEqual(p.dtype, "int")
- self.assertEqual(type(p.value[0]), int)
+ self.assertIsInstance(p.value[0], int)
p.dtype = "float" # change dtype
self.assertEqual(p.dtype, "float")
self.assertEqual(p.value[0], 1.0)
diff --git a/pypunisher/__init__.py b/pypunisher/__init__.py
index 97d4631..0730e06 100644
--- a/pypunisher/__init__.py
+++ b/pypunisher/__init__.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
"""
PyPunisher
==========
diff --git a/pypunisher/_checks.py b/pypunisher/_checks.py
index a268a99..ad8a08f 100644
--- a/pypunisher/_checks.py
+++ b/pypunisher/_checks.py
@@ -1,8 +1,8 @@
-"""
-
- Checks
- ~~~~~~
+#!/usr/bin/env python
+"""
+Checks
+======
"""
import numpy as np
@@ -48,20 +48,21 @@ def input_checks(locals_):
"""
# Sort so that the order of the parameter name
# are in a reliable (alphabetical) order.
- param_a, param_b = sorted(k for k, p in locals_.items() if k != 'self')
- locals_non_non = {k: v for k, v in locals_.items()
- if v is not None and k != 'self'}
+ ALLOWED = ('min_change', 'n_features')
+ param_a, param_b = sorted(k for k, p in locals_.items() if k in ALLOWED)
+ locals_non_none = {k: v for k, v in locals_.items()
+ if v is not None and k in ALLOWED}
- if len(locals_non_non) != 1:
+ if len(locals_non_none) != 1:
raise TypeError(
"At least one of `{a}` and `{b}` must be None.".format(
a=param_a, b=param_b
)
)
- # Unpack the single key and value pair
- name, obj = tuple(locals_non_non.items())[0]
- if obj is None and not isinstance(obj, (int, float)):
+ # Unpack the single key and value pair.
+ name, obj = tuple(locals_non_none.items())[0]
+ if not isinstance(obj, (int, float)):
raise TypeError(
"`{}` must be of type int or float.".format(name)
)
diff --git a/pypunisher/metrics/__init__.py b/pypunisher/metrics/__init__.py
index 0ef76a8..8fe54ef 100644
--- a/pypunisher/metrics/__init__.py
+++ b/pypunisher/metrics/__init__.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
"""
Metrics
=======
diff --git a/pypunisher/metrics/criterion.py b/pypunisher/metrics/criterion.py
index 91c5150..70e599b 100644
--- a/pypunisher/metrics/criterion.py
+++ b/pypunisher/metrics/criterion.py
@@ -1,8 +1,8 @@
-"""
-
- Information Criterion
- ~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+ Information Criterion
+ =====================
"""
from numpy import log, ndarray, pi
from pypunisher._checks import model_check
diff --git a/pypunisher/selection_engines/__init__.py b/pypunisher/selection_engines/__init__.py
index 65479f8..aac2dd9 100644
--- a/pypunisher/selection_engines/__init__.py
+++ b/pypunisher/selection_engines/__init__.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
"""
Selection Engines
=================
diff --git a/pypunisher/selection_engines/_utils.py b/pypunisher/selection_engines/_utils.py
index 9315f08..013198a 100644
--- a/pypunisher/selection_engines/_utils.py
+++ b/pypunisher/selection_engines/_utils.py
@@ -1,8 +1,8 @@
-"""
-
- Utils
- ~~~~~
+#!/usr/bin/env python
+"""
+Utils
+=====
"""
def get_n_features(matrix, min_=2):
@@ -47,8 +47,7 @@ def enforce_use_of_all_cpus(model):
exists
"""
- if hasattr(model, 'n_jobs'):
- setattr(model, 'n_jobs', -1)
+ setattr(model, 'n_jobs', -1)
return model
diff --git a/pypunisher/selection_engines/selection.py b/pypunisher/selection_engines/selection.py
index 067a3f5..561aec7 100644
--- a/pypunisher/selection_engines/selection.py
+++ b/pypunisher/selection_engines/selection.py
@@ -1,8 +1,8 @@
-"""
-
- Forward and Backward Selection Algorithms
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Forward and Backward Selection Algorithms
+=========================================
"""
from pypunisher.metrics.criterion import aic, bic
from pypunisher._checks import model_check, array_check, input_checks
@@ -93,6 +93,27 @@ class Selection(object):
score = self._model.score(X_val, y_val)
return score
+ @staticmethod
+ def _do_not_skip(kwargs):
+ """Check for skipping override by looking
+ for `_do_not_skip` in keyword arguments
+ If it is present, the loops in the algorithms
+ will be run to exhaustion.
+
+ Args:
+ kwargs : dict
+ Keyword Args
+
+ Returns:
+ bool
+ True if `_do_not_skip` is not present in `kwargs`;
+ otherwise, the value of `_do_not_skip` is returned.
+
+ """
+ return kwargs.get('_do_not_skip', True)
+
def _forward_break_criteria(self, S, min_change, best_j_score,
j_score_dict, n_features):
"""Check if `forward()` should break
@@ -128,7 +149,7 @@ class Selection(object):
else:
return False
- def forward(self, min_change=0.5, n_features=None):
+ def forward(self, min_change=0.5, n_features=None, **kwargs):
"""Perform Forward Selection on a Sklearn model.
Args:
@@ -140,6 +161,10 @@ class Selection(object):
Note: `min_change` must be None in order for `n_features` to operate.
Floats will be regarded as proportions of the total
that must lie on (0, 1).
+ kwargs : Keyword Args
+ Includes:
+ * `_do_not_skip`: for internal use only; it is
+ not recommended that users use this parameter.
Returns:
S : list
@@ -150,14 +175,18 @@ class Selection(object):
S = list()
best_score = None
itera = list(range(self._total_number_of_features))
+ do_not_skip = self._do_not_skip(kwargs)
- if n_features:
+ if n_features and do_not_skip:
n_features = parse_n_features(n_features, total=len(itera))
for i in range(self._total_number_of_features):
if self._verbose:
print("Iteration: {}".format(i))
+ if not do_not_skip:
+ continue
+
# 1. Find best feature, j, to add.
j_score_dict = dict()
for j in itera:
@@ -182,7 +211,7 @@ class Selection(object):
return S
- def backward(self, n_features=0.5, min_change=None):
+ def backward(self, n_features=0.5, min_change=None, **kwargs):
"""Perform Backward Selection on a Sklearn model.
Args:
@@ -194,6 +223,14 @@ class Selection(object):
min_change : int or float, optional
The smallest change to be considered significant.
`n_features` must be None for `min_change` to operate.
+ kwargs : Keyword Args
+ Includes:
+ * `_do_not_skip` : bool
+ Explore loop exhaustion.
+ **For internal use only**; Not intended for outside use.
+ * `_last_score_punt` : bool
+ Relax `defeated_last_iter_score` decision boundary.
+ **For internal use only**. Not intended for outside use.
Returns:
S : list
@@ -205,8 +242,10 @@ class Selection(object):
"""
input_checks(locals())
S = list(range(self._total_number_of_features)) # start with all features
+ do_not_skip = self._do_not_skip(kwargs)
+ last_score_punt = kwargs.get('_last_score_punt', False)
- if n_features:
+ if n_features and do_not_skip:
n_features = parse_n_features(n_features, total=len(S))
last_iter_score = self._fit_and_score(S, feature=None, algorithm='backward')
@@ -215,6 +254,9 @@ class Selection(object):
if self._verbose:
print("Iteration: {}".format(i))
+ if not do_not_skip:
+ continue
+
# 1. Hunt for the least predictive feature.
best = {'feature': None, 'score': None, 'defeated_last_iter_score': True}
for j in S:
@@ -228,13 +270,13 @@ class Selection(object):
if isinstance(n_features, int):
S.remove(to_drop) # blindly drop.
last_iter_score = best_new_score
- if len(S) == n_features:
- break
- else:
+ if not len(S) == n_features:
continue # i.e., ignore criteria below.
+ else:
+ break
# 2b. Halt if the change is no longer considered significant.
- if isinstance(min_change, (int, float)):
- if best['defeated_last_iter_score']:
+ else:
+ if best['defeated_last_iter_score'] or last_score_punt:
if (best_new_score - last_iter_score) < min_change:
break # there was a change, but it was not large enough.
else:
@@ -243,8 +285,4 @@ class Selection(object):
else:
break
- # 2c. Halt if only one feature remains.
- if len(S) == 1:
- break
-
return S
Feedback on Milestone 2
Hi All,
Nice work on milestone 2. I like your comprehensive design for the entire package. Here are my comments:
1. Good practice to state that installation requires Python 3.6.
2. I like the coverage section detailing your test coverage; excellent.
3. For __init__.py, lines 4 and 5: why not just use list.append() to build the version number?
4. For selection_engines/__init__.py, I like your comments on the scipy issue.
5. Please improve your Python style (see https://google.github.io/styleguide/pyguide.html); for instance, selection.py line 4 is not professional, and the blank-line spacing between functions is inconsistent.
6. It is suggested that the first line of each Python file be #!/usr/bin/env python, in case a user runs your code on Linux (like me).
7. For _fit_and_score(self, S, feature, algorithm): what if the algorithm argument is invalid? Have you thought about that?
8. For backward(), S is not a good name for a list.
Regards
Jason
UBC-MDS/PyPunisher
diff --git a/tests/__init__.py b/tests/__init__.py
index e69de29..4265cc3 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -0,0 +1,1 @@
+#!/usr/bin/env python
diff --git a/tests/_defaults.py b/tests/_defaults.py
index b9a31c4..ddfc126 100644
--- a/tests/_defaults.py
+++ b/tests/_defaults.py
@@ -1,8 +1,8 @@
-"""
+#!/usr/bin/env python
+"""
Default Base for Testing Against
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
+ ================================
"""
from sklearn.linear_model import LinearRegression
from tests._test_data import X_train, y_train, X_val, y_val
diff --git a/tests/_test_data.py b/tests/_test_data.py
index 5b74671..adc5f13 100644
--- a/tests/_test_data.py
+++ b/tests/_test_data.py
@@ -1,31 +1,41 @@
-"""
-
- Test Data
- ~~~~~~~~~
-
- Generate: y = x + e, where e ~ Uniform(0, 50) and
- `x` is embedded as the middle column in a zero matrix.
- That is, only ONE column is predictive of y, the rest are
- trivial column vectors.
+#!/usr/bin/env python
+"""
+Test Data
+=========
+Generate: y = x + e, where e ~ Uniform(0, 50) and
+`x` is embedded as the middle column in a zero matrix.
+That is, only ONE column is predictive of y, the rest are
+trivial column vectors.
+
+X_train : 2D array
+ Training Features.
+X_val : 2D array
+ Validation Features.
+y_train : 1D array
+ Training labels.
+y_val : 1D array
+ Validation Labels
+true_best_feature : int, list
+ Denotes the best feature
+ that is actually predictive of the response.
"""
import numpy as np
from sklearn.model_selection import train_test_split
SEED = 99
-
-features = 20
-obs = 501
-middle_feature = features // 2
+FEATURES = 20
+OBSERVATIONS = 501
+middle_feature = FEATURES // 2
np.random.seed(SEED)
-X = np.zeros((obs, features))
-y = np.arange(obs)
-X[:, middle_feature] = y + np.random.uniform(0, 50, size=obs)
+X = np.zeros((OBSERVATIONS, FEATURES))
+y = np.arange(OBSERVATIONS)
+X[:, middle_feature] = y + np.random.uniform(0, 50, size=OBSERVATIONS)
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=SEED)
-TRUE_BEST_FEATURE = middle_feature
+true_best_feature = middle_feature
# Visualize ---
# import matplotlib.pyplot as plt
diff --git a/tests/_wrappers.py b/tests/_wrappers.py
index e4404f9..2dcf69d 100644
--- a/tests/_wrappers.py
+++ b/tests/_wrappers.py
@@ -1,8 +1,8 @@
-"""
-
- Wrapper Functions for Testing
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Wrapper Functions for Testing
+=============================
"""
from copy import deepcopy
from tests._defaults import DEFAULT_SELECTION_PARAMS
diff --git a/tests/test_backward_selection.py b/tests/test_backward_selection.py
index d44f603..cdb8b2a 100644
--- a/tests/test_backward_selection.py
+++ b/tests/test_backward_selection.py
@@ -1,8 +1,8 @@
-"""
-
- Tests Specific to Backward Selection
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Tests Specific to Backward Selection
+====================================
"""
import os
import sys
@@ -13,33 +13,88 @@ sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
from tests._wrappers import backward
+from tests._test_data import X_train
+
+# -----------------------------------------------------------------------------
+# Test `backward()` Params
+# -----------------------------------------------------------------------------
-def test_backward_params():
+
+def test_n_features_greater_than_zero_backward():
"""
- Check parameters to `backward()` raise when expected.
+ Check `backward()`'s `n_features` raises when
+ not greater than zero
"""
msg = "`n_features` must be greater than zero."
with pytest.raises(ValueError, match=msg):
backward(n_features=-0.5, min_change=None)
+
+def test_min_change_greater_than_zero_backward():
+ """
+ Check `backward()`'s `min_change` raises when
+ not greater than zero
+ """
msg = "`min_change` must be greater than zero."
with pytest.raises(ValueError, match=msg):
backward(n_features=None, min_change=-0.75)
+
+def test_min_change_fails_on_string_backward():
+ """
+ Check that backward raises when passed a string
+ for `min_change`.
+ """
+ msg = "`min_change` must be of type int or float."
+ with pytest.raises(TypeError, match=msg):
+ backward(min_change='-0.75', n_features=None)
+
+
+def test_n_features_fails_on_string_backward():
+ """
+ Check that backward raises when passed a string
+ for `n_features`.
+ """
+ msg = "`n_features` must be of type int or float."
+ with pytest.raises(TypeError, match=msg):
+ backward(min_change=None, n_features='-0.75')
+
+
+def test_both_non_none_backward():
+ """
+ Check `backward()` raise when at least one
+ of `min_change` or `n_features` are not None.
+ """
# Note: items in backticks (``) will be in alphabetical order.
msg = "At least one of `min_change` and `n_features` must be None."
with pytest.raises(TypeError, match=msg):
backward(n_features=0.5, min_change=0.3)
-
- msg = "`criterion` must be one of: None, 'aic', 'bic'."
- with pytest.raises(ValueError, match=msg):
- backward(n_features=0.5, criterion='acc')
+
+def test_float_greater_than_one_raises_backward():
+ """
+ Test that float values not on (0, 1) raise.
+ """
msg = "^If a float, `n_features` must be on"
with pytest.raises(ValueError, match=msg):
backward(n_features=1.5)
-
+
+
+def test_min_features_requirement_backward():
+ """
+ Check that the requirement that at least
+ two features must be present.
+ """
msg = "less than 2 features present."
with pytest.raises(IndexError, match=msg):
- backward(X_train=ones((501, 1)), X_val=ones((501, 1)))
\ No newline at end of file
+ backward(X_train=ones((501, 1)), X_val=ones((501, 1)))
+
+
+# -----------------------------------------------------------------------------
+# Test Exhausting loop
+# -----------------------------------------------------------------------------
+
+def test_loop_exhaust():
+ """Text Exhausting backward()'s loop."""
+ backward(n_features=X_train.shape[-1], min_change=None, _do_not_skip=False)
diff --git a/tests/test_criterion.py b/tests/test_criterion.py
index 9e2b8ff..2773219 100644
--- a/tests/test_criterion.py
+++ b/tests/test_criterion.py
@@ -1,8 +1,8 @@
-"""
-
- Criterion Tests
- ~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Criterion Tests
+===============
"""
import os
import sys
@@ -15,6 +15,7 @@ import statsmodels.api as sm
from pypunisher.metrics.criterion import aic, bic
from sklearn.linear_model import LinearRegression
from tests._test_data import X_train, y_train
+from tests._wrappers import forward, backward
COMP_TOLERANCE = 200 # comparision tolerance between floats
@@ -49,6 +50,22 @@ def test_metric_model_param():
metric(kind, X_train=X_train, y_train=y_train)
+# -----------------------------------------------------------------------------
+# Test criterion through selection
+# -----------------------------------------------------------------------------
+
+
+def test_selection_class_use_of_criterion():
+ """Test Criterion through `forward()` and `backward()`."""
+
+ msg = "`criterion` must be one of: None, 'aic', 'bic'."
+ with pytest.raises(ValueError, match=msg):
+ forward(min_change=0.5, criterion='acc')
+
+ with pytest.raises(ValueError, match=msg):
+ backward(n_features=0.5, criterion='Santa')
+
+
# -----------------------------------------------------------------------------
# `data` Param
# -----------------------------------------------------------------------------
@@ -68,6 +85,7 @@ def test_metric_data_param():
else:
metric(sk_model, X_train=kind, y_train=y_train)
+
# -----------------------------------------------------------------------------
# Metric output
# -----------------------------------------------------------------------------
diff --git a/tests/test_forward_selection.py b/tests/test_forward_selection.py
index fa62b01..93d27f0 100644
--- a/tests/test_forward_selection.py
+++ b/tests/test_forward_selection.py
@@ -1,8 +1,8 @@
-"""
-
- Tests Specific to Forward Selection
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Tests Specific to Forward Selection
+===================================
"""
import os
import sys
@@ -12,25 +12,68 @@ sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
from tests._wrappers import forward
+from tests._test_data import X_train
+
+
+# -----------------------------------------------------------------------------
+# Test `forward()` Params
+# -----------------------------------------------------------------------------
+
+
+def test_n_features_greater_than_zero_forward():
+ """
+ Check that `n_features` is required to be > 0.
+ """
+ msg = "`n_features` must be greater than zero."
+ with pytest.raises(ValueError, match=msg):
+ forward(min_change=None, n_features=-0.75)
-def test_forward_params():
+def test_min_change_greater_than_zero_forward():
"""
- Check parameters to `forward()` raise when expected.
+ Check that `min_change` is required to be > 0.
"""
msg = "`min_change` must be greater than zero."
with pytest.raises(ValueError, match=msg):
forward(min_change=-0.5, n_features=None)
- msg = "`n_features` must be greater than zero."
- with pytest.raises(ValueError, match=msg):
- forward(min_change=None, n_features=-0.75)
+def test_n_features_fails_on_string_forward():
+ """
+ Check that forward raises when passed a string
+ for `n_features`.
+ """
+ msg = "`n_features` must be of type int or float."
+ with pytest.raises(TypeError, match=msg):
+ forward(min_change=None, n_features='-0.75')
+
+
+def test_min_change_fails_on_string_forward():
+ """
+ Check that forward raises when passed a string
+ for `min_change`.
+ """
+ msg = "`min_change` must be of type int or float."
+ with pytest.raises(TypeError, match=msg):
+ forward(min_change='-0.75', n_features=None)
+
+
+def test_both_non_none_forward():
+ """
+ Check that `forward()` raises when neither
+ `min_change` nor `n_features` is None.
+ """
# Note: items in backticks (``) will be in alphabetical order.
msg = "At least one of `min_change` and `n_features` must be None."
with pytest.raises(TypeError, match=msg):
forward(min_change=0.5, n_features=0.3)
-
- msg = "`criterion` must be one of: None, 'aic', 'bic'."
- with pytest.raises(ValueError, match=msg):
- forward(min_change=0.5, criterion='acc')
+
+
+# -----------------------------------------------------------------------------
+# Test Exhausting loop
+# -----------------------------------------------------------------------------
+
+def test_loop_exhaust():
+ """Test exhausting forward()'s loop."""
+ # Should not raise.
+ forward(n_features=X_train.shape[-1], min_change=None, _do_not_skip=False)
diff --git a/tests/test_selection.py b/tests/test_selection.py
index e3ee45a..2eb6098 100644
--- a/tests/test_selection.py
+++ b/tests/test_selection.py
@@ -1,8 +1,8 @@
-"""
-
- Run Tests Common to Forward and Backward Selection
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Run Tests Common to Forward and Backward Selection
+==================================================
"""
import os
import sys
@@ -14,7 +14,7 @@ sys.path.insert(0, os.path.abspath("../"))
from pypunisher import Selection
from tests._wrappers import forward, backward
-from tests._test_data import TRUE_BEST_FEATURE
+from tests._test_data import true_best_feature
from tests._defaults import DEFAULT_SELECTION_PARAMS
@@ -75,6 +75,18 @@ def test_sklearn_model_methods():
with pytest.raises(AttributeError):
Selection(**d)
+# -----------------------------------------------------------------------------
+# Test Multiple Features
+# -----------------------------------------------------------------------------
+
+
+def test_passing_significant_change():
+ """
+ Test cases where there is a significant `min_change`
+ during backward selection.
+ """
+ backward(n_features=None, min_change=1, _last_score_punt=True)
+
# -----------------------------------------------------------------------------
# Outputs: Run the Forward and Backward Selection Algorithms
@@ -88,7 +100,7 @@ forward_output += forward(n_features=1, min_change=None)
# Force the backward selection algorithm to
# select the single feature it thinks is most predictive.
# If implemented correctly, `backward()` should be able to
-# identify `TRUE_BEST_FEATURE` as predictive.
+# identify `true_best_feature` as predictive.
backward_output = backward(n_features=1)
# Run using the other parameter option
@@ -142,7 +154,7 @@ def output_values(output):
in the contrived data.
"""
msg = "The algorithm failed to select the predictive feature."
- assert TRUE_BEST_FEATURE in output, msg
+ assert true_best_feature in output, msg
def test_fsel_output_values():
@@ -206,4 +218,4 @@ def test_fsel_verbose_output():
def test_bsel_verbose_output():
backward_output = backward(n_features=2, min_change=None, verbose=True)
- assert len(backward_output) >= 1
\ No newline at end of file
+ assert len(backward_output) >= 1
jakirkham: LGTM. Thanks @martindurant.
Were you running into some bad behavior because of this or was it just slow?
martindurant: @jakirkham , this is specifically in response to #3248 . Something more sophisticated might be useful in any more complicated case, to minimise the calls to astype.
jakirkham: Thanks for the info.
Would expect that endianness would be preserved by `promote_types`. Though resolving conflicts between different endians is less clear (and may be at the crux of this issue).
Didn't see anything that matched on the NumPy issue tracker so raised as issue ( https://github.com/numpy/numpy/issues/10767 ).
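For reference, the crux is that `np.promote_types` always reports a native-byte-order result, so reducing over identical big-endian dtypes still yields a dtype that compares unequal to the inputs. A minimal illustration:

```python
import numpy as np

# Both inputs are big-endian, but the promoted type is native-endian.
dt = np.promote_types(np.dtype('>f8'), np.dtype('>f8'))
print(dt.str)                 # '<f8' on a little-endian machine
print(dt == np.dtype('>f8'))  # False, so the old unconditional astype
                              # inserted a real conversion into the graph
```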
diff --git a/dask/array/core.py b/dask/array/core.py
index a4990fe95..1d3d54f75 100644
--- a/dask/array/core.py
+++ b/dask/array/core.py
@@ -1208,7 +1208,12 @@ class Array(Base):
@wraps(store)
def store(self, target, **kwargs):
- return store([self], [target], **kwargs)
+ r = store([self], [target], **kwargs)
+
+ if kwargs.get("return_stored", False):
+ r = r[0]
+
+ return r
def to_hdf5(self, filename, datapath, **kwargs):
""" Store array in HDF5 file
@@ -2611,8 +2616,12 @@ def concatenate(seq, axis=0, allow_unknown_chunksizes=False):
cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq]))
- dt = reduce(np.promote_types, [a.dtype for a in seq])
- seq = [x.astype(dt) for x in seq]
+ seq_dtypes = [a.dtype for a in seq]
+ if len(set(seq_dtypes)) > 1:
+ dt = reduce(np.promote_types, seq_dtypes)
+ seq = [x.astype(dt) for x in seq]
+ else:
+ dt = seq_dtypes[0]
names = [a.name for a in seq]
optimization of array.concatenate depends strongly on endianness
I have encountered a dask optimization issue that I think is at the core of xgcm/xmitgcm#73.
Basically, I am constructing a big dask array by concatenating many numpy memmaps, each created within a `from_delayed` function. Then I want to get back out a single value from this big array. In theory this should go very fast and use very little memory, as if I had accessed the original memmap. And indeed it does...unless the dtype is big endian!
Here is how to reproduce this issue. First create some test data
```python
import numpy as np
import dask
import dask.array
# create some example binary data (8GB)
# large dataset is necessary to see timing differences
shape = (1, 50, 1000, 2000)
nfiles = 10
dtype = np.dtype('f8') # note: this doesn't matter at this stage
data = np.zeros(shape, dtype)
filenames = ['data.{:04d}'.format(n) for n in range(nfiles)]
for fname in filenames:
    data.tofile(fname)
```
Now define some functions to read it into dask arrays
```python
def read_as_memmap(fname, dtype):
    return np.memmap(fname, dtype=dtype, shape=shape)

def lazy_memmap(fname, dtype):
    return dask.array.from_delayed(
        dask.delayed(read_as_memmap)(fname, dtype),
        shape, dtype)

def read_all_data(dtype):
    return dask.array.concatenate(
        [lazy_memmap(fname, dtype) for fname in filenames])
```
Now perform a timing test on reading back a single value with default (little endian) datatype
```python
dtype = np.dtype('f8')
all_data = read_all_data(dtype)
%timeit lazy_memmap(filenames[-1], dtype)[0, 0, 0].compute()
%timeit all_data[-1, 0, 0, 0].compute()
```
On my machine I get
```
100 loops, best of 3: 2.54 ms per loop
100 loops, best of 3: 2.3 ms per loop
```
basically identical and very fast, as we expect. The dask graph for `all_data[-1, 0, 0, 0]` looks like this:

Instead, if I repeat the test with a big-endian dtype
```python
dtype = np.dtype('>f8')
all_data = read_all_data(dtype)
%timeit lazy_memmap(filenames[-1], dtype)[0, 0, 0].compute()
%timeit all_data[-1, 0, 0, 0].compute()
```
I get this:
```
100 loops, best of 3: 2.57 ms per loop
1 loop, best of 3: 929 ms per loop
```
The `ResourceProfiler` diagnostics also indicate much higher memory usage. Now the dask graph looks like this

There appears to be an extra call to `astype` which is interfering with the optimization somehow.
I'm using dask version 0.17.1.
dask/dask
diff --git a/dask/array/tests/test_array_core.py b/dask/array/tests/test_array_core.py
index cc233f4b4..87f420c25 100644
--- a/dask/array/tests/test_array_core.py
+++ b/dask/array/tests/test_array_core.py
@@ -335,6 +335,17 @@ def test_concatenate():
pytest.raises(ValueError, lambda: concatenate([a, b, c], axis=2))
[email protected]('dtypes', [(('>f8', '>f8'), '>f8'),
+ (('<f4', '<f8'), '<f8')])
+def test_concatenate_types(dtypes):
+ dts_in, dt_out = dtypes
+ arrs = [np.zeros(4, dtype=dt) for dt in dts_in]
+ darrs = [from_array(arr, chunks=(2,)) for arr in arrs]
+
+ x = concatenate(darrs, axis=0)
+ assert x.dtype == dt_out
+
+
def test_concatenate_unknown_axes():
dd = pytest.importorskip('dask.dataframe')
pd = pytest.importorskip('pandas')
@@ -1513,6 +1524,26 @@ def test_store_locks():
assert lock.acquire_count == nchunks
+def test_store_method_return():
+ d = da.ones((10, 10), chunks=(2, 2))
+ a = d + 1
+
+ for compute in [False, True]:
+ for return_stored in [False, True]:
+ at = np.zeros(shape=(10, 10))
+ r = a.store(
+ at, get=dask.threaded.get,
+ compute=compute, return_stored=return_stored
+ )
+
+ if return_stored:
+ assert isinstance(r, Array)
+ elif compute:
+ assert r is None
+ else:
+ assert isinstance(r, Delayed)
+
+
@pytest.mark.xfail(reason="can't lock with multiprocessing")
def test_store_multiprocessing_lock():
d = da.ones((10, 10), chunks=(2, 2))
diff --git a/odml/property.py b/odml/property.py
index 74e31f7..894296d 100644
--- a/odml/property.py
+++ b/odml/property.py
@@ -22,9 +22,9 @@ class BaseProperty(base.baseobject, Property):
dependency=None, dependency_value=None, dtype=None,
value_origin=None, id=None):
"""
- Create a new Property with a single value. The method will try to infer
- the value's dtype from the type of the value if not explicitly stated.
- Example for a property with
+ Create a new Property. If a value without an explicitly stated dtype
+ has been provided, the method will try to infer the value's dtype.
+ Example:
>>> p = Property("property1", "a string")
>>> p.dtype
>>> str
@@ -34,21 +34,25 @@ class BaseProperty(base.baseobject, Property):
>>> p = Property("prop", [2, 3, 4])
>>> p.dtype
>>> int
- :param name: The name of the property
- :param value: Some data value, this may be a list of homogeneous values
+ :param name: The name of the property.
+ :param value: Some data value, it can be a single value or
+ a list of homogeneous values.
:param unit: The unit of the stored data.
- :param uncertainty: the uncertainty (e.g. the standard deviation)
+ :param uncertainty: The uncertainty (e.g. the standard deviation)
associated with a measure value.
:param reference: A reference (e.g. an URL) to an external definition
of the value.
:param definition: The definition of the property.
:param dependency: Another property this property depends on.
:param dependency_value: Dependency on a certain value.
- :param dtype: the data type of the values stored in the property,
- if dtype is not given, the type is deduced from the values
+ :param dtype: The data type of the values stored in the property,
+ if dtype is not given, the type is deduced from the values.
+ Check odml.DType for supported data types.
:param value_origin: Reference where the value originated from e.g. a file name.
+ :param id: UUID string as specified in RFC 4122. If no id is provided,
+ an id will be generated and assigned. An id has to be unique
+ within an odML Document.
"""
- # TODO validate arguments
try:
if id is not None:
self._id = str(uuid.UUID(id))
@@ -84,7 +88,7 @@ class BaseProperty(base.baseobject, Property):
def new_id(self, id=None):
"""
- new_id sets the id of the current object to a RFC 4122 compliant UUID.
+ new_id sets the id of the current object to an RFC 4122 compliant UUID.
If an id was provided, it is assigned if it is RFC 4122 UUID format compliant.
If no id was provided, a new UUID is generated and assigned.
:param id: UUID string as specified in RFC 4122.
@@ -108,7 +112,7 @@ class BaseProperty(base.baseobject, Property):
@property
def dtype(self):
"""
- The data type of the value
+ The data type of the value. Check odml.DType for supported data types.
"""
return self._dtype
@@ -116,11 +120,9 @@ class BaseProperty(base.baseobject, Property):
def dtype(self, new_type):
"""
If the data type of a property value is changed, it is tried
- to convert the value to the new type.
- If this doesn't work, the change is refused.
-
- This behaviour can be overridden by directly accessing the *_dtype*
- attribute and adjusting the *data* attribute manually.
+ to convert existing values to the new type. If this doesn't work,
+ the change is refused. The dtype can always be changed, if
+ a Property does not contain values.
"""
# check if this is a valid type
if not dtypes.valid_type(new_type):
@@ -139,7 +141,7 @@ class BaseProperty(base.baseobject, Property):
@property
def parent(self):
"""
- The section containing this property
+ The section containing this property.
"""
return self._parent
@@ -170,29 +172,30 @@ class BaseProperty(base.baseobject, Property):
@property
def value(self):
"""
- Returns the value(s) stored in this property. Method always returns a list that
- is a copy (!) of the stored value. Changing this list will NOT change the property.
- For manipulation of the stored values use the append, extend, and direct access methods
- (using brackets).
+ Returns the value(s) stored in this property. Method always returns a list
+ that is a copy (!) of the stored value. Changing this list will NOT change
+ the property.
+ For manipulation of the stored values use the append, extend, and direct
+ access methods (using brackets).
For example:
- >> p = odml.Property("prop", value=[1, 2, 3])
- >> print(p.value)
+ >>> p = odml.Property("prop", value=[1, 2, 3])
+ >>> print(p.value)
[1, 2, 3]
- >> p.value.append(4)
- >> print(p.value)
+ >>> p.value.append(4)
+ >>> print(p.value)
[1, 2, 3]
Individual values can be accessed and manipulated like this:
>>> print(p[0])
[1]
- >> p[0] = 4
- >> print(p[0])
+ >>> p[0] = 4
+ >>> print(p[0])
[4]
The values can be iterated e.g. with a loop:
- >> for v in p.value:
- print(v)
+ >>> for v in p.value:
+ >>> print(v)
4
2
3
@@ -201,18 +204,18 @@ class BaseProperty(base.baseobject, Property):
def value_str(self, index=0):
"""
- Used to access typed data of the value as a string.
- Use data to access the raw type, i.e.:
+ Used to access typed data of the value at a specific
+ index position as a string.
"""
return dtypes.set(self._value[index], self._dtype)
def _validate_values(self, values):
"""
- Method ensures that the passed value(s) can be cast to the
- same dtype, i.e. that associated with this property or the
- inferred dtype of the first entry of the values list.
+ Method ensures that the passed value(s) can be cast to the
+ same dtype, i.e. the dtype associated with this property or the
+ inferred dtype of the first entry of the values list.
- :param values an iterable that contains the values
+ :param values: an iterable that contains the values.
"""
for v in values:
try:
@@ -227,7 +230,7 @@ class BaseProperty(base.baseobject, Property):
If new_value is a string, it will convert it to a list of
strings if the new_value contains embracing brackets.
- returns list of new_value
+ :return: list of new_value
"""
if isinstance(new_value, str):
if new_value[0] == "[" and new_value[-1] == "]":
@@ -241,21 +244,22 @@ class BaseProperty(base.baseobject, Property):
elif not isinstance(new_value, list):
new_value = [new_value]
else:
- raise ValueError("odml.Property._convert_value_input: unsupported data type for values: %s" % type(new_value))
+ raise ValueError("odml.Property._convert_value_input: "
+ "unsupported data type for values: %s" % type(new_value))
return new_value
@value.setter
def value(self, new_value):
"""
-
Set the value of the property discarding any previous information.
Method will try to convert the passed value to the dtype of
- the property and raise an ValueError, if not possible
+ the property and raise a ValueError if not possible.
- :param new_value a single value or list of values.
+ :param new_value: a single value or list of values.
"""
# Make sure boolean value 'False' gets through as well...
- if new_value is None or (isinstance(new_value, (list, tuple, str)) and len(new_value) == 0):
+ if new_value is None or \
+ (isinstance(new_value, (list, tuple, str)) and len(new_value) == 0):
self._value = []
return
@@ -285,6 +289,8 @@ class BaseProperty(base.baseobject, Property):
@uncertainty.setter
def uncertainty(self, new_value):
+ if new_value == "":
+ new_value = None
self._uncertainty = new_value
@property
@@ -339,9 +345,9 @@ class BaseProperty(base.baseobject, Property):
def remove(self, value):
"""
- Remove a value from this property and unset its parent.
- Raises a TypeError if this would cause the property not to hold any
- value at all. This can be circumvented by using the *_values* property.
+ Remove a value from this property. Only the first encountered
+ occurrence of the passed-in value is removed from the property's
+ list of values.
"""
if value in self._value:
self._value.remove(value)
@@ -358,6 +364,7 @@ class BaseProperty(base.baseobject, Property):
def clone(self):
"""
Clone this object to copy it independently to another document.
+ The id of the cloned object will be set to a different uuid.
"""
obj = super(BaseProperty, self).clone()
obj._parent = None
@@ -367,23 +374,23 @@ class BaseProperty(base.baseobject, Property):
return obj
def merge(self, other, strict=True):
- """Merges the property 'other' into self, if possible. Information
- will be synchronized. Method will raise an ValueError when the
+ """
+ Merges the property 'other' into self, if possible. Information
+ will be synchronized. Method will raise a ValueError when the
information in this property and the passed property are in
conflict.
- :param other a Property
- :param strict Bool value to indicate whether types should be
- implicitly converted even when information may be lost. Default is True, i.e. no conversion, and error will be raised if types do not match.
-
+ :param other: an odML Property.
+ :param strict: Bool value to indicate whether types should be implicitly converted
+ even when information may be lost. Default is True, i.e. no conversion,
+ and a ValueError will be raised if types do not match.
"""
- assert(isinstance(other, (BaseProperty)))
+ assert(isinstance(other, BaseProperty))
if strict and self.dtype != other.dtype:
raise ValueError("odml.Property.merge: src and dest dtypes do not match!")
if self.unit is not None and other.unit is not None and self.unit != other.unit:
- raise ValueError("odml.Property.merge: src and dest units (%s, %s) do not match!"
- % (other.unit, self.unit))
+ raise ValueError("odml.Property.merge: src and dest units (%s, %s) do not match!" % (other.unit, self.unit))
if self.definition is not None and other.definition is not None:
self_def = ''.join(map(str.strip, self.definition.split())).lower()
@@ -422,14 +429,14 @@ class BaseProperty(base.baseobject, Property):
def unmerge(self, other):
"""
- Stub that doesn't do anything for this class
+ Stub that doesn't do anything for this class.
"""
pass
def get_merged_equivalent(self):
"""
- Return the merged object (i.e. if the section is linked to another one,
- return the corresponding property of the linked section) or None
+ Return the merged object (i.e. if the parent section is linked to another one,
+ return the corresponding property of the linked section) or None.
"""
if self.parent is None or self.parent._merged is None:
return None
@@ -466,17 +473,18 @@ class BaseProperty(base.baseobject, Property):
def extend(self, obj, strict=True):
"""
- Extend the list of values stored in this property by the passed values. Method will
- raise an ValueError, if values cannot be converted to the current dtype. One can also pass
- another Property to append all values stored in that one. In this case units must match!
+ Extend the list of values stored in this property by the passed values. Method
+ will raise a ValueError if values cannot be converted to the current dtype.
+ One can also pass another Property to append all values stored in that one.
+ In this case units must match!
- :param obj single value, list of values or Property
- :param strict a Bool that controls whether dtypes must match. Default is True.
+ :param obj: single value, list of values or a Property.
+ :param strict: a Bool that controls whether dtypes must match. Default is True.
"""
if isinstance(obj, BaseProperty):
- if (obj.unit != self.unit):
- raise ValueError("odml.Property.append: src and dest units (%s, %s) do not match!"
- % (obj.unit, self.unit))
+ if obj.unit != self.unit:
+ raise ValueError("odml.Property.extend: src and dest units (%s, %s) "
+ "do not match!" % (obj.unit, self.unit))
self.extend(obj.value)
return
@@ -486,29 +494,41 @@ class BaseProperty(base.baseobject, Property):
new_value = self._convert_value_input(obj)
if len(new_value) > 0 and strict and dtypes.infer_dtype(new_value[0]) != self.dtype:
- raise ValueError("odml.Property.extend: passed value data type does not match dtype!");
+ raise ValueError("odml.Property.extend: "
+ "passed value data type does not match dtype!")
if not self._validate_values(new_value):
- raise ValueError("odml.Property.append: passed value(s) cannot be converted to "
- "data type \'%s\'!" % self._dtype)
+ raise ValueError("odml.Property.extend: passed value(s) cannot be converted "
+ "to data type \'%s\'!" % self._dtype)
self._value.extend([dtypes.get(v, self.dtype) for v in new_value])
def append(self, obj, strict=True):
"""
- Append a single value to the list of stored values. Method will raise an ValueError if
- the passed value cannot be converted to the current dtype.
+ Append a single value to the list of stored values. Method will raise
+ a ValueError if the passed value cannot be converted to the current dtype.
- :param obj the additional value.
- :param strict a Bool that controls whether dtypes must match. Default is True.
+ :param obj: the additional value.
+ :param strict: a Bool that controls whether dtypes must match. Default is True.
"""
+ # Ignore empty values before nasty stuff happens, but make sure
+ # 0 and False get through.
+ if obj in [None, "", [], {}]:
+ return
+
+ if not self.value:
+ self.value = obj
+ return
+
new_value = self._convert_value_input(obj)
if len(new_value) > 1:
raise ValueError("odml.property.append: Use extend to add a list of values!")
+
if len(new_value) > 0 and strict and dtypes.infer_dtype(new_value[0]) != self.dtype:
- raise ValueError("odml.Property.extend: passed value data type does not match dtype!");
+ raise ValueError("odml.Property.append: "
+ "passed value data type does not match dtype!")
if not self._validate_values(new_value):
- raise ValueError("odml.Property.append: passed value(s) cannot be converted to "
- "data type \'%s\'!" % self._dtype)
- self._value.append(dtypes.get(new_value[0], self.dtype))
+ raise ValueError("odml.Property.append: passed value(s) cannot be converted "
+ "to data type \'%s\'!" % self._dtype)
+ self._value.append(dtypes.get(new_value[0], self.dtype))
Property.append returns dtype error on unset dtype
When using `Property.append` on a Property where neither value nor dtype is set, a dtype mismatch ValueError is raised.
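Under the patch, the first `append` on an empty Property routes through the value setter, which infers the dtype. A small sketch of the fixed behavior (assuming the constructor defaults shown above):

```python
import odml

p = odml.Property("prop")  # no value and no dtype yet
p.append(5)                # previously raised a dtype ValueError;
                           # now falls back to the value setter
print(p.dtype)             # dtype inferred from the first value
print(p.value)             # [5]
```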
diff --git a/tornado/ioloop.py b/tornado/ioloop.py
index f6ec177b..48700139 100644
--- a/tornado/ioloop.py
+++ b/tornado/ioloop.py
@@ -1213,11 +1213,31 @@ class PeriodicCallback(object):
def _schedule_next(self):
if self._running:
- current_time = self.io_loop.time()
-
- if self._next_timeout <= current_time:
- callback_time_sec = self.callback_time / 1000.0
- self._next_timeout += (math.floor((current_time - self._next_timeout) /
- callback_time_sec) + 1) * callback_time_sec
-
+ self._update_next(self.io_loop.time())
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
+
+ def _update_next(self, current_time):
+ callback_time_sec = self.callback_time / 1000.0
+ if self._next_timeout <= current_time:
+ # The period should be measured from the start of one call
+ # to the start of the next. If one call takes too long,
+ # skip cycles to get back to a multiple of the original
+ # schedule.
+ self._next_timeout += (math.floor((current_time - self._next_timeout) /
+ callback_time_sec) + 1) * callback_time_sec
+ else:
+ # If the clock moved backwards, ensure we advance the next
+ # timeout instead of recomputing the same value again.
+ # This may result in long gaps between callbacks if the
+ # clock jumps backwards by a lot, but the far more common
+ # scenario is a small NTP adjustment that should just be
+ # ignored.
+ #
+ # Note that on some systems if time.time() runs slower
+ # than time.monotonic() (most common on windows), we
+ # effectively experience a small backwards time jump on
+ # every iteration because PeriodicCallback uses
+ # time.time() while asyncio schedules callbacks using
+ # time.monotonic().
+ # https://github.com/tornadoweb/tornado/issues/2333
+ self._next_timeout += callback_time_sec
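The scheduling arithmetic is easier to follow in isolation. Below is a standalone sketch of the `_update_next` math (a free function for illustration; times in seconds, `callback_time` in milliseconds as in `PeriodicCallback`):

```python
import math

def update_next(next_timeout, current_time, callback_time):
    """Sketch of PeriodicCallback._update_next with the state made explicit."""
    callback_time_sec = callback_time / 1000.0
    if next_timeout <= current_time:
        # Overran: skip whole cycles to stay on a multiple of the schedule.
        next_timeout += (math.floor((current_time - next_timeout) /
                                    callback_time_sec) + 1) * callback_time_sec
    else:
        # Clock went backwards: advance one period rather than
        # recomputing the same timeout again.
        next_timeout += callback_time_sec
    return next_timeout

print(update_next(1000, 1000, 10000))  # 1010.0: on schedule
print(update_next(1010, 1032, 10000))  # 1040.0: overran, skips the 1030 slot
print(update_next(1020, 1018, 10000))  # 1030.0: small backwards jump ignored
```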
ioloop: PeriodicCallback executes too often on windows
## Here is the code:
import math
import logging
from crontab import CronTab
from tornado.ioloop import PeriodicCallback, IOLoop

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

class CronTabCallback(PeriodicCallback):
    def __init__(self, callback, schedule):
        self._callback = callback
        self._crontab = CronTab(schedule)
        super(CronTabCallback, self).__init__(self.run, self._calc_callbacktime())

    def _calc_callbacktime(self, now=None):
        return math.ceil(self._crontab.next(now)) * 1000.0

    def run(self):
        return self._callback()

    def _schedule_next(self):
        self.callback_time = self._calc_callbacktime()
        logging.info('calc ---------------------')
        logging.info('delay %s' % self.callback_time)
        logging.info('last execute %s' % self._next_timeout)
        last = self._next_timeout
        super(CronTabCallback, self)._schedule_next()
        if last == self._next_timeout:
            logging.error('error !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        logging.info('current: %s' % self.io_loop.time())
        logging.info('calc next: %s' % self._next_timeout)
        logging.info('--------------------------\n')

def crontab(schedule):
    def decorator(func):
        CronTabCallback(func, schedule).start()
        return func
    return decorator

@crontab('*/1 * * * *')
def run():
    logging.info('execute ... \n')

if __name__ == '__main__':
    IOLoop.current().start()
## Here is the console log
2018-03-30 11:33:00,311 - asyncio - DEBUG - Using selector: SelectSelector
2018-03-30 11:33:00,316 - root - INFO - calc ---------------------
2018-03-30 11:33:00,316 - root - INFO - delay 60000.0
2018-03-30 11:33:00,316 - root - INFO - last execute 1522380780.3169544
2018-03-30 11:33:00,316 - root - INFO - current: 1522380780.3169544
2018-03-30 11:33:00,316 - root - INFO - **calc next: 1522380840.3169544**
2018-03-30 11:33:00,316 - root - INFO - --------------------------
**2018-03-30 11:34:00,313** - root - INFO - execute ...
2018-03-30 11:34:00,313 - root - INFO - calc ---------------------
2018-03-30 11:34:00,313 - root - INFO - delay 60000.0
2018-03-30 11:34:00,313 - root - INFO - last execute 1522380840.3169544
2018-03-30 11:34:00,313 - root - ERROR - error !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2018-03-30 11:34:00,313 - root - INFO - current: 1522380840.3139544
2018-03-30 11:34:00,313 - root - INFO - calc next: 1522380840.3169544
2018-03-30 11:34:00,313 - root - INFO - --------------------------
2018-03-30 11:34:00,318 - root - INFO - execute ...
2018-03-30 11:34:00,318 - root - INFO - calc ---------------------
2018-03-30 11:34:00,318 - root - INFO - delay 60000.0
2018-03-30 11:34:00,318 - root - INFO - last execute 1522380840.3169544
2018-03-30 11:34:00,318 - root - INFO - current: 1522380840.3189545
2018-03-30 11:34:00,318 - root - INFO - calc next: 1522380900.3169544
2018-03-30 11:34:00,318 - root - INFO - --------------------------
## Environment:
OS: windows 7
Python: python 3.6
Dependent library: crontab 0.22.0
Tornado 4.5.1: python2 (ok), python3 (ok)
Tornado 5.0.1: python2 (ok), python3 (Linux ok, Windows has the issue)
tornadoweb/tornado
diff --git a/tornado/test/ioloop_test.py b/tornado/test/ioloop_test.py
index 09f71c5d..9f7c1847 100644
--- a/tornado/test/ioloop_test.py
+++ b/tornado/test/ioloop_test.py
@@ -789,6 +789,62 @@ class TestPeriodicCallback(unittest.TestCase):
io_loop.close()
+class TestPeriodicCallbackMath(unittest.TestCase):
+ def simulate_calls(self, pc, durations):
+ """Simulate a series of calls to the PeriodicCallback.
+
+ Pass a list of call durations in seconds (negative values
+ work to simulate clock adjustments during the call, or more or
+ less equivalently, between calls). This method returns the
+ times at which each call would be made.
+ """
+ calls = []
+ now = 1000
+ pc._next_timeout = now
+ for d in durations:
+ pc._update_next(now)
+ calls.append(pc._next_timeout)
+ now = pc._next_timeout + d
+ return calls
+
+ def test_basic(self):
+ pc = PeriodicCallback(None, 10000)
+ self.assertEqual(self.simulate_calls(pc, [0] * 5),
+ [1010, 1020, 1030, 1040, 1050])
+
+ def test_overrun(self):
+ # If a call runs for too long, we skip entire cycles to get
+ # back on schedule.
+ call_durations = [9, 9, 10, 11, 20, 20, 35, 35, 0, 0, 0]
+ expected = [
+ 1010, 1020, 1030, # first 3 calls on schedule
+ 1050, 1070, # next 2 delayed one cycle
+ 1100, 1130, # next 2 delayed 2 cycles
+ 1170, 1210, # next 2 delayed 3 cycles
+ 1220, 1230, # then back on schedule.
+ ]
+
+ pc = PeriodicCallback(None, 10000)
+ self.assertEqual(self.simulate_calls(pc, call_durations),
+ expected)
+
+ def test_clock_backwards(self):
+ pc = PeriodicCallback(None, 10000)
+ # Backwards jumps are ignored, potentially resulting in a
+ # slightly slow schedule (although we assume that when
+ # time.time() and time.monotonic() are different, time.time()
+ # is getting adjusted by NTP and is therefore more accurate)
+ self.assertEqual(self.simulate_calls(pc, [-2, -1, -3, -2, 0]),
+ [1010, 1020, 1030, 1040, 1050])
+
+ # For big jumps, we should perhaps alter the schedule, but we
+ # don't currently. This trace shows that we run callbacks
+ # every 10s of time.time(), but the first and second calls are
+ # 110s of real time apart because the backwards jump is
+ # ignored.
+ self.assertEqual(self.simulate_calls(pc, [-100, 0, 0]),
+ [1010, 1020, 1030])
+
class TestIOLoopConfiguration(unittest.TestCase):
def run_python(self, *statements):
statements = [
coveralls:
[Coverage Status](https://coveralls.io/builds/16288275)
Coverage remained the same at 100.0% when pulling **19757302b91677ce5e659bbb28fc124dd5ef4ab7 on janjitsu:master** into **9e1660516600a94f949259465c371acf7256f5ae on andreroggeri:master**.
andreroggeri: Thank you very much @janjitsu 🤑
Apple-clang 9.1 not supported
Please add apple-clang 9.1 to the supported platforms. I had a problem with missing libraries at compile time. I fixed it by manually editing ```.conan/settings.yml```.
My CMake output:
```
-- The CXX compiler identification is AppleClang 9.1.0.9020039
```
Clang version:
```sh
$ clang++ -v
Apple LLVM version 9.1.0 (clang-902.0.39.1)
Target: x86_64-apple-darwin17.5.0
Thread model: posix
InstalledDir: /Library/Developer/CommandLineTools/usr/bin
```
Conan version:
```
$ conan -v
Conan version 1.2.0
```
diff --git a/conans/client/userio.py b/conans/client/userio.py
index b42d8e39d..c64d67ff9 100644
--- a/conans/client/userio.py
+++ b/conans/client/userio.py
@@ -40,6 +40,7 @@ class UserIO(object):
def request_login(self, remote_name, username=None):
"""Request user to input their name and password
:param username If username is specified it only request password"""
+ self._raise_if_non_interactive()
user_input = ''
while not username:
try:
Non-interactive mode also prevents prompt from `conan user`
The new non-interactive mode also prevents the prompt from `conan user <name> -p`. This is not what is specified in the documentation. (And also not what was originally implemented.)
I hope the prompt can be re-enabled for this particular case. If not, the documentation should be changed.
Oh, and a purely cosmetic remark: the non-interactive error only appears after the prompt has been displayed.
Using Conan 1.2 on Windows 7.
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
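Addressing the cosmetic remark, the patch above raises before any prompting happens. A stripped-down sketch of the control flow (class and method names from the diff; bodies illustrative):

```python
class UserIO(object):
    def __init__(self, interactive=True):
        self._interactive = interactive

    def _raise_if_non_interactive(self):
        if not self._interactive:
            raise Exception("Conan interactive mode disabled")

    def request_login(self, remote_name, username=None):
        self._raise_if_non_interactive()  # fail before any prompt is shown
        while not username:
            username = input("Remote '%s' username: " % remote_name)
        return username
```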
main() takes exactly 1 argument (0 given)
I'm getting the following error, introduced in 48d22e4d62bb6e216cd8932739ea0be4735e2588 (Determined by `git bisect`). I don't have the time to dig right now, but I will later.
```
$ aws-google-auth --help
Traceback (most recent call last):
File "/Users/mide/virtualenv/aws-google-auth-dev/bin/aws-google-auth", line 11, in <module>
load_entry_point('aws-google-auth', 'console_scripts', 'aws-google-auth')()
TypeError: main() takes exactly 1 argument (0 given)
```
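The fix splits the entry point: `main()` takes no arguments (as `console_scripts` expects) and forwards `sys.argv` to `cli(argv)`. A minimal sketch of that pattern (function bodies illustrative; the names match the test diff below):

```python
import sys

def cli(args):
    # parse args and run the tool
    print("running with:", args)

def main():
    # console_scripts entry points are invoked with no arguments,
    # so main() reads sys.argv itself.
    cli(sys.argv[1:])
```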
cevoaustralia/aws-google-auth
diff --git a/aws_google_auth/tests/test_init.py b/aws_google_auth/tests/test_init.py
index 9a298a8..cd818d2 100644
--- a/aws_google_auth/tests/test_init.py
+++ b/aws_google_auth/tests/test_init.py
@@ -11,6 +11,20 @@ class TestInit(unittest.TestCase):
def setUp(self):
pass
+ @patch('aws_google_auth.cli', spec=True)
+ def test_main_method_has_no_parameters(self, mock_cli):
+ """
+ This is the entrypoint for the cli tool, and should require no parameters
+
+ :param mock_cli:
+ :return:
+ """
+
+ # Function under test
+ aws_google_auth.main()
+
+ self.assertTrue(mock_cli.called)
+
@patch('aws_google_auth.exit_if_unsupported_python', spec=True)
@patch('aws_google_auth.resolve_config', spec=True)
@patch('aws_google_auth.process_auth', spec=True)
@@ -22,7 +36,7 @@ class TestInit(unittest.TestCase):
aws_google_auth.resolve_config.return_value = mock_config
# Function under test
- aws_google_auth.main([])
+ aws_google_auth.cli([])
self.assertTrue(exit_if_unsupported_python.called)
self.assertTrue(resolve_config.called)
diff --git a/setup.py b/setup.py
index c322c9e..7827c73 100755
--- a/setup.py
+++ b/setup.py
@@ -7,20 +7,18 @@ from setuptools import setup, find_packages
# project specific parameters
PROJECT_NAME = 'friendlypins'
PROJECT_DEPENDENCIES = [
- #'requests[security]>=2.0.1',
- 'requests',
- 'six',
- 'tqdm']
+ 'requests<3.0.0,>=2.0.0',
+ 'six<2.0.0,>=1.0.0',]
PROJECT_DEV_DEPENDENCIES = [
- 'wheel',
- 'twine',
- 'pytest',
- 'pytest-cov',
- 'mock',
- 'radon',
- 'pylint',
- 'sphinx>=1.2.3',
- 'tox']
+ 'wheel<1.0.0',
+ 'twine<2.0.0',
+ 'pytest>=3.5.0,<4.0.0',
+ 'pytest-cov>=2.5.0,<3.0.0',
+ 'mock>=2.0.0,<3.0.0',
+ 'radon>=2.2.0,<3.0.0',
+ 'pylint>=1.8.0,<2.0.0',
+ 'sphinx>=1.2.3,<2.0.0',
+ 'tox>=3.0.0,<4.0.0']
PROJECT_DESCRIPTION = 'Python wrapper around the Pinterest developer APIs'
PROJECT_KEYWORDS = 'pinterest api wrapper library'
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
diff --git a/src/friendlypins/api.py b/src/friendlypins/api.py
index 8b28950..c9540e1 100644
--- a/src/friendlypins/api.py
+++ b/src/friendlypins/api.py
@@ -1,10 +1,25 @@
"""Primary entry point for the Friendly Pinterest library"""
from __future__ import print_function
+import logging
+import requests
+from friendlypins.user import User
class API(object): # pylint: disable=too-few-public-methods
"""High level abstraction for the core Pinterest API"""
- def __init__(self):
- self.name = "hello"
+
+ # URL of the root namespace for the Pinterest API
+ _root_url = 'https://api.pinterest.com/v1'
+
+ def __init__(self, personal_access_token):
+ """Constructor
+
+ :param str personal_access_token:
+ API authentication token used for secure access to a users'
+ Pinterest data
+ """
+
+ self._log = logging.getLogger(__name__)
+ self._token = personal_access_token
def get_user(self, username=None):
"""Gets all primitives associated with a particular Pinterst user
@@ -15,11 +30,18 @@ class API(object): # pylint: disable=too-few-public-methods
returns: Pinterest user with the given name
rtype: :class:`friendly_pinterest.user.User`
"""
- print(self.name)
if username:
- return None
- return None
+ raise NotImplementedError(
+ "Querying arbitrary Pinterest users is not yet supported.")
+ else:
+ temp_url = "{0}/me".format(self._root_url)
+ temp_url += "?access_token={0}".format(self._token)
+ response = requests.get(temp_url)
+ response.raise_for_status()
+ assert 'data' in response.json()
+ return User(response.json()['data'])
+# pylint: disable-all
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/user.py b/src/friendlypins/user.py
new file mode 100644
index 0000000..02dd4e7
--- /dev/null
+++ b/src/friendlypins/user.py
@@ -0,0 +1,44 @@
+"""Interfaces for interacting with Pinterest users"""
+import logging
+
+class User(object):
+ """Abstraction around a Pinterest user and their associated data"""
+
+ def __init__(self, data):
+ """Constructor
+
+ :param dict data: JSON data parsed from the API
+ """
+ self._log = logging.getLogger(__name__)
+ self._data = data
+
+ @property
+ def unique_id(self):
+ """Gets the internal unique ID associated with the user
+ :rtype: :class:`str`
+ """
+ return self._data['id']
+
+ @property
+ def first_name(self):
+ """Gets the first name of the user
+ :rtype: :class:`str`
+ """
+ return self._data['first_name']
+
+ @property
+ def last_name(self):
+ """Gets the last name of the user
+ :rtype: :class:`str`
+ """
+ return self._data['last_name']
+
+ @property
+ def url(self):
+ """Gets the URL of the users profile
+ :rtype: :class:`str`
+ """
+ return self._data['url']
+
+if __name__ == "__main__":
+ pass
Add support for basic connectivity
Implement a basic init method for the API class, allowing connections to be made to a Pinterest account by specifying a user's API token, and performing a simple query to get information about the user whose token we're authenticating with.
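A short usage sketch of the new connectivity (token value hypothetical; class and property names from the diffs above):

```python
from friendlypins.api import API

obj = API("my_personal_access_token")  # hypothetical token
user = obj.get_user()  # queries https://api.pinterest.com/v1/me
print(user.unique_id, user.first_name, user.last_name, user.url)
```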
diff --git a/conans/client/action_recorder.py b/conans/client/action_recorder.py
index bf573d9cb..d771bf075 100644
--- a/conans/client/action_recorder.py
+++ b/conans/client/action_recorder.py
@@ -7,6 +7,8 @@ from datetime import datetime
from collections import namedtuple, OrderedDict
# Install actions
+from conans.model.ref import ConanFileReference, PackageReference
+
INSTALL_CACHE = 0
INSTALL_DOWNLOADED = 1
INSTALL_BUILT = 2
@@ -32,8 +34,11 @@ class ActionRecorder(object):
def __init__(self):
self._inst_recipes_actions = OrderedDict()
self._inst_packages_actions = OrderedDict()
+ self._inst_recipes_develop = set() # Recipes being created (to set dependency=False)
# ###### INSTALL METHODS ############
+ def add_recipe_being_developed(self, reference):
+ self._inst_recipes_develop.add(reference)
def _add_recipe_action(self, reference, action):
if reference not in self._inst_recipes_actions:
@@ -90,6 +95,12 @@ class ActionRecorder(object):
ret.append((_package_ref, _package_action))
return ret
+ def in_development_recipe(self, reference):
+ return reference in self._inst_recipes_develop
+
+ def get_info(self):
+ return self.get_install_info()
+
def get_install_info(self):
ret = {"error": self.install_errored,
"installed": []}
@@ -98,11 +109,15 @@ class ActionRecorder(object):
error = None if the_action.type != INSTALL_ERROR else the_action.doc
doc = {"id": str(the_ref),
"downloaded": the_action.type == INSTALL_DOWNLOADED,
- "built": the_action.type == INSTALL_BUILT,
"cache": the_action.type == INSTALL_CACHE,
"error": error,
"remote": the_action.doc.get("remote", None),
"time": the_action.time}
+ if isinstance(the_ref, ConanFileReference):
+ doc["dependency"] = not self.in_development_recipe(the_ref)
+ else:
+ doc["built"] = the_action.type == INSTALL_BUILT
+
if doc["remote"] is None and error:
doc["remote"] = error.get("remote", None)
return doc
@@ -111,7 +126,6 @@ class ActionRecorder(object):
# Could be a download and then an access to cache, we want the first one
action = actions[0]
recipe_doc = get_doc_for_ref(ref, action)
- del recipe_doc["built"] # Avoid confusions
packages = self._get_installed_packages(ref)
tmp = {"recipe": recipe_doc,
"packages": []}
diff --git a/conans/client/command.py b/conans/client/command.py
index 19513161d..f805caf51 100644
--- a/conans/client/command.py
+++ b/conans/client/command.py
@@ -220,17 +220,21 @@ class Command(object):
cwd = os.getcwd()
+ info = None
try:
- self._conan.create(args.path, name, version, user, channel,
- args.profile, args.settings, args.options,
- args.env, args.test_folder, args.not_export,
- args.build, args.keep_source, args.keep_build, args.verify,
- args.manifests, args.manifests_interactive,
- args.remote, args.update,
- test_build_folder=args.test_build_folder)
+ info = self._conan.create(args.path, name, version, user, channel,
+ args.profile, args.settings, args.options,
+ args.env, args.test_folder, args.not_export,
+ args.build, args.keep_source, args.keep_build, args.verify,
+ args.manifests, args.manifests_interactive,
+ args.remote, args.update,
+ test_build_folder=args.test_build_folder)
+ except ConanException as exc:
+ info = exc.info
+ raise
finally:
- if args.json:
- self._outputer.json_install(self._conan.recorder.get_install_info(), args.json, cwd)
+ if args.json and info:
+ self._outputer.json_install(info, args.json, cwd)
def download(self, *args):
"""Downloads recipe and binaries to the local cache, without using settings. It works
@@ -289,34 +293,38 @@ class Command(object):
args = parser.parse_args(*args)
cwd = os.getcwd()
+ info = None
try:
try:
reference = ConanFileReference.loads(args.path_or_reference)
except ConanException:
- self._conan.install(path=args.path_or_reference,
- settings=args.settings, options=args.options,
- env=args.env,
- remote=args.remote,
- verify=args.verify, manifests=args.manifests,
- manifests_interactive=args.manifests_interactive,
- build=args.build, profile_name=args.profile,
- update=args.update, generators=args.generator,
- no_imports=args.no_imports,
- install_folder=args.install_folder)
+ info = self._conan.install(path=args.path_or_reference,
+ settings=args.settings, options=args.options,
+ env=args.env,
+ remote=args.remote,
+ verify=args.verify, manifests=args.manifests,
+ manifests_interactive=args.manifests_interactive,
+ build=args.build, profile_name=args.profile,
+ update=args.update, generators=args.generator,
+ no_imports=args.no_imports,
+ install_folder=args.install_folder)
else:
- self._conan.install_reference(reference, settings=args.settings,
- options=args.options,
- env=args.env,
- remote=args.remote,
- verify=args.verify, manifests=args.manifests,
- manifests_interactive=args.manifests_interactive,
- build=args.build, profile_name=args.profile,
- update=args.update,
- generators=args.generator,
- install_folder=args.install_folder)
+ info = self._conan.install_reference(reference, settings=args.settings,
+ options=args.options,
+ env=args.env,
+ remote=args.remote,
+ verify=args.verify, manifests=args.manifests,
+ manifests_interactive=args.manifests_interactive,
+ build=args.build, profile_name=args.profile,
+ update=args.update,
+ generators=args.generator,
+ install_folder=args.install_folder)
+ except ConanException as exc:
+ info = exc.info
+ raise
finally:
- if args.json:
- self._outputer.json_install(self._conan.recorder.get_install_info(), args.json, cwd)
+ if args.json and info:
+ self._outputer.json_install(info, args.json, cwd)
def config(self, *args):
"""Manages Conan configuration. Edits the conan.conf or installs config files.
diff --git a/conans/client/conan_api.py b/conans/client/conan_api.py
index 98ee25d5a..dd50689b2 100644
--- a/conans/client/conan_api.py
+++ b/conans/client/conan_api.py
@@ -65,15 +65,21 @@ def api_method(f):
the_self = args[0]
try:
log_command(f.__name__, kwargs)
+ the_self._init_manager()
with tools.environment_append(the_self._client_cache.conan_config.env_vars):
# Patch the globals in tools
- return f(*args, **kwargs)
+ ret = f(*args, **kwargs)
+ if ret is None: # FIXME: Probably each method should manage its return
+ return the_self._recorder.get_info()
+ return ret
except Exception as exc:
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except:
pass
+ if isinstance(exc, ConanException):
+ exc.info = the_self._recorder.get_info()
raise
return wrapper
@@ -207,13 +213,22 @@ class ConanAPIV1(object):
self._user_io = user_io
self._runner = runner
self._remote_manager = remote_manager
- self.recorder = ActionRecorder()
+ self._search_manager = search_manager
+ self._settings_preprocessor = _settings_preprocessor
self._registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
- self._manager = ConanManager(client_cache, user_io, runner, remote_manager, search_manager,
- _settings_preprocessor, self.recorder, self._registry)
+ self._recorder = None
+ self._manager = None
+
if not interactive:
self._user_io.disable_input()
+ def _init_manager(self):
+ """Every api call gets a new recorder and new manager"""
+ self._recorder = ActionRecorder()
+ self._manager = ConanManager(self._client_cache, self._user_io, self._runner,
+ self._remote_manager, self._search_manager,
+ self._settings_preprocessor, self._recorder, self._registry)
+
@api_method
def new(self, name, header=False, pure_c=False, test=False, exports_sources=False, bare=False,
cwd=None, visual_versions=None, linux_gcc_versions=None, linux_clang_versions=None,
@@ -319,6 +334,7 @@ class ConanAPIV1(object):
"or it doesn't have a conanfile.py" % tf)
test_conanfile_path = get_test_conanfile_path(test_folder)
+ self._recorder.add_recipe_being_developed(reference)
if test_conanfile_path:
pt = PackageTester(self._manager, self._user_io)
diff --git a/conans/errors.py b/conans/errors.py
index 256f20a3f..e5b73a087 100644
--- a/conans/errors.py
+++ b/conans/errors.py
@@ -68,7 +68,9 @@ class ConanException(Exception):
"""
Generic conans exception
"""
- pass
+ def __init__(self, *args, **kwargs):
+ self.info = None
+ super(ConanException, self).__init__(*args, **kwargs)
class NoRemoteAvailable(ConanException):
Issues with the JSON created by conan create
Conan version: 1.2.0
Command: `conan create --json ./foo.json . myteam/unstable`
The generated JSON does not separate the dependency packages of the built project from the built package itself. This makes it difficult to parse.
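With the new `dependency` flag, a consumer can tell the created recipe apart from its requirements. Rough shape of the report (field names from `ActionRecorder.get_install_info` above; values illustrative):

```python
report = {
    "error": False,
    "installed": [{
        # the recipe under `conan create` itself
        "recipe": {"id": "foo/1.0@myteam/unstable", "dependency": False,
                   "downloaded": False, "cache": True, "error": None,
                   "remote": None, "time": "..."},
        "packages": [{"id": "<package_id>", "built": True,
                      "downloaded": False, "cache": False, "error": None,
                      "remote": None, "time": "..."}],
    }],
    # requirements appear as further entries with "dependency": True
}
```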
diff --git a/dimod/binary_quadratic_model.py b/dimod/binary_quadratic_model.py
index 47a6c83b..80397f47 100644
--- a/dimod/binary_quadratic_model.py
+++ b/dimod/binary_quadratic_model.py
@@ -5,6 +5,7 @@ todo - describe Ising, QUBO and BQM
"""
from __future__ import absolute_import, division
+from collections import Sized, Container, Iterable
from numbers import Number
from six import itervalues, iteritems, iterkeys
@@ -15,7 +16,7 @@ from dimod.utilities import resolve_label_conflict
from dimod.vartypes import Vartype
-class BinaryQuadraticModel(object):
+class BinaryQuadraticModel(Sized, Container, Iterable):
"""Encodes a binary quadratic model.
Binary quadratic model is the superclass that contains the `Ising model`_ and the QUBO_.
@@ -60,12 +61,23 @@ class BinaryQuadraticModel(object):
class assume that they are numeric.
Examples:
- This example creates a model with three spin variables.
+ This example creates a binary quadratic model with three spin variables.
- >>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
- ... {(0, 1): .5, (1, 2): 1.5},
- ... 1.4,
- ... dimod.SPIN)
+ >>> bqm = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
+ ... {(0, 1): .5, (1, 2): 1.5},
+ ... 1.4,
+ ... dimod.SPIN)
+
+ Variables can be any hashable object
+
+ >>> bqm = dimod.BinaryQuadraticModel({'a': 0.0, 'b': -1.0, 'c': 0.5},
+ ... {('a', 'b'): -1.0, ('b', 'c'): 1.5},
+ ... 1.4,
+ ... dimod.SPIN)
+ >>> len(bqm)
+ 3
+ >>> 'b' in bqm
+ True
Attributes:
linear (dict[variable, bias]):
@@ -195,7 +207,14 @@ class BinaryQuadraticModel(object):
def __len__(self):
"""The length is number of variables."""
- return len(self.linear)
+ return self.adj.__len__()
+
+ def __contains__(self, v):
+ """The variables"""
+ return self.adj.__contains__(v)
+
+ def __iter__(self):
+ return self.adj.__iter__()
##################################################################################################
# vartype properties
diff --git a/dimod/embedding/transforms.py b/dimod/embedding/transforms.py
index 332e0ec2..2b2d3b52 100644
--- a/dimod/embedding/transforms.py
+++ b/dimod/embedding/transforms.py
@@ -386,8 +386,11 @@ def unembed_response(target_response, embedding, source_bqm, chain_break_method=
chain_break_method (function, optional, default=:func:`.majority_vote`):
The method used to resolve chain breaks.
+ Returns:
+ :obj:`.Response`
+
"""
- if any(v not in source_bqm.linear for v in embedding):
+ if any(v not in embedding for v in source_bqm):
raise ValueError("given bqm does not match the embedding")
energies = []
BinaryQuadraticModel should have a correct abstract base class
Should be `collections.abc.Sized` as currently implemented.
Also could be `collections.abc.Container` or even `collections.abc.Collection`.
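A quick sketch of what the abc mixins provide (this mirrors the docstring example in the diff above):

```python
import dimod

bqm = dimod.BinaryQuadraticModel({'a': 0.0, 'b': -1.0, 'c': 0.5},
                                 {('a', 'b'): -1.0, ('b', 'c'): 1.5},
                                 1.4, dimod.SPIN)

assert len(bqm) == 3                 # Sized: number of variables
assert 'b' in bqm                    # Container: variable membership
assert set(bqm) == {'a', 'b', 'c'}   # Iterable: iterate over variables
```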
diff --git a/dimod/response.py b/dimod/response.py
index d3b2c38d..59d96329 100644
--- a/dimod/response.py
+++ b/dimod/response.py
@@ -87,21 +87,24 @@ class Response(Iterable, Sized):
self._samples_matrix = samples_matrix
num_samples, num_variables = samples_matrix.shape
- if not isinstance(data_vectors, dict):
+ if not isinstance(data_vectors, Mapping):
raise TypeError("expected 'data_vectors' to be a dict")
if 'energy' not in data_vectors:
raise ValueError("energy must be provided")
else:
- data_vectors = data_vectors.copy() # shallow copy
- data_vectors['energy'] = np.asarray(data_vectors['energy'])
- for vector in data_vectors.values():
- # todo - check that is a vector and that has the right length
- if isinstance(vector, (np.ndarray, list)):
- if len(vector) != num_samples:
- raise ValueError(("expected data vector {} (length {}) to be a vector of length {}"
- "").format(vector, len(vector), num_samples))
- else:
- raise TypeError("expected data vector {} to be a list of NumPy array".format(vector))
+ data_vectors = dict(data_vectors) # shallow copy
+
+ for key, vector in iteritems(data_vectors):
+ try:
+ data_vectors[key] = vector = np.asarray(vector)
+ except (ValueError, TypeError):
+ raise TypeError("expected data vector {} to be array-like".format(key))
+
+ shape = vector.shape
+ if not shape or shape[0] != num_samples:
+ raise ValueError(("expected data vector {} (shape {}) to have {} rows"
+ "").format(key, vector.shape, num_samples))
+
self._data_vectors = data_vectors
# vartype is checked by the decorator
@@ -824,10 +827,13 @@ class Response(Iterable, Sized):
# Viewing a Response
###############################################################################################
- def samples(self, sorted_by='energy'):
+ def samples(self, n=None, sorted_by='energy'):
"""Iterate over the samples in the response.
Args:
+ n (int, optional, default=None):
+ The maximum number of samples to provide. If None, all are provided.
+
sorted_by (str/None, optional, default='energy'):
Selects the `data_vector` used to sort the samples. If None, the samples are yielded in
the order given by the samples matrix.
@@ -861,13 +867,21 @@ class Response(Iterable, Sized):
{'a': -1, 'b': 1}
"""
+ num_samples = len(self)
+
+ if n is not None:
+ for sample in itertools.islice(self.samples(n=None, sorted_by=sorted_by), n):
+ yield sample
+ return
+
if sorted_by is None:
- order = np.arange(len(self))
+ order = np.arange(num_samples)
else:
order = np.argsort(self.data_vectors[sorted_by])
samples = self.samples_matrix
label_mapping = self.label_to_idx
+
for idx in order:
yield SampleView(idx, self)
data_vectors should have either numpy array values or list values, but not both.
https://github.com/dwavesystems/dimod/blob/7b75e47ce4fec541e432f84367ba58393934b941/dimod/response.py#L40
Making it be more than one thing requires the parser of the response to inspect the object before using it. If we want the benefits of a numpy array for some of the data_vectors, I think it's worth making everything a numpy array.
If we don't need it to be a numpy array, might as well make them all lists?
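On the related `samples(n=...)` addition in the diff above, a hedged usage sketch (assuming `dimod.ExactSolver` from the same package version):

```python
import dimod

bqm = dimod.BinaryQuadraticModel({'a': 1.0, 'b': -1.0}, {('a', 'b'): 0.5},
                                 0.0, dimod.SPIN)
resp = dimod.ExactSolver().sample(bqm)

# n limits how many samples are yielded; n=None keeps the old
# yield-everything behavior.
for sample in resp.samples(n=2, sorted_by='energy'):
    print(dict(sample))
```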
diff --git a/stix2/properties.py b/stix2/properties.py
index ca7f04c..41841b6 100644
--- a/stix2/properties.py
+++ b/stix2/properties.py
@@ -129,6 +129,8 @@ class ListProperty(Property):
# constructor again
result.append(valid)
continue
+ elif type(self.contained) is DictionaryProperty:
+ obj_type = dict
else:
obj_type = self.contained
diff --git a/stix2/utils.py b/stix2/utils.py
index 9febd78..4ef3d23 100644
--- a/stix2/utils.py
+++ b/stix2/utils.py
@@ -166,7 +166,7 @@ def get_dict(data):
def find_property_index(obj, properties, tuple_to_find):
"""Recursively find the property in the object model, return the index
according to the _properties OrderedDict. If it's a list look for
- individual objects.
+ individual objects. Returns an integer indicating its location.
"""
from .base import _STIXBase
try:
@@ -183,6 +183,11 @@ def find_property_index(obj, properties, tuple_to_find):
tuple_to_find)
if val is not None:
return val
+ elif isinstance(item, dict):
+ for idx, val in enumerate(sorted(item)):
+ if (tuple_to_find[0] == val and
+ item.get(val) == tuple_to_find[1]):
+ return idx
elif isinstance(pv, dict):
if pv.get(tuple_to_find[0]) is not None:
try:
Create an Extension with a Dict nested inside a List
Hi,
I'm trying to create a CyberObservable Extension for UserAccount which has to contain a DictionaryProperty() inside a ListProperty(). Is this possible? Because when I try to create an extension like this one
```
@CustomExtension(UserAccount, 'ssh_keys', {
'keys': ListProperty(DictionaryProperty(), required=True)
})
class SSHKeysExtension:
pass
```
and use it with example = SSHKeysExtension(keys=[{'test123': 123, 'test345': 'aaaa'}])
I get a lot of strange errors (the library seems to interpret the dict as parameters for `__init__()`).
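For reference, the reporter's snippet written against the fix (this mirrors the new test below):

```python
import stix2
from stix2.properties import DictionaryProperty, ListProperty

@stix2.observables.CustomExtension(stix2.UserAccount, 'ssh_keys', [
    ('keys', ListProperty(DictionaryProperty, required=True)),
])
class SSHKeysExtension:
    pass

example = SSHKeysExtension(keys=[{'test123': 123, 'test345': 'aaaa'}])
print(example)
```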
oasis-open/cti-python-stix2
diff --git a/stix2/test/test_custom.py b/stix2/test/test_custom.py
index a14503f..b45670f 100644
--- a/stix2/test/test_custom.py
+++ b/stix2/test/test_custom.py
@@ -479,6 +479,27 @@ def test_custom_extension_wrong_observable_type():
assert 'Cannot determine extension type' in excinfo.value.reason
[email protected]("data", [
+ """{
+ "keys": [
+ {
+ "test123": 123,
+ "test345": "aaaa"
+ }
+ ]
+}""",
+])
+def test_custom_extension_with_list_and_dict_properties_observable_type(data):
+ @stix2.observables.CustomExtension(stix2.UserAccount, 'some-extension', [
+ ('keys', stix2.properties.ListProperty(stix2.properties.DictionaryProperty, required=True))
+ ])
+ class SomeCustomExtension:
+ pass
+
+ example = SomeCustomExtension(keys=[{'test123': 123, 'test345': 'aaaa'}])
+ assert data == str(example)
+
+
def test_custom_extension_invalid_observable():
# These extensions are being applied to improperly-created Observables.
# The Observable classes should have been created with the CustomObservable decorator.
diff --git a/stix2/test/test_properties.py b/stix2/test/test_properties.py
index 34edc96..16ff06a 100644
--- a/stix2/test/test_properties.py
+++ b/stix2/test/test_properties.py
@@ -1,6 +1,6 @@
import pytest
-from stix2 import EmailMIMEComponent, ExtensionsProperty, TCPExt
+from stix2 import CustomObject, EmailMIMEComponent, ExtensionsProperty, TCPExt
from stix2.exceptions import AtLeastOnePropertyError, DictionaryKeyError
from stix2.properties import (BinaryProperty, BooleanProperty,
DictionaryProperty, EmbeddedObjectProperty,
@@ -266,6 +266,17 @@ def test_dictionary_property_invalid(d):
assert str(excinfo.value) == d[1]
+def test_property_list_of_dictionary():
+ @CustomObject('x-new-obj', [
+ ('property1', ListProperty(DictionaryProperty(), required=True)),
+ ])
+ class NewObj():
+ pass
+
+ test_obj = NewObj(property1=[{'foo': 'bar'}])
+ assert test_obj.property1[0]['foo'] == 'bar'
+
+
@pytest.mark.parametrize("value", [
{"sha256": "6db12788c37247f2316052e142f42f4b259d6561751e5f401a1ae2a6df9c674b"},
[('MD5', '2dfb1bcc980200c6706feee399d41b3f'), ('RIPEMD-160', 'b3a8cd8a27c90af79b3c81754f267780f443dfef')],
diff --git a/plasmapy/physics/parameters.py b/plasmapy/physics/parameters.py
index d2354c91..40745ed2 100644
--- a/plasmapy/physics/parameters.py
+++ b/plasmapy/physics/parameters.py
@@ -334,9 +334,11 @@ def ion_sound_speed(T_e,
@utils.check_relativistic
@utils.check_quantity({
- 'T': {'units': u.K, 'can_be_negative': False}
+ 'T': {'units': u.K, 'can_be_negative': False},
+ 'mass': {'units': u.kg, 'can_be_negative': False, 'can_be_nan': True}
})
-def thermal_speed(T, particle="e-", method="most_probable"):
+@atomic.particle_input
+def thermal_speed(T, particle: atomic.Particle="e-", method="most_probable", mass=np.nan*u.kg):
r"""
Return the most probable speed for a particle within a Maxwellian
distribution.
@@ -356,6 +358,11 @@ def thermal_speed(T, particle="e-", method="most_probable"):
Method to be used for calculating the thermal speed. Options are
`'most_probable'` (default), `'rms'`, and `'mean_magnitude'`.
+ mass : ~astropy.units.Quantity
+ The particle's mass override. Defaults to NaN; when NaN it has no
+ effect, but when set it overrides the mass acquired from `particle`.
+ Useful when working with relative velocities of particles.
+
Returns
-------
V : ~astropy.units.Quantity
@@ -417,10 +424,7 @@ def thermal_speed(T, particle="e-", method="most_probable"):
T = T.to(u.K, equivalencies=u.temperature_energy())
- try:
- m = atomic.particle_mass(particle)
- except AtomicError:
- raise ValueError("Unable to find {particle} mass in thermal_speed")
+ m = mass if np.isfinite(mass) else atomic.particle_mass(particle)
# different methods, as per https://en.wikipedia.org/wiki/Thermal_velocity
if method == "most_probable":
diff --git a/plasmapy/physics/transport/collisions.py b/plasmapy/physics/transport/collisions.py
index b061e316..82868951 100644
--- a/plasmapy/physics/transport/collisions.py
+++ b/plasmapy/physics/transport/collisions.py
@@ -7,14 +7,13 @@
import warnings
# plasmapy modules
-import plasmapy.atomic as atomic
from plasmapy import utils
from plasmapy.utils.checks import (check_quantity,
_check_relativistic)
from plasmapy.constants import (c, m_e, k_B, e, eps0, pi, hbar)
-from plasmapy.atomic import (particle_mass, integer_charge)
-from plasmapy.physics.parameters import (Debye_length)
+from plasmapy import atomic
+from plasmapy.physics import parameters
from plasmapy.physics.quantum import (Wigner_Seitz_radius,
thermal_deBroglie_wavelength,
chemical_potential)
@@ -246,29 +245,16 @@ def _boilerPlate(T, particles, V):
"list or tuple containing representations of two "
f"charged particles. Got {particles} instead.")
- masses = np.zeros(2) * u.kg
- charges = np.zeros(2) * u.C
-
- for particle, i in zip(particles, range(2)):
-
- try:
- masses[i] = particle_mass(particles[i])
- except Exception:
- raise ValueError("Unable to find mass of particle: "
- f"{particles[i]}.")
- try:
- charges[i] = np.abs(e * integer_charge(particles[i]))
- if charges[i] is None:
- raise ValueError("Unable to find charge of particle: "
- f"{particles[i]}.")
- except Exception:
- raise ValueError("Unable to find charge of particle: "
- f"{particles[i]}.")
+ particles = [atomic.Particle(p) for p in particles]
+ masses = [p.mass for p in particles]
+ charges = [np.abs(p.charge) for p in particles]
+
# obtaining reduced mass of 2 particle collision system
- reduced_mass = masses[0] * masses[1] / (masses[0] + masses[1])
+ reduced_mass = atomic.reduced_mass(*particles)
+
# getting thermal velocity of system if no velocity is given
if np.isnan(V):
- V = np.sqrt(2 * k_B * T / reduced_mass).to(u.m / u.s)
+ V = parameters.thermal_speed(T, mass=reduced_mass)
_check_relativistic(V, 'V')
return T, masses, charges, reduced_mass, V
@@ -485,7 +471,7 @@ def impact_parameter(T,
raise ValueError("Must provide a z_mean for GMS-2, GMS-5, and "
"GMS-6 methods.")
# Debye length
- lambdaDe = Debye_length(T, n_e)
+ lambdaDe = parameters.Debye_length(T, n_e)
# deBroglie wavelength
lambdaBroglie = hbar / (2 * reduced_mass * V)
# distance of closest approach in 90 degree Coulomb collision
Split up `_boilerPlate()` in `transport.py`
`_boilerPlate()` currently does a few different things, like fetching particle data, calculating thermal velocity, and running tests/checks. Each piece of functionality should be split into its own function.
See #191
PlasmaPy/PlasmaPy
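One possible factoring, assuming the Particle-based refactor above lands first; the helper names here are suggestions, not existing API:
```python
import numpy as np

from plasmapy import atomic
from plasmapy.physics import parameters


def _process_particles(particles):
    # Particle-data fetching, split out of _boilerPlate().
    particles = [atomic.Particle(p) for p in particles]
    masses = [p.mass for p in particles]
    charges = [np.abs(p.charge) for p in particles]
    return particles, masses, charges


def _resolve_velocity(T, V, reduced_mass):
    # Thermal-velocity fallback, split out of _boilerPlate().
    if np.isnan(V):
        V = parameters.thermal_speed(T, mass=reduced_mass)
    return V
```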
diff --git a/plasmapy/physics/tests/test_distribution.py b/plasmapy/physics/tests/test_distribution.py
index f4c3f3ba..35e7ffb0 100644
--- a/plasmapy/physics/tests/test_distribution.py
+++ b/plasmapy/physics/tests/test_distribution.py
@@ -104,16 +104,6 @@ def test_std(self):
T_distri = (std**2 / k_B * m_e).to(u.K)
assert np.isclose(T_distri.value, self.T_e.value)
- def test_valErr(self):
- """
- Tests whether ValueError is raised when invalid particle name
- string is passed.
- """
- with pytest.raises(ValueError):
- Maxwellian_1D(1 * u.m / u.s,
- T=1 * u.K,
- particle='XXX')
-
def test_units_no_vTh(self):
"""
Tests distribution function with units, but not passing vTh.
@@ -813,17 +803,6 @@ def test_std(self):
T_distri = (std**2 / k_B * m_e).to(u.K)
assert np.isclose(T_distri.value, self.T_e.value)
- def test_valErr(self):
- """
- Tests whether ValueError is raised when invalid particle name
- string is passed.
- """
- with pytest.raises(ValueError):
- kappa_velocity_1D(1 * u.m / u.s,
- T=1 * u.K,
- kappa=self.kappa,
- particle='XXX')
-
def test_units_no_vTh(self):
"""
Tests distribution function with units, but not passing vTh.
diff --git a/plasmapy/physics/tests/test_parameters.py b/plasmapy/physics/tests/test_parameters.py
index c9a0e5f5..30ebb26a 100644
--- a/plasmapy/physics/tests/test_parameters.py
+++ b/plasmapy/physics/tests/test_parameters.py
@@ -305,7 +305,7 @@ def test_thermal_speed():
with pytest.raises(RelativityError):
thermal_speed(1e14 * u.K, particle='p')
- with pytest.raises(ValueError):
+ with pytest.raises(InvalidParticleError):
thermal_speed(T_i, particle='asdfasd')
with pytest.warns(u.UnitsWarning):
@@ -561,9 +561,6 @@ def test_gyroradius():
with pytest.raises(TypeError):
gyroradius(u.T, particle="p", Vperp=8 * u.m / u.s)
- with pytest.raises(ValueError):
- gyroradius(B, particle='asfdas', T_i=T_i)
-
with pytest.raises(ValueError):
gyroradius(B, particle='p', T_i=-1 * u.K)
diff --git a/plasmapy/physics/transport/tests/test_collisions.py b/plasmapy/physics/transport/tests/test_collisions.py
index bc60f0f4..5a2ceaa6 100644
--- a/plasmapy/physics/transport/tests/test_collisions.py
+++ b/plasmapy/physics/transport/tests/test_collisions.py
@@ -11,7 +11,7 @@
Knudsen_number,
coupling_parameter)
from plasmapy.physics.transport.collisions import Spitzer_resistivity
-from plasmapy.utils import RelativityWarning, RelativityError, PhysicsWarning
+from plasmapy.utils import exceptions
from plasmapy.constants import m_p, m_e, c
@@ -117,7 +117,7 @@ def test_Chen_fusion(self):
# velocity. Chen uses v**2 = k * T / m whereas we use
# v ** 2 = 2 * k * T / m
lnLambdaChen = 16 + np.log(2)
- with pytest.warns(RelativityWarning):
+ with pytest.warns(exceptions.RelativityWarning):
lnLambda = Coulomb_logarithm(T, n, ('e', 'p'))
testTrue = np.isclose(lnLambda,
lnLambdaChen,
@@ -140,7 +140,7 @@ def test_Chen_laser(self):
# velocity. Chen uses v**2 = k * T / m whereas we use
# v ** 2 = 2 * k * T / m
lnLambdaChen = 6.8 + np.log(2)
- with pytest.warns(RelativityWarning):
+ with pytest.warns(exceptions.RelativityWarning):
lnLambda = Coulomb_logarithm(T, n, ('e', 'p'))
testTrue = np.isclose(lnLambda,
lnLambdaChen,
@@ -155,7 +155,7 @@ def test_GMS1(self):
Test for first version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -176,7 +176,7 @@ def test_GMS1_negative(self):
Murillo, and Schlanges PRE (2002). This checks for when
a negative (invalid) Coulomb logarithm is returned.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -196,7 +196,7 @@ def test_GMS2(self):
Test for second version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -217,7 +217,7 @@ def test_GMS2_negative(self):
Murillo, and Schlanges PRE (2002). This checks for when
a negative (invalid) Coulomb logarithm is returned.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -237,7 +237,7 @@ def test_GMS3(self):
Test for third version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -259,7 +259,7 @@ def test_GMS3_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -279,7 +279,7 @@ def test_GMS4(self):
Test for fourth version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -301,7 +301,7 @@ def test_GMS4_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -321,7 +321,7 @@ def test_GMS5(self):
Test for fifth version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -343,7 +343,7 @@ def test_GMS5_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -363,7 +363,7 @@ def test_GMS6(self):
Test for sixth version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -385,7 +385,7 @@ def test_GMS6_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -435,12 +435,12 @@ def test_GMS6_zmean_error(self):
def test_relativity_warn(self):
"""Tests whether relativity warning is raised at high velocity."""
- with pytest.warns(RelativityWarning):
+ with pytest.warns(exceptions.RelativityWarning):
Coulomb_logarithm(1e5 * u.K, 1 * u.m ** -3, ('e', 'p'), V=0.9 * c)
def test_relativity_error(self):
"""Tests whether relativity error is raised at light speed."""
- with pytest.raises(RelativityError):
+ with pytest.raises(exceptions.RelativityError):
Coulomb_logarithm(1e5 * u.K, 1 * u.m ** -3, ('e', 'p'), V=1.1 * c)
def test_unit_conversion_error(self):
@@ -464,7 +464,7 @@ def test_invalid_particle_error(self):
Tests whether an error is raised when an invalid particle name
is given.
"""
- with pytest.raises(ValueError):
+ with pytest.raises(exceptions.InvalidParticleError):
Coulomb_logarithm(1 * u.K, 5 * u.m ** -3, ('e', 'g'))
n_e = np.array([1e9, 1e9, 1e24]) * u.cm ** -3
@@ -605,7 +605,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.particles,
@@ -626,7 +626,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.particles,
@@ -645,7 +645,7 @@ def test_electrons(self):
"""
Testing collision frequency between electrons.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.electrons,
@@ -664,7 +664,7 @@ def test_protons(self):
"""
Testing collision frequency between protons (ions).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.protons,
@@ -683,7 +683,7 @@ def test_zmean(self):
"""
Test collisional frequency function when given arbitrary z_mean.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.particles,
@@ -714,7 +714,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mean_free_path(self.T,
self.n_e,
self.particles,
@@ -735,7 +735,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mean_free_path(self.T,
self.n_e,
self.particles,
@@ -834,7 +834,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mobility(self.T,
self.n_e,
self.particles,
@@ -855,7 +855,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mobility(self.T,
self.n_e,
self.particles,
@@ -872,7 +872,7 @@ def test_fail1(self):
def test_zmean(self):
"""Testing mobility when z_mean is passed."""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mobility(self.T,
self.n_e,
self.particles,
@@ -904,7 +904,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Knudsen_number(self.length,
self.T,
self.n_e,
@@ -926,7 +926,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Knudsen_number(self.length,
self.T,
self.n_e,
diff --git a/setup.py b/setup.py
index 6604fe6..7c44b31 100755
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,9 @@ PROJECT_NAME = 'friendlypins'
PROJECT_DEPENDENCIES = [
'requests',
'six',
- 'dateutils']
+ 'dateutils',
+ 'tqdm',
+ 'pillow']
PROJECT_DEV_DEPENDENCIES = [
'wheel<1.0.0',
'twine<2.0.0',
diff --git a/src/friendlypins/headers.py b/src/friendlypins/headers.py
index 5df1ea4..b05b9f8 100644
--- a/src/friendlypins/headers.py
+++ b/src/friendlypins/headers.py
@@ -79,5 +79,13 @@ class Headers(object):
# return time data in current locale for convenience
return date_with_tz.astimezone(tz.tzlocal())
+ @property
+ def bytes(self):
+ """Gets the number of bytes contained in the response data
+
+ :rtype: :class:`int`
+ """
+ return int(self._data['Content-Length'])
+
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index 84c63b7..864bfbc 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -3,7 +3,9 @@ import logging
import os
from six.moves import urllib
import requests
+from tqdm import tqdm
from friendlypins.api import API
+from friendlypins.headers import Headers
def _download_pin(pin, folder):
"""Helper method for downloading a thumbnail from a single pin
@@ -15,6 +17,7 @@ def _download_pin(pin, folder):
:rtype: :class:`int`
"""
log = logging.getLogger(__name__)
+
temp_url = urllib.parse.urlparse(pin.thumbnail.url)
temp_filename = os.path.basename(temp_url.path)
output_file = os.path.join(folder, temp_filename)
@@ -28,6 +31,9 @@ def _download_pin(pin, folder):
try:
response = requests.get(pin.thumbnail.url, stream=True)
response.raise_for_status()
+ headers = Headers(response.headers)
+ log.debug(headers)
+
with open(output_file, "wb") as handle:
for data in response.iter_content():
handle.write(data)
@@ -64,16 +70,18 @@ def download_thumbnails(api_token, board_name, output_folder, delete):
return 1
all_pins = selected_board.all_pins
- log.info('Downloading %s thumbnails...', len(all_pins))
+ log.info('Downloading thumbnails...')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
- for cur_pin in all_pins:
- retval = _download_pin(cur_pin, output_folder)
- if retval:
- return retval
- if delete:
- cur_pin.delete()
+ with tqdm(total=selected_board.num_pins, unit='b', ncols=80) as pbar:
+ for cur_pin in all_pins:
+ retval = _download_pin(cur_pin, output_folder)
+ if retval:
+ return retval
+ if delete:
+ cur_pin.delete()
+ pbar.update()
return 0
Add progress bar support to fpins console app
To make it easier to track the overall progress of a lengthy download operation, we should add support for showing a progress bar to the fpins console app.
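A sketch of the tqdm pattern the patch above adopts, with stand-in data (the real loop iterates the board's pins and downloads each thumbnail):
```python
from tqdm import tqdm

pins = ['pin1', 'pin2', 'pin3']  # stand-ins for friendlypins Pin objects
with tqdm(total=len(pins), unit='b', ncols=80) as pbar:
    for cur_pin in pins:
        # ... download the thumbnail for cur_pin here ...
        pbar.update()  # one tick per processed pin
```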
diff --git a/src/friendlypins/board.py b/src/friendlypins/board.py
index 045655f..7f51a3b 100644
--- a/src/friendlypins/board.py
+++ b/src/friendlypins/board.py
@@ -64,16 +64,12 @@ class Board(object):
return int(self._data['counts']['pins'])
@property
- def all_pins(self):
- """Gets a list of all pins from this board
+ def pins(self):
+ """Generator for iterating over the pins linked to this board
- NOTE: This process may take a long time to complete and require
- a lot of memory for boards that contain large numbers of pins
-
- :rtype: :class:`list` of :class:`friendlypins.pin.Pin`
+ :rtype: Generator of :class:`friendlypins.pin.Pin`
"""
- self._log.debug('Gettings all pins for board %s...', self.name)
- retval = list()
+ self._log.debug('Loading pins for board %s...', self.name)
properties = {
"fields": ','.join([
@@ -93,19 +89,20 @@ class Board(object):
])
}
+ page = 0
while True:
+ self._log.debug("Loading pins page %s", page)
result = self._io.get(
"boards/{0}/pins".format(self.unique_id),
properties)
assert 'data' in result
for cur_item in result['data']:
- retval.append(Pin(cur_item, self._io))
+ yield Pin(cur_item, self._io)
if not result["page"]["cursor"]:
break
properties["cursor"] = result["page"]["cursor"]
-
- return retval
+ page += 1
if __name__ == "__main__":
diff --git a/src/friendlypins/user.py b/src/friendlypins/user.py
index de3306d..3d69138 100644
--- a/src/friendlypins/user.py
+++ b/src/friendlypins/user.py
@@ -94,21 +94,39 @@ class User(object):
@property
def boards(self):
- """Gets a list of boards owned by this user
+ """Generator for iterating over the boards owned by this user
- :rtype: :class:`list` of :class:`friendlypins.board.Board`
+ :rtype: Generator of :class:`friendlypins.board.Board`
"""
- self._log.debug("Loading boards for user %s...", self.name)
-
- fields = "id,name,url,description,creator,created_at,counts,image"
- result = self._io.get('me/boards', {"fields": fields})
-
- assert 'data' in result
-
- retval = []
- for cur_item in result['data']:
- retval.append(Board(cur_item, self._io))
- return retval
+ self._log.debug('Loading boards for user %s...', self.name)
+
+ properties = {
+ "fields": ','.join([
+ "id",
+ "name",
+ "url",
+ "description",
+ "creator",
+ "created_at",
+ "counts",
+ "image"
+ ])
+ }
+
+ page = 0
+ while True:
+ self._log.debug("Loading boards page %s", page)
+ result = self._io.get("me/boards", properties)
+ assert 'data' in result
+
+ for cur_item in result['data']:
+ yield Board(cur_item, self._io)
+
+ if not result["page"]["cursor"]:
+ break
+
+ properties["cursor"] = result["page"]["cursor"]
+ page += 1
if __name__ == "__main__":
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index 1ca66ef..da40dfc 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -71,13 +71,12 @@ def download_thumbnails(api_token, board_name, output_folder, delete):
log.error("Could not find selected board: %s", board_name)
return 1
- all_pins = selected_board.all_pins
log.info('Downloading thumbnails...')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
with tqdm(total=selected_board.num_pins, unit='b', ncols=80) as pbar:
- for cur_pin in all_pins:
+ for cur_pin in selected_board.pins:
retval = _download_pin(cur_pin, output_folder)
if retval:
return retval
lazy load boards and pins
To simplify and optimize the interactions with boards and pins, we should lazy-load the data from these two API calls using iterators:
User.boards
Board.all_pins
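A sketch of consuming the new generators, assuming a friendlypins User object obtained elsewhere:
```python
def process_all_pins(user, handle_pin):
    # Pages of boards and pins are fetched on demand as the generators
    # are consumed, so memory stays flat even for very large boards.
    for board in user.boards:
        for pin in board.pins:
            handle_pin(pin)
```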
diff --git a/src/friendlypins/api.py b/src/friendlypins/api.py
index cff46b6..63b0360 100644
--- a/src/friendlypins/api.py
+++ b/src/friendlypins/api.py
@@ -17,20 +17,13 @@ class API(object): # pylint: disable=too-few-public-methods
self._log = logging.getLogger(__name__)
self._io = RestIO(personal_access_token)
- def get_user(self, username=None):
- """Gets all primitives associated with a particular Pinterest user
-
- :param str username:
- Optional name of a user to look up
- If not provided, the currently authentcated user will be returned
-
- :returns: Pinterest user with the given name
+ @property
+ def user(self):
+ """Gets all primitives associated with the authenticated user
+ :returns: currently authenticated pinterest user
:rtype: :class:`friendlypins.user.User`
"""
self._log.debug("Getting authenticated user details...")
- if username:
- raise NotImplementedError(
- "Querying arbitrary Pinerest users is not yet supported.")
fields = "id,username,first_name,last_name,bio,created_at,counts,image"
result = self._io.get("me", {"fields": fields})
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index 8789490..761d5b9 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -60,7 +60,7 @@ def download_thumbnails(api_token, board_name, output_folder, delete):
"""
log = logging.getLogger(__name__)
obj = API(api_token)
- user = obj.get_user()
+ user = obj.user
selected_board = None
for cur_board in user.boards:
@@ -97,7 +97,7 @@ def delete_board(api_token, board_name):
"""
log = logging.getLogger(__name__)
obj = API(api_token)
- user = obj.get_user()
+ user = obj.user
selected_board = None
for cur_board in user.boards:
rename get_user to current_user
Since we currently only support retrieving data for the currently authenticated user, we should rename get_user to current_user and make it a property with no method parameters.
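Usage after the rename is a plain property access (the token value is a placeholder):
```python
from friendlypins.api import API

api = API("my_personal_access_token")  # placeholder token
user = api.user  # replaces the old api.get_user()
print(user.name)
```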
dthkao:
Review status: 0 of 2 files reviewed at latest revision, all discussions resolved.
---
*[mobly/controllers/android_device_lib/adb.py, line 279 at r1](https://beta.reviewable.io/reviews/google/mobly/437#-LAIsFf7-05sU6C6Hx5K:-LAIsFf7-05sU6C6Hx5L:b1zt8o6) ([raw file](https://github.com/google/mobly/blob/93853c8ba9cc06560e500141a68f3fa9039824ab/mobly/controllers/android_device_lib/adb.py#L279)):*
> ```Python
>
> def __getattr__(self, name):
> def adb_call(args=None, shell=False, timeout=None, return_all=False):
> ```
I'm not a huge fan of signatures changing based on a flag. Is there a way we can make this an init-level setting instead of per-call?
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/437)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device_lib/adb.py, line 279 at r1](https://beta.reviewable.io/reviews/google/mobly/437#-LAIsFf7-05sU6C6Hx5K:-LAIv4JN3Pnm3xAhzfbd:bwk8scm) ([raw file](https://github.com/google/mobly/blob/93853c8ba9cc06560e500141a68f3fa9039824ab/mobly/controllers/android_device_lib/adb.py#L279)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
I'm not a huge fan of signatures changing based on a flag. Is there a way we can make this an init-level setting instead of per-call?
</blockquote></details>
I thought of the same thing initially.
However, that wouldn't work, since it would break any util that makes adb calls and expects a single output, and there are quite a lot of those.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/437)*
<!-- Sent from Reviewable.io -->
diff --git a/mobly/controllers/android_device_lib/adb.py b/mobly/controllers/android_device_lib/adb.py
index 432f08e..12c14bd 100644
--- a/mobly/controllers/android_device_lib/adb.py
+++ b/mobly/controllers/android_device_lib/adb.py
@@ -138,7 +138,7 @@ class AdbProxy(object):
def __init__(self, serial=''):
self.serial = serial
- def _exec_cmd(self, args, shell, timeout):
+ def _exec_cmd(self, args, shell, timeout, stderr):
"""Executes adb commands.
Args:
@@ -148,6 +148,8 @@ class AdbProxy(object):
False to invoke it directly. See subprocess.Popen() docs.
timeout: float, the number of seconds to wait before timing out.
If not specified, no timeout takes effect.
+ stderr: a Byte stream, like io.BytesIO, stderr of the command will
+ be written to this object if provided.
Returns:
The output of the adb command run if exit code is 0.
@@ -169,6 +171,8 @@ class AdbProxy(object):
raise AdbTimeoutError(cmd=args, timeout=timeout)
(out, err) = proc.communicate()
+ if stderr:
+ stderr.write(err)
ret = proc.returncode
logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s',
cli_cmd_to_string(args), out, err, ret)
@@ -177,7 +181,7 @@ class AdbProxy(object):
else:
raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)
- def _exec_adb_cmd(self, name, args, shell, timeout):
+ def _exec_adb_cmd(self, name, args, shell, timeout, stderr):
if shell:
# Add quotes around "adb" in case the ADB path contains spaces. This
# is pretty common on Windows (e.g. Program Files).
@@ -195,7 +199,9 @@ class AdbProxy(object):
adb_cmd.append(args)
else:
adb_cmd.extend(args)
- return self._exec_cmd(adb_cmd, shell=shell, timeout=timeout)
+ out = self._exec_cmd(
+ adb_cmd, shell=shell, timeout=timeout, stderr=stderr)
+ return out
def getprop(self, prop_name):
"""Get a property of the device.
@@ -273,7 +279,7 @@ class AdbProxy(object):
return self.shell(instrumentation_command)
def __getattr__(self, name):
- def adb_call(args=None, shell=False, timeout=None):
+ def adb_call(args=None, shell=False, timeout=None, stderr=None):
"""Wrapper for an ADB command.
Args:
@@ -283,6 +289,8 @@ class AdbProxy(object):
False to invoke it directly. See subprocess.Proc() docs.
timeout: float, the number of seconds to wait before timing out.
If not specified, no timeout takes effect.
+ stderr: a Byte stream, like io.BytesIO, stderr of the command
+ will be written to this object if provided.
Returns:
The output of the adb command run if exit code is 0.
@@ -290,6 +298,6 @@ class AdbProxy(object):
args = args or ''
clean_name = name.replace('_', '-')
return self._exec_adb_cmd(
- clean_name, args, shell=shell, timeout=timeout)
+ clean_name, args, shell=shell, timeout=timeout, stderr=stderr)
return adb_call
Propagate stderr from adb commands
The current Mobly adb proxy does not propagate stderr if the return code is zero.
We thought this was OK since Android fixed the return-code issues in M.
But it turns out many Chinese manufacturers did not fix this on devices sold in China.
To better support those devices, and potentially other devices with the same return-code problem, we need to surface stderr.
google/mobly
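With the patch above, callers opt in per call by passing a byte stream — a sketch based on the new test (requires a connected device; the command is illustrative):
```python
import io

from mobly.controllers.android_device_lib import adb

stderr_buf = io.BytesIO()
out = adb.AdbProxy().shell('ls /sdcard', shell=True, stderr=stderr_buf)
print(out)                    # stdout, as before
print(stderr_buf.getvalue())  # stderr, now surfaced even on return code 0
```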
diff --git a/mobly/base_instrumentation_test.py b/mobly/base_instrumentation_test.py
index 4966cd4..bb72075 100644
--- a/mobly/base_instrumentation_test.py
+++ b/mobly/base_instrumentation_test.py
@@ -927,7 +927,7 @@ class BaseInstrumentationTestClass(base_test.BaseTestClass):
package=package,
options=options,
runner=runner,
- )
+ ).decode('utf-8')
logging.info('Outputting instrumentation test log...')
logging.info(instrumentation_output)
@@ -935,5 +935,5 @@ class BaseInstrumentationTestClass(base_test.BaseTestClass):
instrumentation_block = _InstrumentationBlock(prefix=prefix)
for line in instrumentation_output.splitlines():
instrumentation_block = self._parse_line(instrumentation_block,
- line.decode('utf-8'))
+ line)
return self._finish_parsing(instrumentation_block)
diff --git a/mobly/base_test.py b/mobly/base_test.py
index 8b761fa..e4e047b 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -26,6 +26,7 @@ from mobly import expects
from mobly import records
from mobly import signals
from mobly import runtime_test_info
+from mobly import utils
# Macro strings for test result reporting
TEST_CASE_TOKEN = '[Test]'
@@ -351,7 +352,7 @@ class BaseTestClass(object):
content: dict, the data to add to summary file.
"""
if 'timestamp' not in content:
- content['timestamp'] = time.time()
+ content['timestamp'] = utils.get_current_epoch_time()
self.summary_writer.dump(content,
records.TestSummaryEntryType.USER_DATA)
diff --git a/tests/mobly/base_instrumentation_test_test.py b/tests/mobly/base_instrumentation_test_test.py
index 2256475..3908015 100755
--- a/tests/mobly/base_instrumentation_test_test.py
+++ b/tests/mobly/base_instrumentation_test_test.py
@@ -34,6 +34,17 @@ MOCK_PREFIX = 'my_prefix'
# A mock name for the instrumentation test subclass.
MOCK_INSTRUMENTATION_TEST_CLASS_NAME = 'MockInstrumentationTest'
+MOCK_EMPTY_INSTRUMENTATION_TEST = """\
+INSTRUMENTATION_RESULT: stream=
+
+Time: 0.001
+
+OK (0 tests)
+
+
+INSTRUMENTATION_CODE: -1
+"""
+
class MockInstrumentationTest(BaseInstrumentationTestClass):
def __init__(self, tmp_dir, user_params={}):
@@ -229,18 +240,21 @@ INSTRUMENTATION_STATUS_CODE: -1
instrumentation_output, expected_has_error=True)
def test_run_instrumentation_test_with_no_tests(self):
- instrumentation_output = """\
-INSTRUMENTATION_RESULT: stream=
-
-Time: 0.001
-
-OK (0 tests)
-
+ instrumentation_output = MOCK_EMPTY_INSTRUMENTATION_TEST
+ self.assert_run_instrumentation_test(
+ instrumentation_output, expected_completed_and_passed=True)
-INSTRUMENTATION_CODE: -1
-"""
+ @unittest.skipUnless(
+ sys.version_info >= (3, 0),
+ 'Only python3 displays different string types differently.')
+ @mock.patch('logging.info')
+ def test_run_instrumentation_test_logs_correctly(self, mock_info_logger):
+ instrumentation_output = MOCK_EMPTY_INSTRUMENTATION_TEST
self.assert_run_instrumentation_test(
instrumentation_output, expected_completed_and_passed=True)
+ for mock_call in mock_info_logger.mock_calls:
+ logged_format = mock_call[1][0]
+ self.assertIsInstance(logged_format, str)
def test_run_instrumentation_test_with_passing_test(self):
instrumentation_output = """\
diff --git a/tests/mobly/controllers/android_device_lib/adb_test.py b/tests/mobly/controllers/android_device_lib/adb_test.py
index 9eb3ab8..7bf61ab 100755
--- a/tests/mobly/controllers/android_device_lib/adb_test.py
+++ b/tests/mobly/controllers/android_device_lib/adb_test.py
@@ -12,11 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import io
import mock
+import subprocess
from collections import OrderedDict
from future.tests.base import unittest
-
from mobly.controllers.android_device_lib import adb
# Mock parameters for instrumentation.
@@ -42,6 +43,9 @@ MOCK_OPTIONS_INSTRUMENTATION_COMMAND = ('am instrument -r -w -e option1 value1'
# Mock Shell Command
MOCK_SHELL_COMMAND = 'ls'
MOCK_COMMAND_OUTPUT = '/system/bin/ls'.encode('utf-8')
+MOCK_DEFAULT_STDOUT = 'out'
+MOCK_DEFAULT_STDERR = 'err'
+MOCK_DEFAULT_COMMAND_OUTPUT = MOCK_DEFAULT_STDOUT.encode('utf-8')
MOCK_ADB_SHELL_COMMAND_CHECK = 'adb shell command -v ls'
@@ -58,7 +62,8 @@ class AdbTest(unittest.TestCase):
mock_psutil_process.return_value = mock.Mock()
mock_proc.communicate = mock.Mock(
- return_value=('out'.encode('utf-8'), 'err'.encode('utf-8')))
+ return_value=(MOCK_DEFAULT_STDOUT.encode('utf-8'),
+ MOCK_DEFAULT_STDERR.encode('utf-8')))
mock_proc.returncode = 0
return (mock_psutil_process, mock_popen)
@@ -68,9 +73,9 @@ class AdbTest(unittest.TestCase):
mock_Popen):
self._mock_process(mock_psutil_process, mock_Popen)
- reply = adb.AdbProxy()._exec_cmd(
- ['fake_cmd'], shell=False, timeout=None)
- self.assertEqual('out', reply.decode('utf-8'))
+ out = adb.AdbProxy()._exec_cmd(
+ ['fake_cmd'], shell=False, timeout=None, stderr=None)
+ self.assertEqual(MOCK_DEFAULT_STDOUT, out.decode('utf-8'))
@mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
@mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process')
@@ -81,7 +86,8 @@ class AdbTest(unittest.TestCase):
with self.assertRaisesRegex(adb.AdbError,
'Error executing adb cmd .*'):
- adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=None)
+ adb.AdbProxy()._exec_cmd(
+ ['fake_cmd'], shell=False, timeout=None, stderr=None)
@mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
@mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process')
@@ -89,8 +95,9 @@ class AdbTest(unittest.TestCase):
mock_popen):
self._mock_process(mock_psutil_process, mock_popen)
- reply = adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=1)
- self.assertEqual('out', reply.decode('utf-8'))
+ out = adb.AdbProxy()._exec_cmd(
+ ['fake_cmd'], shell=False, timeout=1, stderr=None)
+ self.assertEqual(MOCK_DEFAULT_STDOUT, out.decode('utf-8'))
@mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
@mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process')
@@ -104,7 +111,8 @@ class AdbTest(unittest.TestCase):
with self.assertRaisesRegex(adb.AdbTimeoutError,
'Timed out executing command "fake_cmd" '
'after 0.1s.'):
- adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=0.1)
+ adb.AdbProxy()._exec_cmd(
+ ['fake_cmd'], shell=False, timeout=0.1, stderr=None)
@mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
@mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process')
@@ -113,66 +121,100 @@ class AdbTest(unittest.TestCase):
self._mock_process(mock_psutil_process, mock_popen)
with self.assertRaisesRegex(adb.Error,
'Timeout is not a positive value: -1'):
- adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=-1)
+ adb.AdbProxy()._exec_cmd(
+ ['fake_cmd'], shell=False, timeout=-1, stderr=None)
def test_exec_adb_cmd(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().shell(['arg1', 'arg2'])
mock_exec_cmd.assert_called_once_with(
- ['adb', 'shell', 'arg1', 'arg2'], shell=False, timeout=None)
+ ['adb', 'shell', 'arg1', 'arg2'],
+ shell=False,
+ timeout=None,
+ stderr=None)
+
+ def test_exec_adb_cmd_with_serial(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy('12345').shell(['arg1', 'arg2'])
mock_exec_cmd.assert_called_once_with(
['adb', '-s', '12345', 'shell', 'arg1', 'arg2'],
shell=False,
- timeout=None)
+ timeout=None,
+ stderr=None)
def test_exec_adb_cmd_with_shell_true(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().shell('arg1 arg2', shell=True)
mock_exec_cmd.assert_called_once_with(
- '"adb" shell arg1 arg2', shell=True, timeout=None)
+ '"adb" shell arg1 arg2', shell=True, timeout=None, stderr=None)
+
+ def test_exec_adb_cmd_with_shell_true_with_serial(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy('12345').shell('arg1 arg2', shell=True)
mock_exec_cmd.assert_called_once_with(
- '"adb" -s "12345" shell arg1 arg2', shell=True, timeout=None)
+ '"adb" -s "12345" shell arg1 arg2',
+ shell=True,
+ timeout=None,
+ stderr=None)
+
+ @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
+ @mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process')
+ def test_exec_adb_cmd_with_stderr_pipe(self, mock_psutil_process,
+ mock_popen):
+ self._mock_process(mock_psutil_process, mock_popen)
+ stderr_redirect = io.BytesIO()
+ out = adb.AdbProxy().shell(
+ 'arg1 arg2', shell=True, stderr=stderr_redirect)
+ self.assertEqual(MOCK_DEFAULT_STDOUT, out.decode('utf-8'))
+ self.assertEqual(MOCK_DEFAULT_STDERR,
+ stderr_redirect.getvalue().decode('utf-8'))
def test_instrument_without_parameters(self):
"""Verifies the AndroidDevice object's instrument command is correct in
the basic case.
"""
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().instrument(MOCK_INSTRUMENTATION_PACKAGE)
mock_exec_cmd.assert_called_once_with(
['adb', 'shell', MOCK_BASIC_INSTRUMENTATION_COMMAND],
shell=False,
- timeout=None)
+ timeout=None,
+ stderr=None)
def test_instrument_with_runner(self):
"""Verifies the AndroidDevice object's instrument command is correct
with a runner specified.
"""
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().instrument(
MOCK_INSTRUMENTATION_PACKAGE,
runner=MOCK_INSTRUMENTATION_RUNNER)
mock_exec_cmd.assert_called_once_with(
['adb', 'shell', MOCK_RUNNER_INSTRUMENTATION_COMMAND],
shell=False,
- timeout=None)
+ timeout=None,
+ stderr=None)
def test_instrument_with_options(self):
"""Verifies the AndroidDevice object's instrument command is correct
with options.
"""
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().instrument(
MOCK_INSTRUMENTATION_PACKAGE,
options=MOCK_INSTRUMENTATION_OPTIONS)
mock_exec_cmd.assert_called_once_with(
['adb', 'shell', MOCK_OPTIONS_INSTRUMENTATION_COMMAND],
shell=False,
- timeout=None)
+ timeout=None,
+ stderr=None)
def test_cli_cmd_to_string(self):
cmd = ['"adb"', 'a b', 'c//']
@@ -182,11 +224,13 @@ class AdbTest(unittest.TestCase):
def test_has_shell_command_called_correctly(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().has_shell_command(MOCK_SHELL_COMMAND)
mock_exec_cmd.assert_called_once_with(
['adb', 'shell', 'command', '-v', MOCK_SHELL_COMMAND],
shell=False,
- timeout=None)
+ timeout=None,
+ stderr=None)
def test_has_shell_command_with_existing_command(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
@@ -196,6 +240,7 @@ class AdbTest(unittest.TestCase):
def test_has_shell_command_with_missing_command_on_older_devices(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
mock_exec_cmd.side_effect = adb.AdbError(
MOCK_ADB_SHELL_COMMAND_CHECK, '', '', 0)
self.assertFalse(
@@ -203,6 +248,7 @@ class AdbTest(unittest.TestCase):
def test_has_shell_command_with_missing_command_on_newer_devices(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
mock_exec_cmd.side_effect = adb.AdbError(
MOCK_ADB_SHELL_COMMAND_CHECK, '', '', 1)
self.assertFalse(
diff --git a/trio/_path.py b/trio/_path.py
index 4b1bee16..7f777936 100644
--- a/trio/_path.py
+++ b/trio/_path.py
@@ -128,6 +128,28 @@ class Path(metaclass=AsyncAutoWrapperType):
self._wrapped = pathlib.Path(*args)
+ async def iterdir(self):
+ """
+ Like :meth:`pathlib.Path.iterdir`, but async.
+
+ This is an async method that returns a synchronous iterator, so you
+ use it like::
+
+ for subpath in await mypath.iterdir():
+ ...
+
+ Note that it actually loads the whole directory list into memory
+ immediately, during the initial call. (See `issue #501
+ <https://github.com/python-trio/trio/issues/501>`__ for discussion.)
+
+ """
+
+ def _load_items():
+ return list(self._wrapped.iterdir())
+
+ items = await trio.run_sync_in_worker_thread(_load_items)
+ return (Path(item) for item in items)
+
def __getattr__(self, name):
if name in self._forward:
value = getattr(self._wrapped, name)
trio.Path.iterdir wrapping is broken
Since `pathlib.Path.iterdir` returns a generator that performs IO on each iteration, `trio.Path.iterdir` is currently broken: it only creates the generator asynchronously, which is pointless given that no IO happens at generator creation.
The solution would be to modify `trio.Path.iterdir` to return an async generator; however, this means creating a special case, since the current implementation is only an async wrapper around `pathlib.Path.iterdir`.
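A sketch of the async-generator variant, assuming trio's `run_sync_in_worker_thread` and the `_wrapped` internal from the patch above (not an actual trio API):
```python
import trio

_SENTINEL = object()

def _next_or_sentinel(it):
    # Runs in a worker thread; avoids raising StopIteration across
    # the async-generator boundary (PEP 479).
    return next(it, _SENTINEL)

async def iterdir_agen(path):
    # One directory entry is fetched per iteration, instead of the
    # whole listing being loaded eagerly up front.
    sync_iter = path._wrapped.iterdir()
    while True:
        item = await trio.run_sync_in_worker_thread(_next_or_sentinel, sync_iter)
        if item is _SENTINEL:
            return
        yield trio.Path(item)
```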
diff --git a/src/friendlypins/api.py b/src/friendlypins/api.py
index f8b7255..4b014c9 100644
--- a/src/friendlypins/api.py
+++ b/src/friendlypins/api.py
@@ -25,7 +25,18 @@ class API(object): # pylint: disable=too-few-public-methods
"""
self._log.debug("Getting authenticated user details...")
- fields = "id,username,first_name,last_name,bio,created_at,counts,image"
+ fields = ",".join([
+ "id",
+ "username",
+ "first_name",
+ "last_name",
+ "bio",
+ "created_at",
+ "counts",
+ "image",
+ "account_type",
+ "url"
+ ])
result = self._io.get("me", {"fields": fields})
assert 'data' in result
diff --git a/src/friendlypins/board.py b/src/friendlypins/board.py
index d8626f6..4118157 100644
--- a/src/friendlypins/board.py
+++ b/src/friendlypins/board.py
@@ -47,6 +47,14 @@ class Board(object):
"""
return self._data['name']
+ @property
+ def description(self):
+ """Gets the descriptive text associated with this board
+
+ :rtype: :class:`str`
+ """
+ return self._data['description']
+
@property
def url(self):
"""Web address for the UI associated with the dashboard
diff --git a/src/friendlypins/user.py b/src/friendlypins/user.py
index 42367b6..2230b65 100644
--- a/src/friendlypins/user.py
+++ b/src/friendlypins/user.py
@@ -109,7 +109,9 @@ class User(object):
"creator",
"created_at",
"counts",
- "image"
+ "image",
+ "reason",
+ "privacy"
])
}
@@ -119,6 +121,35 @@ class User(object):
for cur_item in cur_page['data']:
yield Board(cur_item, self._io)
+ def create_board(self, name, description=None):
+ """Creates a new board for the currently authenticated user
+
+ :param str name: name for the new board
+ :param str description: optional descriptive text for the board
+ :returns: reference to the newly created board
+ :rtype: :class:`friendlypins.board.Board`
+ """
+ properties = {
+ "fields": ','.join([
+ "id",
+ "name",
+ "url",
+ "description",
+ "creator",
+ "created_at",
+ "counts",
+ "image",
+ "reason",
+ "privacy"
+ ])
+ }
+
+ data = {"name": name}
+ if description:
+ data["description"] = description
+
+ result = self._io.post("boards", data, properties)
+ return Board(result['data'], self._io)
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/utils/rest_io.py b/src/friendlypins/utils/rest_io.py
index 20456a5..ed7a77e 100644
--- a/src/friendlypins/utils/rest_io.py
+++ b/src/friendlypins/utils/rest_io.py
@@ -59,12 +59,44 @@ class RestIO(object):
properties["access_token"] = self._token
response = requests.get(temp_url, params=properties)
+
+ self._log.debug("Get response text is %s", response.text)
self._latest_header = Headers(response.headers)
self._log.debug("%s query header: %s", path, self._latest_header)
response.raise_for_status()
return response.json()
+ def post(self, path, data, properties=None):
+ """Posts API data to a given sub-path
+
+ :param str path: sub-path with in the REST API to send data to
+ :param dict data: form data to be posted to the API endpoint
+ :param dict properties:
+ optional set of request properties to append to the API call
+ :returns: json data returned from the API endpoint
+ :rtype: :class:`dict`
+ """
+ self._log.debug(
+ "Posting data from %s with options %s",
+ path,
+ properties
+ )
+ temp_url = "{0}/{1}/".format(self._root_url, path)
+
+ if properties is None:
+ properties = dict()
+ properties["access_token"] = self._token
+
+ response = requests.post(temp_url, data=data, params=properties)
+ self._latest_header = Headers(response.headers)
+ self._log.debug("%s query header: %s", path, self._latest_header)
+ self._log.debug("Post response text is %s", response.text)
+
+ response.raise_for_status()
+
+ return response.json()
+
def get_pages(self, path, properties=None):
"""Generator for iterating over paged results returned from API
Add code to create new boards
The next logical progression in the API development is to add code for creating new boards for a particular authenticated user.
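Expected usage of the new call, per the patch above and assuming the `user` property from the earlier rename (token and board names are placeholders):
```python
from friendlypins.api import API

api = API("my_personal_access_token")  # placeholder token
board = api.user.create_board("Recipes", description="Dinner ideas")
print(board.url)
```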
diff --git a/ruuvitag_sensor/ruuvi.py b/ruuvitag_sensor/ruuvi.py
index ffd6bc6..0dffc62 100644
--- a/ruuvitag_sensor/ruuvi.py
+++ b/ruuvitag_sensor/ruuvi.py
@@ -202,13 +202,12 @@ class RuuviTagSensor(object):
Returns:
string: Sensor data
"""
+ # Search for FF990403 (Manufacturer Specific Data (FF) / Ruuvi Innovations ltd (9904) / Format 3 (03))
try:
- if len(raw) != 54:
+ if "FF990403" not in raw:
return None
- if raw[16:18] != '03':
- return None
-
- return raw[16:]
+ payload_start = raw.index("FF990403") + 6
+ return raw[payload_start:]
except:
return None
Bug: incompatible with RuuviFW 1.2.8
The 1.2.8 update to the Ruuvi firmware trims extra NULLs at the end of the transmission, which breaks the data-format type check. I can fix this and implement #29.
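A minimal standalone version of the marker-search fix, with a synthetic hex string for illustration:
```python
def extract_ruuvi_payload(raw):
    # 'FF990403' = Manufacturer Specific Data (FF) / Ruuvi Innovations
    # ltd (9904) / format 3 (03); searching for the marker tolerates the
    # trailing-NULL trimming introduced in firmware 1.2.8.
    marker = "FF990403"
    if marker not in raw:
        return None
    # +6 skips 'FF9904' so the result starts at '03', matching the
    # shape of the old raw[16:] slice.
    return raw[raw.index(marker) + 6:]

assert extract_ruuvi_payload("AABBFF990403FFAA") == "03FFAA"  # synthetic data
```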
diff --git a/myob/managers.py b/myob/managers.py
index a9010ec..1e17411 100644
--- a/myob/managers.py
+++ b/myob/managers.py
@@ -29,75 +29,45 @@ class Manager():
def build_method(self, method, endpoint, hint):
full_endpoint = self.base_url + endpoint
- required_args = re.findall('\[([^\]]*)\]', full_endpoint)
- if method in ('PUT', 'POST'):
- required_args.append('data')
+ url_keys = re.findall('\[([^\]]*)\]', full_endpoint)
template = full_endpoint.replace('[', '{').replace(']', '}')
+ required_kwargs = url_keys.copy()
+ if method in ('PUT', 'POST'):
+ required_kwargs.append('data')
+
def inner(*args, **kwargs):
if args:
raise AttributeError("Unnamed args provided. Only keyword args accepted.")
- # Ensure all required args have been provided.
- missing_args = set(required_args) - set(kwargs.keys())
- if missing_args:
- raise KeyError("Missing args %s. Endpoint requires %s." % (
- list(missing_args), required_args
+ # Ensure all required url kwargs have been provided.
+ missing_kwargs = set(required_kwargs) - set(kwargs.keys())
+ if missing_kwargs:
+ raise KeyError("Missing kwargs %s. Endpoint requires %s." % (
+ list(missing_kwargs), required_kwargs
))
+ # Parse kwargs.
+ url_kwargs = {}
+ request_kwargs_raw = {}
+ for k, v in kwargs.items():
+ if k in url_keys:
+ url_kwargs[k] = v
+ elif k != 'data':
+ request_kwargs_raw[k] = v
+
# Determine request method.
request_method = 'GET' if method == 'ALL' else method
# Build url.
- url = template.format(**kwargs)
-
- request_kwargs = {}
-
- # Build headers.
- request_kwargs['headers'] = {
- 'Authorization': 'Bearer %s' % self.credentials.oauth_token,
- 'x-myobapi-cftoken': self.credentials.userpass,
- 'x-myobapi-key': self.credentials.consumer_key,
- 'x-myobapi-version': 'v2',
- }
-
- # Build query.
- request_kwargs['params'] = {}
- filters = []
- for k, v in kwargs.items():
- if k not in required_args + ['orderby', 'format', 'headers', 'page', 'limit', 'templatename']:
- if isinstance(v, str):
- v = [v]
- filters.append(' or '.join("%s eq '%s'" % (k, v_) for v_ in v))
- if filters:
- request_kwargs['params']['$filter'] = '&'.join(filters)
-
- if 'orderby' in kwargs:
- request_kwargs['params']['$orderby'] = kwargs['orderby']
-
- page_size = DEFAULT_PAGE_SIZE
- if 'limit' in kwargs:
- page_size = int(kwargs['limit'])
- request_kwargs['params']['$top'] = page_size
-
- if 'page' in kwargs:
- request_kwargs['params']['$skip'] = (int(kwargs['page']) - 1) * page_size
+ url = template.format(**url_kwargs)
- if 'format' in kwargs:
- request_kwargs['params']['format'] = kwargs['format']
-
- if 'templatename' in kwargs:
- request_kwargs['params']['templatename'] = kwargs['templatename']
-
- if request_method in ('PUT', 'POST'):
- request_kwargs['params']['returnBody'] = 'true'
-
- if 'headers' in kwargs:
- request_kwargs['headers'].update(kwargs['headers'])
-
- # Build body.
- if 'data' in kwargs:
- request_kwargs['json'] = kwargs['data']
+ # Build request kwargs (header/query/body)
+ request_kwargs = self.build_request_kwargs(
+ request_method,
+ data=kwargs.get('data'),
+ **request_kwargs_raw,
+ )
response = requests.request(request_method, url, **request_kwargs)
@@ -129,11 +99,66 @@ class Manager():
elif hasattr(self, method_name):
method_name = '%s_%s' % (method.lower(), method_name)
self.method_details[method_name] = {
- 'args': required_args,
+ 'kwargs': required_kwargs,
'hint': hint,
}
setattr(self, method_name, inner)
+ def build_request_kwargs(self, method, data=None, **kwargs):
+ request_kwargs = {}
+
+ # Build headers.
+ request_kwargs['headers'] = {
+ 'Authorization': 'Bearer %s' % self.credentials.oauth_token,
+ 'x-myobapi-cftoken': self.credentials.userpass,
+ 'x-myobapi-key': self.credentials.consumer_key,
+ 'x-myobapi-version': 'v2',
+ }
+ if 'headers' in kwargs:
+ request_kwargs['headers'].update(kwargs['headers'])
+
+ # Build query.
+ request_kwargs['params'] = {}
+ filters = []
+ for k, v in kwargs.items():
+ if k not in ['orderby', 'format', 'headers', 'page', 'limit', 'templatename']:
+ if isinstance(v, str):
+ v = [v]
+ operator = 'eq'
+ for op in ['lt', 'gt']:
+ if k.endswith('__%s' % op):
+ k = k[:-4]
+ operator = op
+ filters.append(' or '.join("%s %s '%s'" % (k, operator, v_) for v_ in v))
+ if filters:
+ request_kwargs['params']['$filter'] = ' and '.join(filters)
+
+ if 'orderby' in kwargs:
+ request_kwargs['params']['$orderby'] = kwargs['orderby']
+
+ page_size = DEFAULT_PAGE_SIZE
+ if 'limit' in kwargs:
+ page_size = int(kwargs['limit'])
+ request_kwargs['params']['$top'] = page_size
+
+ if 'page' in kwargs:
+ request_kwargs['params']['$skip'] = (int(kwargs['page']) - 1) * page_size
+
+ if 'format' in kwargs:
+ request_kwargs['params']['format'] = kwargs['format']
+
+ if 'templatename' in kwargs:
+ request_kwargs['params']['templatename'] = kwargs['templatename']
+
+ if method in ('PUT', 'POST'):
+ request_kwargs['params']['returnBody'] = 'true'
+
+ # Build body.
+ if data is not None:
+ request_kwargs['json'] = data
+
+ return request_kwargs
+
def __repr__(self):
def print_method(name, args):
return '%s(%s)' % (name, ', '.join(args))
@@ -144,7 +169,7 @@ class Manager():
)
return '%s%s:\n %s' % (self.name, self.__class__.__name__, '\n '.join(
formatstr % (
- print_method(k, v['args']),
+ print_method(k, v['kwargs']),
v['hint'],
) for k, v in sorted(self.method_details.items())
))
Support for `gt` and `lt` filtering.
Hi there,
I can't find anything about this in the documentation, but does pymyob support query strings?
Thanks
Barton
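A self-contained re-implementation of just the filter-building rule from the patch above, for illustration (the real logic lives in `Manager.build_request_kwargs`):
```python
def build_filter(**kwargs):
    # Plain kwargs map to OData 'eq'; the '__gt'/'__lt' suffixes added
    # in this patch map to 'gt'/'lt'.
    filters = []
    for k, v in kwargs.items():
        values = [v] if isinstance(v, str) else v
        operator = 'eq'
        for op in ('lt', 'gt'):
            if k.endswith('__%s' % op):
                k = k[:-4]
                operator = op
        filters.append(' or '.join("%s %s '%s'" % (k, operator, v_) for v_ in values))
    return ' and '.join(filters)

assert build_filter(Date__gt='2018-01-01', Status='Open') == \
    "Date gt '2018-01-01' and Status eq 'Open'"
```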
Extension install failures time out
The Windows GA reports a status that allows a fast failure; however, the Linux GA just reports 'Not ready', which essentially waits for a CRP timeout. We should investigate whether there is a substatus we are missing that would allow a fast failure.
Azure/WALinuxAgent
diff --git a/tests/ga/test_exthandlers.py b/tests/ga/test_exthandlers.py
new file mode 100644
index 00000000..248750b1
--- /dev/null
+++ b/tests/ga/test_exthandlers.py
@@ -0,0 +1,74 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the Apache License.
+import json
+
+from azurelinuxagent.common.protocol.restapi import ExtensionStatus
+from azurelinuxagent.ga.exthandlers import parse_ext_status
+from tests.tools import *
+
+
+class TestExtHandlers(AgentTestCase):
+ def test_parse_extension_status00(self):
+ """
+ Parse a status report for a successful execution of an extension.
+ """
+
+ s = '''[{
+ "status": {
+ "status": "success",
+ "formattedMessage": {
+ "lang": "en-US",
+ "message": "Command is finished."
+ },
+ "operation": "Daemon",
+ "code": "0",
+ "name": "Microsoft.OSTCExtensions.CustomScriptForLinux"
+ },
+ "version": "1.0",
+ "timestampUTC": "2018-04-20T21:20:24Z"
+ }
+]'''
+ ext_status = ExtensionStatus(seq_no=0)
+ parse_ext_status(ext_status, json.loads(s))
+
+ self.assertEqual('0', ext_status.code)
+ self.assertEqual(None, ext_status.configurationAppliedTime)
+ self.assertEqual('Command is finished.', ext_status.message)
+ self.assertEqual('Daemon', ext_status.operation)
+ self.assertEqual('success', ext_status.status)
+ self.assertEqual(0, ext_status.sequenceNumber)
+ self.assertEqual(0, len(ext_status.substatusList))
+
+ def test_parse_extension_status01(self):
+ """
+ Parse a status report for a failed execution of an extension.
+
+ The extension returned a bad status/status of failed.
+ The agent should handle this gracefully, and convert all unknown
+ status/status values into an error.
+ """
+
+ s = '''[{
+ "status": {
+ "status": "failed",
+ "formattedMessage": {
+ "lang": "en-US",
+ "message": "Enable failed: Failed with error: commandToExecute is empty or invalid ..."
+ },
+ "operation": "Enable",
+ "code": "0",
+ "name": "Microsoft.OSTCExtensions.CustomScriptForLinux"
+ },
+ "version": "1.0",
+ "timestampUTC": "2018-04-20T20:50:22Z"
+}]'''
+ ext_status = ExtensionStatus(seq_no=0)
+ parse_ext_status(ext_status, json.loads(s))
+
+ self.assertEqual('0', ext_status.code)
+ self.assertEqual(None, ext_status.configurationAppliedTime)
+ self.assertEqual('Enable failed: Failed with error: commandToExecute is empty or invalid ...', ext_status.message)
+ self.assertEqual('Enable', ext_status.operation)
+ self.assertEqual('error', ext_status.status)
+ self.assertEqual(0, ext_status.sequenceNumber)
+ self.assertEqual(0, len(ext_status.substatusList))
diff --git a/nbgrader/converters/autograde.py b/nbgrader/converters/autograde.py
index 62327f17..57c662ab 100644
--- a/nbgrader/converters/autograde.py
+++ b/nbgrader/converters/autograde.py
@@ -2,7 +2,7 @@ import os
import shutil
from textwrap import dedent
-from traitlets import Bool, List
+from traitlets import Bool, List, Dict
from .base import BaseConverter, NbGraderException
from ..preprocessors import (
@@ -24,6 +24,19 @@ class Autograde(BaseConverter):
)
).tag(config=True)
+ exclude_overwriting = Dict(
+ {},
+ help=dedent(
+ """
+ A dictionary with keys corresponding to assignment names and values
+ being a list of filenames (relative to the assignment's source
+ directory) that should NOT be overwritten with the source version.
+ This is to allow students to e.g. edit a python file and submit it
+ alongside the notebooks in their assignment.
+ """
+ )
+ ).tag(config=True)
+
_sanitizing = True
@property
@@ -109,7 +122,9 @@ class Autograde(BaseConverter):
self.log.info("Overwriting files with master versions from the source directory")
dest_path = self._format_dest(assignment_id, student_id)
source_path = self.coursedir.format_path(self.coursedir.source_directory, '.', assignment_id)
- source_files = utils.find_all_files(source_path, self.coursedir.ignore + ["*.ipynb"])
+ source_files = set(utils.find_all_files(source_path, self.coursedir.ignore + ["*.ipynb"]))
+ exclude_files = set([os.path.join(source_path, x) for x in self.exclude_overwriting.get(assignment_id, [])])
+ source_files = list(source_files - exclude_files)
# copy them to the build directory
for filename in source_files:
Have submitted notebooks import from local directory
I had students edit a Python file and then submit it along with the notebooks. However, when I run the autograder, nbgrader imports the Python file from my source directory instead of the submitted one. This, of course, means the test cells that check their implementation always pass no matter what they do (and, conversely, if they added and rely on any further functionality that's not in my solution, those blocks fail!). Is there any way to have the submitted notebooks import from the submitted .py file?
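Based on the `exclude_overwriting` option introduced in the diff above, a config along these lines should keep the student's copy; the assignment and file names here are hypothetical:

```python
# nbgrader_config.py (assignment and file names are made up)
c = get_config()
c.Autograde.exclude_overwriting = {
    # Files listed here (relative to the assignment's source directory)
    # are NOT overwritten with the instructor's version during autograding.
    "ps1": ["mycode.py"],
}
```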
Improper handling of 401 authentication failures
In a call flow such as
* send REGISTER
* get 401 response with auth challenge
* send REGISTER with valid authentication
* get 401 response with no challenge (i.e. your credentials are fine, but you are still denied)
The client will continue to retransmit the request with the authorization header.
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index 43e932e..b503d95 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -5,11 +5,13 @@ de la API de CKAN.
"""
from __future__ import print_function
+import logging
from ckanapi import RemoteCKAN
-from ckanapi.errors import NotFound
+from ckanapi.errors import NotFound, NotAuthorized
from .ckan_utils import map_dataset_to_package, map_theme_to_group
from .search import get_datasets
+logger = logging.getLogger(__name__)
def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
portal_url, apikey, catalog_id=None,
@@ -250,14 +252,20 @@ def harvest_catalog_to_ckan(catalog, portal_url, apikey, catalog_id,
Returns:
str: El id del dataset en el catálogo de destino.
"""
- dataset_list = dataset_list or [ds['identifier']
- for ds in catalog.datasets]
+    # Avoid entering with a falsy value
+ if dataset_list is None:
+ dataset_list = [ds['identifier'] for ds in catalog.datasets]
owner_org = owner_org or catalog_id
harvested = []
for dataset_id in dataset_list:
- harvested_id = harvest_dataset_to_ckan(
- catalog, owner_org, dataset_id, portal_url, apikey, catalog_id)
- harvested.append(harvested_id)
+ try:
+ harvested_id = harvest_dataset_to_ckan(
+ catalog, owner_org, dataset_id, portal_url, apikey, catalog_id)
+ harvested.append(harvested_id)
+ except (NotAuthorized, NotFound, KeyError, TypeError) as e:
+ logger.error("Error federando catalogo:"+catalog_id+", dataset:"+dataset_id + "al portal: "+portal_url)
+ logger.error(str(e))
+
return harvested
Make the handling of harvest_catalog_to_ckan() more robust
Two problems need to be fixed:
- If an empty list is passed as the dataset list, no dataset should be federated. Currently all of them are federated.
- If any of the calls to `harvest_dataset_to_ckan()` fails, log it and continue with the rest. Currently the whole federation of the catalog raises the exception.
datosgobar/pydatajson
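Based on the patch and the test below, a sketch of the intended behaviour; the portal URL, API key and identifiers are placeholders:

```python
from pydatajson.federation import harvest_catalog_to_ckan

# `catalog` is assumed to be an already-loaded pydatajson catalog object;
# the portal URL, API key and ids below are placeholders.
harvested = harvest_catalog_to_ckan(
    catalog, 'http://portal.example', 'apikey', 'catalog-id', dataset_list=[])
assert harvested == []  # an explicit empty list now federates nothing

# dataset_list=None still federates every dataset, and a failing
# harvest_dataset_to_ckan() call is now logged and skipped rather than
# aborting the whole catalog.
```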
diff --git a/tests/test_federation.py b/tests/test_federation.py
index fe95079..9d0515f 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -223,6 +223,13 @@ class PushDatasetTestCase(unittest.TestCase):
self.assertCountEqual([self.catalog_id+'_'+ds['identifier'] for ds in self.catalog.datasets],
harvested_ids)
+ @patch('pydatajson.federation.RemoteCKAN', autospec=True)
+ def test_harvest_catalog_with_empty_list(self, mock_portal):
+ harvested_ids = harvest_catalog_to_ckan(self.catalog, 'portal', 'key', self.catalog_id,
+ owner_org='owner', dataset_list=[])
+ mock_portal.assert_not_called()
+ self.assertEqual([], harvested_ids)
+
class RemoveDatasetTestCase(unittest.TestCase):
diff --git a/neet/boolean/logicnetwork.py b/neet/boolean/logicnetwork.py
index b9342f1..b173fdf 100644
--- a/neet/boolean/logicnetwork.py
+++ b/neet/boolean/logicnetwork.py
@@ -109,13 +109,13 @@ class LogicNetwork(object):
# Encode the mask.
mask_code = long(0)
for idx in indices:
- mask_code += 2 ** idx # Low order, low index.
+ mask_code += 2 ** long(idx) # Low order, low index.
# Encode each condition of truth table.
encoded_sub_table = set()
for condition in conditions:
encoded_condition = long(0)
for idx, state in zip(indices, condition):
- encoded_condition += 2 ** idx if long(state) else 0
+ encoded_condition += 2 ** long(idx) if int(state) else 0
encoded_sub_table.add(encoded_condition)
self._encoded_table.append((mask_code, encoded_sub_table))
LogicNetwork table encoding issue
See comments on the team_grn slack channel. I'll add more here later.
ELIFE-ASU/Neet
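A minimal sketch of the overflow the patch above guards against: exponentiation with a numpy integer index stays in fixed-width int64 and wraps, while casting to a Python int (or `long` on Python 2) keeps arbitrary precision:

```python
import numpy as np

idx = np.int64(64)
print(2 ** idx)       # 0 -- numpy int64 arithmetic wraps at 2**64
print(2 ** int(idx))  # 18446744073709551616 -- Python ints don't overflow
```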
diff --git a/test/test_logic.py b/test/test_logic.py
index 523d2d9..304c019 100644
--- a/test/test_logic.py
+++ b/test/test_logic.py
@@ -2,7 +2,8 @@
# Use of this source code is governed by a MIT
# license that can be found in the LICENSE file.
"""Unit test for LogicNetwork"""
-import unittest
+import unittest, numpy as np
+from neet.python3 import *
from neet.boolean import LogicNetwork
from neet.exceptions import FormatError
@@ -27,6 +28,16 @@ class TestLogicNetwork(unittest.TestCase):
self.assertEqual(['A', 'B'], net.names)
self.assertEqual([(2, {0, 2}), (1, {1})], net._encoded_table)
+ def test_init_long(self):
+ table = [((), set()) for _ in range(65)]
+ table[0] = ((np.int64(64),), set('1'))
+
+ mask = long(2)**64
+
+ net = LogicNetwork(table)
+ self.assertEqual(net.table, table)
+ self.assertEqual(net._encoded_table[0], (mask, set([mask])))
+
def test_inplace_update(self):
net = LogicNetwork([((1,), {'0', '1'}), ((0,), {'1'})])
state = [0, 1]
diff --git a/odml/format.py b/odml/format.py
index bae2d68..7a0a796 100644
--- a/odml/format.py
+++ b/odml/format.py
@@ -130,7 +130,7 @@ class Section(Format):
_args = {
'id': 0,
'type': 1,
- 'name': 0,
+ 'name': 1,
'definition': 0,
'reference': 0,
'link': 0,
diff --git a/odml/property.py b/odml/property.py
index 2602dea..f6d0211 100644
--- a/odml/property.py
+++ b/odml/property.py
@@ -13,7 +13,7 @@ class BaseProperty(base.BaseObject):
"""An odML Property"""
_format = frmt.Property
- def __init__(self, name, value=None, parent=None, unit=None,
+ def __init__(self, name=None, value=None, parent=None, unit=None,
uncertainty=None, reference=None, definition=None,
dependency=None, dependency_value=None, dtype=None,
value_origin=None, id=None):
@@ -58,6 +58,11 @@ class BaseProperty(base.BaseObject):
print(e)
self._id = str(uuid.uuid4())
+ # Use id if no name was provided.
+ if not name:
+ name = self._id
+
+ self._name = name
self._parent = None
self._name = name
self._value_origin = value_origin
@@ -118,6 +123,14 @@ class BaseProperty(base.BaseObject):
@name.setter
def name(self, new_name):
+ if self.name == new_name:
+ return
+
+ curr_parent = self.parent
+ if hasattr(curr_parent, "properties") and new_name in curr_parent.properties:
+
+ raise KeyError("Object with the same name already exists!")
+
self._name = new_name
def __repr__(self):
diff --git a/odml/section.py b/odml/section.py
index fa08c1c..4707003 100644
--- a/odml/section.py
+++ b/odml/section.py
@@ -25,7 +25,7 @@ class BaseSection(base.Sectionable):
_format = format.Section
- def __init__(self, name, type=None, parent=None,
+ def __init__(self, name=None, type=None, parent=None,
definition=None, reference=None,
repository=None, link=None, include=None, id=None):
@@ -42,6 +42,10 @@ class BaseSection(base.Sectionable):
print(e)
self._id = str(uuid.uuid4())
+ # Use id if no name was provided.
+ if not name:
+ name = self._id
+
self._parent = None
self._name = name
self._definition = definition
@@ -94,6 +98,13 @@ class BaseSection(base.Sectionable):
@name.setter
def name(self, new_value):
+ if self.name == new_value:
+ return
+
+ curr_parent = self.parent
+ if hasattr(curr_parent, "sections") and new_value in curr_parent.sections:
+ raise KeyError("Object with the same name already exists!")
+
self._name = new_value
@property
diff --git a/odml/tools/odmlparser.py b/odml/tools/odmlparser.py
index fbc7c71..2edd2e5 100644
--- a/odml/tools/odmlparser.py
+++ b/odml/tools/odmlparser.py
@@ -48,6 +48,10 @@ class ODMLWriter:
raise ParserException(msg)
with open(filename, 'w') as file:
+ # Add XML header to support odML stylesheets.
+ if self.parser == 'XML':
+ file.write(xmlparser.XMLWriter.header)
+
file.write(self.to_string(odml_document))
def to_string(self, odml_document):
diff --git a/odml/tools/xmlparser.py b/odml/tools/xmlparser.py
index f2ea862..c935c99 100644
--- a/odml/tools/xmlparser.py
+++ b/odml/tools/xmlparser.py
@@ -5,11 +5,11 @@ Parses odML files. Can be invoked standalone:
python -m odml.tools.xmlparser file.odml
"""
import csv
+import sys
from lxml import etree as ET
from lxml.builder import E
# this is needed for py2exe to include lxml completely
from lxml import _elementpath as _dummy
-import sys
try:
from StringIO import StringIO
@@ -118,10 +118,9 @@ class XMLWriter:
else:
data = str(self)
- f = open(filename, "w")
- f.write(self.header)
- f.write(data)
- f.close()
+ with open(filename, "w") as file:
+ file.write(self.header)
+ file.write(data)
def load(filename):
@@ -223,18 +222,20 @@ class XMLReader(object):
return None # won't be able to parse this one
return getattr(self, "parse_" + node.tag)(node, self.tags[node.tag])
- def parse_tag(self, root, fmt, insert_children=True, create=None):
+ def parse_tag(self, root, fmt, insert_children=True):
"""
Parse an odml node based on the format description *fmt*
- and a function *create* to instantiate a corresponding object
+ and instantiate the corresponding object.
+ :param root: lxml.etree node containing an odML object or object tree.
+ :param fmt: odML class corresponding to the content of the root node.
+ :param insert_children: Bool value. When True, child elements of the root node
+ will be parsed to their odML equivalents and appended to
+ the odML document. When False, child elements of the
+ root node will be ignored.
"""
arguments = {}
extra_args = {}
children = []
- text = []
-
- if root.text:
- text.append(root.text.strip())
for k, v in root.attrib.iteritems():
k = k.lower()
@@ -258,8 +259,6 @@ class XMLReader(object):
else:
tag = fmt.map(node.tag)
if tag in arguments:
- # TODO make this an error, however first figure out a
- # way to let <odML version=><version/> pass
self.warn("Element <%s> is given multiple times in "
"<%s> tag" % (node.tag, root.tag), node)
@@ -273,38 +272,21 @@ class XMLReader(object):
else:
self.error("Invalid element <%s> in odML document section <%s>"
% (node.tag, root.tag), node)
- if node.tail:
- text.append(node.tail.strip())
if sys.version_info > (3,):
- self.check_mandatory_arguments(dict(list(arguments.items()) +
- list(extra_args.items())),
- fmt, root.tag, root)
+ check_args = dict(list(arguments.items()) + list(extra_args.items()))
else:
- self.check_mandatory_arguments(dict(arguments.items() +
- extra_args.items()),
- fmt, root.tag, root)
- if create is None:
- obj = fmt.create()
- else:
- obj = create(args=arguments, text=''.join(text), children=children)
+ check_args = dict(arguments.items() + extra_args.items())
- for k, v in arguments.items():
- if hasattr(obj, k) and (getattr(obj, k) is None or k == 'id'):
- try:
- if k == 'id' and v is not None:
- obj._id = v
- else:
- setattr(obj, k, v)
- except Exception as e:
- self.warn("cannot set '%s' property on <%s>: %s" %
- (k, root.tag, repr(e)), root)
- if not self.ignore_errors:
- raise e
+ self.check_mandatory_arguments(check_args, fmt, root.tag, root)
+
+ # Instantiate the current odML object with the parsed attributes.
+ obj = fmt.create(**arguments)
if insert_children:
for child in children:
obj.append(child)
+
return obj
def parse_odML(self, root, fmt):
@@ -312,24 +294,10 @@ class XMLReader(object):
return doc
def parse_section(self, root, fmt):
- name = root.get("name") # property name= overrides
- if name is None: # the element
- name_node = root.find("name")
- if name_node is not None:
- name = name_node.text
- root.remove(name_node)
- # delete the name_node so its value won't
- # be used to overwrite the already set name-attribute
-
- if name is None:
- self.error("Missing name element in <section>", root)
-
- return self.parse_tag(root, fmt,
- create=lambda **kargs: fmt.create(name))
+ return self.parse_tag(root, fmt)
def parse_property(self, root, fmt):
- create = lambda children, args, **kargs: fmt.create(**args)
- return self.parse_tag(root, fmt, insert_children=False, create=create)
+ return self.parse_tag(root, fmt, insert_children=False)
if __name__ == '__main__':
odML Format update
Define Section `name` and `type` as well as Property `name` as required in `format.py`.
G-Node/python-odml
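Based on the patch and the tests below, a short sketch of the new behaviour; it assumes `Section` and `Property` are importable from the package root:

```python
from odml import Property, Section

sec = Section()            # no name given
assert sec.name == sec.id  # falls back to the generated UUID

a = Property(name="a", parent=sec)
b = Property(name="b", parent=sec)
b.name = "b2"              # renaming to a free name is fine...
try:
    b.name = "a"           # ...but clashing with a sibling raises
except KeyError as err:
    print(err)             # "Object with the same name already exists!"
```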
diff --git a/test/test_property.py b/test/test_property.py
index 9138cae..9eafedb 100644
--- a/test/test_property.py
+++ b/test/test_property.py
@@ -327,6 +327,40 @@ class TestProperty(unittest.TestCase):
assert(p.dtype == 'string')
assert(p.value == ['7', '20', '1 Dog', 'Seven'])
+ def test_name(self):
+ # Test id is used when name is not provided
+ p = Property()
+ self.assertIsNotNone(p.name)
+ self.assertEqual(p.name, p.id)
+
+ # Test name is properly set on init
+ name = "rumpelstilzchen"
+ p = Property(name)
+ self.assertEqual(p.name, name)
+
+ # Test name can be properly set on single and connected Properties
+ prop = Property()
+ self.assertNotEqual(prop.name, "prop")
+ prop.name = "prop"
+ self.assertEqual(prop.name, "prop")
+
+ sec = Section()
+ prop_a = Property(parent=sec)
+ self.assertNotEqual(prop_a.name, "prop_a")
+ prop_a.name = "prop_a"
+ self.assertEqual(prop_a.name, "prop_a")
+
+ # Test property name can be changed with siblings
+ prop_b = Property(name="prop_b", parent=sec)
+ self.assertEqual(prop_b.name, "prop_b")
+ prop_b.name = "prop"
+ self.assertEqual(prop_b.name, "prop")
+
+ # Test property name set will fail on existing sibling with same name
+ with self.assertRaises(KeyError):
+ prop_b.name = "prop_a"
+ self.assertEqual(prop_b.name, "prop")
+
def test_parent(self):
p = Property("property_section", parent=Section("S"))
self.assertIsInstance(p.parent, BaseSection)
diff --git a/test/test_section.py b/test/test_section.py
index 84604aa..5581928 100644
--- a/test/test_section.py
+++ b/test/test_section.py
@@ -39,6 +39,50 @@ class TestSection(unittest.TestCase):
sec.definition = ""
self.assertIsNone(sec.definition)
+ def test_name(self):
+ # Test id is used when name is not provided
+ s = Section()
+ self.assertIsNotNone(s.name)
+ self.assertEqual(s.name, s.id)
+
+ # Test name is properly set on init
+ name = "rumpelstilzchen"
+ s = Section(name)
+ self.assertEqual(s.name, name)
+
+ name = "rumpelstilzchen"
+ s = Section(name=name)
+ self.assertEqual(s.name, name)
+
+ # Test name can be properly set on single and connected Sections
+ sec = Section()
+ self.assertNotEqual(sec.name, "sec")
+ sec.name = "sec"
+ self.assertEqual(sec.name, "sec")
+
+ subsec_a = Section(parent=sec)
+ self.assertNotEqual(subsec_a.name, "subsec_a")
+ subsec_a.name = "subsec_a"
+ self.assertEqual(subsec_a.name, "subsec_a")
+
+ # Test subsection name can be changed with siblings
+ subsec_b = Section(name="subsec_b", parent=sec)
+ self.assertEqual(subsec_b.name, "subsec_b")
+ subsec_b.name = "subsec"
+ self.assertEqual(subsec_b.name, "subsec")
+
+ # Test subsection name set will fail on existing sibling with same name
+ with self.assertRaises(KeyError):
+ subsec_b.name = "subsec_a"
+ self.assertEqual(subsec_b.name, "subsec")
+
+ # Test section name set will fail on existing same name document sibling
+ doc = Document()
+ sec_a = Section(name="a", parent=doc)
+ sec_b = Section(name="b", parent=doc)
+ with self.assertRaises(KeyError):
+ sec_b.name = "a"
+
def test_parent(self):
s = Section("Section")
self.assertIsNone(s.parent)
diff --git a/pydicom/filewriter.py b/pydicom/filewriter.py
index 797439608..f15749508 100644
--- a/pydicom/filewriter.py
+++ b/pydicom/filewriter.py
@@ -456,6 +456,8 @@ def write_dataset(fp, dataset, parent_encoding=default_encoding):
Attempt to correct ambiguous VR elements when explicit little/big
    encoding. Elements that can't be corrected will be returned unchanged.
"""
+ _harmonize_properties(dataset, fp)
+
if not fp.is_implicit_VR and not dataset.is_original_encoding:
dataset = correct_ambiguous_vr(dataset, fp.is_little_endian)
@@ -475,6 +477,22 @@ def write_dataset(fp, dataset, parent_encoding=default_encoding):
return fp.tell() - fpStart
+def _harmonize_properties(dataset, fp):
+ """Make sure the properties in the dataset and the file pointer are
+ consistent, so the user can set both with the same effect.
+ Properties set on the destination file object always have preference.
+ """
+ # ensure preference of fp over dataset
+ if hasattr(fp, 'is_little_endian'):
+ dataset.is_little_endian = fp.is_little_endian
+ if hasattr(fp, 'is_implicit_VR'):
+ dataset.is_implicit_VR = fp.is_implicit_VR
+
+ # write the properties back to have a consistent state
+ fp.is_implicit_VR = dataset.is_implicit_VR
+ fp.is_little_endian = dataset.is_little_endian
+
+
def write_sequence(fp, data_element, encoding):
"""Write a dicom Sequence contained in data_element to the file fp."""
# write_data_element has already written the VR='SQ' (if needed) and
Write failure with implicit -> explicit VR
```python
>>> from pydicom import dcmread
>>> from pydicom.filebase import DicomBytesIO
>>> from pydicom.filewriter import write_dataset
>>> ds = dcmread('dicom_files/RTImageStorage.dcm')
>>> fp = DicomBytesIO()
>>> fp.is_little_endian = True
>>> fp.is_implicit_VR = False
>>> write_dataset(fp, ds)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/.../pydicom/pydicom/filewriter.py", line 473, in write_dataset
write_data_element(fp, dataset.get_item(tag), dataset_encoding)
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/.../pydicom/pydicom/tag.py", line 37, in tag_in_exception
raise type(ex)(msg)
TypeError: With tag (0008, 0008) got exception: object of type 'NoneType' has no len()
Traceback (most recent call last):
File "/.../pydicom/pydicom/tag.py", line 30, in tag_in_exception
yield
File "/.../pydicom/pydicom/filewriter.py", line 473, in write_dataset
write_data_element(fp, dataset.get_item(tag), dataset_encoding)
File "/.../pydicom/pydicom/filewriter.py", line 384, in write_data_element
if len(VR) != 2:
TypeError: object of type 'NoneType' has no len()
```
Probably related to the #616 PR @mrbean-bremen
pydicom/pydicom
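With `_harmonize_properties` in place, the reporter's snippet should run through; a sketch (the file path is taken from the report above and is only illustrative):

```python
from pydicom import dcmread
from pydicom.filebase import DicomBytesIO
from pydicom.filewriter import write_dataset

ds = dcmread('dicom_files/RTImageStorage.dcm')  # implicit VR source file
fp = DicomBytesIO()
fp.is_little_endian = True
fp.is_implicit_VR = False   # request explicit VR on the destination
write_dataset(fp, ds)       # fp's settings win; no TypeError is raised
```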
diff --git a/pydicom/tests/test_filewriter.py b/pydicom/tests/test_filewriter.py
index 464d6b172..4b943d651 100644
--- a/pydicom/tests/test_filewriter.py
+++ b/pydicom/tests/test_filewriter.py
@@ -1129,6 +1129,46 @@ class TestWriteToStandard(object):
for elem_in, elem_out in zip(ds_explicit, ds_out):
assert elem_in == elem_out
+ def test_write_dataset(self):
+ # make sure writing and reading back a dataset works correctly
+ ds = dcmread(mr_implicit_name)
+ fp = DicomBytesIO()
+ write_dataset(fp, ds)
+ fp.seek(0)
+ ds_read = read_dataset(fp, is_implicit_VR=True, is_little_endian=True)
+ for elem_orig, elem_read in zip(ds_read, ds):
+ assert elem_orig == elem_read
+
+ def test_write_dataset_with_explicit_vr(self):
+ # make sure conversion from implicit to explicit VR does not
+ # raise (regression test for #632)
+ ds = dcmread(mr_implicit_name)
+ fp = DicomBytesIO()
+ fp.is_implicit_VR = False
+ fp.is_little_endian = True
+ write_dataset(fp, ds)
+ fp.seek(0)
+ ds_read = read_dataset(fp, is_implicit_VR=False, is_little_endian=True)
+ for elem_orig, elem_read in zip(ds_read, ds):
+ assert elem_orig == elem_read
+
+ def test_convert_implicit_to_explicit_vr_using_destination(self):
+ # make sure conversion from implicit to explicit VR works
+ # if setting the property in the destination
+ ds = dcmread(mr_implicit_name)
+ ds.is_implicit_VR = False
+ ds.file_meta.TransferSyntaxUID = '1.2.840.10008.1.2.1'
+ fp = DicomBytesIO()
+ fp.is_implicit_VR = False
+ fp.is_little_endian = True
+ ds.save_as(fp, write_like_original=False)
+ fp.seek(0)
+ ds_out = dcmread(fp)
+ ds_explicit = dcmread(mr_name)
+
+ for elem_in, elem_out in zip(ds_explicit, ds_out):
+ assert elem_in == elem_out
+
def test_convert_explicit_to_implicit_vr(self):
# make sure conversion from explicit to implicit VR works
# without private tags
timhoffm: Note: CI currently fails because of pip changes. Should be fixed by #174.
jnothman: if you've not had to modify any tests, how do we know this affects output?
timhoffm: As said above, I didn't run any tests so far myself. It's apparent that one parser for "Parameters" and "Returns" cannot get both right. I'm confident that the proposed code change itself is correct. What still has to be shown is that the calling code and tests work with it (it might be that they partly compensate for the original bug). I thought I'd use the tests in CI for that, but CI is already failing prematurely for unrelated reasons.
timhoffm: Rebased onto master.
*Note:* The tests do currently fail. Waiting for #176 before doing any further changes to fix tests.
timhoffm: PR updated.
Single-element `Returns` entries such as:
~~~
Returns
-------
int
The return value.
~~~
were detected as names, i.e. `int` was considered a name. This logic error has been fixed so that `int` is now a type and the name is empty.
As a consequence, `int` is not formatted bold anymore. This is consistent with the formatting of types in patterns like `x : int` and a prerequisite for type references like ``:class:`MyClass` `` to work in this position.
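A small sketch of the changed behaviour (mirroring the issue further below): a bare `int` entry now lands in the type slot with an empty name:

```python
from numpydoc.docscrape import NumpyDocString

doc = NumpyDocString(
    'Returns\n'
    '-------\n'
    'int\n'
    '    The return value.\n'
)
print(doc['Returns'][0])
# name='', type='int', desc=['The return value.']  (was name='int', type='')
```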
larsoner: @timhoffm can you rebase? Then I can take a look and hopefully merge
rgommers: I've taken the liberty of fixing the merge conflict. The only nontrivial change was deciding where the new heading `Receives` goes; I added it to `'Returns', 'Yields', 'Raises', 'Warns'`.
diff --git a/numpydoc/docscrape.py b/numpydoc/docscrape.py
index 02afd88..32245a9 100644
--- a/numpydoc/docscrape.py
+++ b/numpydoc/docscrape.py
@@ -220,7 +220,7 @@ class NumpyDocString(Mapping):
else:
yield name, self._strip(data[2:])
- def _parse_param_list(self, content):
+ def _parse_param_list(self, content, single_element_is_type=False):
r = Reader(content)
params = []
while not r.eof():
@@ -228,7 +228,10 @@ class NumpyDocString(Mapping):
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
- arg_name, arg_type = header, ''
+ if single_element_is_type:
+ arg_name, arg_type = '', header
+ else:
+ arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
@@ -393,10 +396,12 @@ class NumpyDocString(Mapping):
self._error_location("The section %s appears twice"
% section)
- if section in ('Parameters', 'Returns', 'Yields', 'Receives',
- 'Raises', 'Warns', 'Other Parameters', 'Attributes',
+ if section in ('Parameters', 'Other Parameters', 'Attributes',
'Methods'):
self[section] = self._parse_param_list(content)
+ elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):
+ self[section] = self._parse_param_list(
+ content, single_element_is_type=True)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
@@ -452,10 +457,12 @@ class NumpyDocString(Mapping):
if self[name]:
out += self._str_header(name)
for param in self[name]:
+ parts = []
+ if param.name:
+ parts.append(param.name)
if param.type:
- out += ['%s : %s' % (param.name, param.type)]
- else:
- out += [param.name]
+ parts.append(param.type)
+ out += [' : '.join(parts)]
if param.desc and ''.join(param.desc).strip():
out += self._str_indent(param.desc)
out += ['']
@@ -637,7 +644,7 @@ class ClassDoc(NumpyDocString):
if _members is ALL:
_members = None
_exclude = config.get('exclude-members', [])
-
+
if config.get('show_class_members', True) and _exclude is not ALL:
def splitlines_x(s):
if not s:
@@ -649,7 +656,7 @@ class ClassDoc(NumpyDocString):
if not self[field]:
doc_list = []
for name in sorted(items):
- if (name in _exclude or
+ if (name in _exclude or
(_members and name not in _members)):
continue
try:
diff --git a/numpydoc/docscrape_sphinx.py b/numpydoc/docscrape_sphinx.py
index 9b23235..aad64c7 100644
--- a/numpydoc/docscrape_sphinx.py
+++ b/numpydoc/docscrape_sphinx.py
@@ -70,19 +70,19 @@ class SphinxDocString(NumpyDocString):
return self['Extended Summary'] + ['']
def _str_returns(self, name='Returns'):
- typed_fmt = '**%s** : %s'
- untyped_fmt = '**%s**'
+ named_fmt = '**%s** : %s'
+ unnamed_fmt = '%s'
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param in self[name]:
- if param.type:
- out += self._str_indent([typed_fmt % (param.name.strip(),
+ if param.name:
+ out += self._str_indent([named_fmt % (param.name.strip(),
param.type)])
else:
- out += self._str_indent([untyped_fmt % param.name.strip()])
+ out += self._str_indent([unnamed_fmt % param.type.strip()])
if not param.desc:
out += self._str_indent(['..'], 8)
else:
@@ -209,12 +209,13 @@ class SphinxDocString(NumpyDocString):
display_param, desc = self._process_param(param.name,
param.desc,
fake_autosummary)
-
+ parts = []
+ if display_param:
+ parts.append(display_param)
if param.type:
- out += self._str_indent(['%s : %s' % (display_param,
- param.type)])
- else:
- out += self._str_indent([display_param])
+ parts.append(param.type)
+ out += self._str_indent([' : '.join(parts)])
+
if desc and self.use_blockquotes:
out += ['']
elif not desc:
@@ -376,8 +377,8 @@ class SphinxDocString(NumpyDocString):
'yields': self._str_returns('Yields'),
'receives': self._str_returns('Receives'),
'other_parameters': self._str_param_list('Other Parameters'),
- 'raises': self._str_param_list('Raises'),
- 'warns': self._str_param_list('Warns'),
+ 'raises': self._str_returns('Raises'),
+ 'warns': self._str_returns('Warns'),
'warnings': self._str_warnings(),
'see_also': self._str_see_also(func_role),
'notes': self._str_section('Notes'),
Anonymous return values have their types populated in the name slot of the tuple.
I noticed an inconsistency when using numpydoc version 0.6.0 with Python 2.7 on Ubuntu. The parsed return section returns different styles of tuple depending on whether the return value is anonymous or not.
Here is a minimal working example:
```python
def mwe():
from numpydoc.docscrape import NumpyDocString
docstr = (
'Returns\n'
'----------\n'
'int\n'
' can return an anoymous integer\n'
'out : ndarray\n'
' can return a named value\n'
)
doc = NumpyDocString(docstr)
returns = doc._parsed_data['Returns']
print(returns)
```
This results in
```python
[(u'int', '', [u'can return an anoymous integer']),
(u'out', u'ndarray', [u'can return a named value'])]
```
However, judging by the tests (due to the lack of docs), I believe it was intended that each value in the returns list should be a tuple of `(arg, arg_type, arg_desc)`. Therefore we should see this instead:
```python
[('', u'int', [u'can return an anoymous integer']),
(u'out', u'ndarray', [u'can return a named value'])]
```
My current workaround is this:
```python
for p_name, p_type, p_descr in returns:
if not p_type:
        p_type = p_name
        p_name = ''
```
numpy/numpydoc
diff --git a/numpydoc/tests/test_docscrape.py b/numpydoc/tests/test_docscrape.py
index b4b7e03..e5e3f1f 100644
--- a/numpydoc/tests/test_docscrape.py
+++ b/numpydoc/tests/test_docscrape.py
@@ -211,14 +211,14 @@ def test_returns():
assert desc[-1].endswith('distribution.')
arg, arg_type, desc = doc['Returns'][1]
- assert arg == 'list of str'
- assert arg_type == ''
+ assert arg == ''
+ assert arg_type == 'list of str'
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
arg, arg_type, desc = doc['Returns'][2]
- assert arg == 'no_description'
- assert arg_type == ''
+ assert arg == ''
+ assert arg_type == 'no_description'
assert not ''.join(desc).strip()
@@ -227,7 +227,7 @@ def test_yields():
assert len(section) == 3
truth = [('a', 'int', 'apples.'),
('b', 'int', 'bananas.'),
- ('int', '', 'unknowns.')]
+ ('', 'int', 'unknowns.')]
for (arg, arg_type, desc), (arg_, arg_type_, end) in zip(section, truth):
assert arg == arg_
assert arg_type == arg_type_
@@ -594,11 +594,11 @@ of the one-dimensional normal distribution to higher dimensions.
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
- **list of str**
+ list of str
This is not a real return value. It exists to test
anonymous return values.
- **no_description**
+ no_description
..
:Other Parameters:
@@ -608,12 +608,12 @@ of the one-dimensional normal distribution to higher dimensions.
:Raises:
- **RuntimeError**
+ RuntimeError
Some error
:Warns:
- **RuntimeWarning**
+ RuntimeWarning
Some warning
.. warning::
@@ -687,7 +687,7 @@ def test_sphinx_yields_str():
**b** : int
The number of bananas.
- **int**
+ int
The number of unknowns.
""")
@@ -754,16 +754,18 @@ doc5 = NumpyDocString(
def test_raises():
assert len(doc5['Raises']) == 1
- name, _, desc = doc5['Raises'][0]
- assert name == 'LinAlgException'
- assert desc == ['If array is singular.']
+ param = doc5['Raises'][0]
+ assert param.name == ''
+ assert param.type == 'LinAlgException'
+ assert param.desc == ['If array is singular.']
def test_warns():
assert len(doc5['Warns']) == 1
- name, _, desc = doc5['Warns'][0]
- assert name == 'SomeWarning'
- assert desc == ['If needed']
+ param = doc5['Warns'][0]
+ assert param.name == ''
+ assert param.type == 'SomeWarning'
+ assert param.desc == ['If needed']
def test_see_also():
@@ -995,7 +997,7 @@ def test_use_blockquotes():
GHI
- **JKL**
+ JKL
MNO
''')
diff --git a/nbgrader/utils.py b/nbgrader/utils.py
index 55824f3f..55f440ab 100644
--- a/nbgrader/utils.py
+++ b/nbgrader/utils.py
@@ -194,8 +194,10 @@ def find_all_files(path, exclude=None):
"""Recursively finds all filenames rooted at `path`, optionally excluding
some based on filename globs."""
files = []
+ to_skip = []
for dirname, dirnames, filenames in os.walk(path):
- if is_ignored(dirname, exclude):
+ if is_ignored(dirname, exclude) or dirname in to_skip:
+ to_skip.extend([os.path.join(dirname, x) for x in dirnames])
continue
for filename in filenames:
fullpath = os.path.join(dirname, filename)
Unexpected behaviour of utils.find_all_files
### Operating system
Ubuntu 16.04
### `nbgrader --version`
nbgrader version 0.5.4
### `jupyterhub --version` (if used with JupyterHub)
0.8.1
### `jupyter notebook --version`
5.4.1
### Expected behavior
By including '.git' or '.git/**' in CourseDirectory.ignore, I expected anything under the git directory to be ignored.
### Actual behavior
Anything in subdirectories of '.git' is included.
### Steps to reproduce the behavior
$ mkdir -p foo/bar/qwe
$ touch foo/bar/qwe/file.py
$ /opt/conda/bin/python -c "from nbgrader.utils import find_all_files;print(find_all_files('foo', ['bar']))"
['foo/bar/qwe/file.py']
I'm sorry if this is expected behaviour but I found it surprising.
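With the `to_skip` propagation added in the patch above, the reporter's reproduction should come back empty; a sketch reusing the layout from the report:

```python
# After: mkdir -p foo/bar/qwe && touch foo/bar/qwe/file.py
from nbgrader.utils import find_all_files

# 'bar' is ignored, and now everything beneath it is skipped as well.
print(find_all_files('foo', ['bar']))  # []  (was ['foo/bar/qwe/file.py'])
```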
boumenot: I opened #1161 to address the telemetry issue. There is some sort of circular dependency issue that manifests on CI, but not locally. I will debug it later and add the necessary event.
diff --git a/azurelinuxagent/common/protocol/wire.py b/azurelinuxagent/common/protocol/wire.py
index 841f9b72..265b1f6f 100644
--- a/azurelinuxagent/common/protocol/wire.py
+++ b/azurelinuxagent/common/protocol/wire.py
@@ -600,6 +600,12 @@ class WireClient(object):
random.shuffle(version_uris_shuffled)
for version in version_uris_shuffled:
+ # GA expects a location and failoverLocation in ExtensionsConfig, but
+ # this is not always the case. See #1147.
+ if version.uri is None:
+ logger.verbose('The specified manifest URL is empty, ignored.')
+ continue
+
response = None
if not HostPluginProtocol.is_default_channel():
response = self.fetch(version.uri)
diff --git a/azurelinuxagent/common/utils/restutil.py b/azurelinuxagent/common/utils/restutil.py
index 5ceb4c94..fc9aac93 100644
--- a/azurelinuxagent/common/utils/restutil.py
+++ b/azurelinuxagent/common/utils/restutil.py
@@ -170,8 +170,6 @@ def _http_request(method, host, rel_uri, port=None, data=None, secure=False,
headers=None, proxy_host=None, proxy_port=None):
headers = {} if headers is None else headers
- headers['Connection'] = 'close'
-
use_proxy = proxy_host is not None and proxy_port is not None
if port is None:
ExtensionsConfig May Not Contain a failoverLocation Attribute
The agent expects ExtensionsConfig to have a location and a failoverLocation for each plugin. This has been shown not to hold in all regions. I consider this to be a bug upstream, but the agent should be robust enough to handle this case.
diff --git a/nbgrader/apps/assignapp.py b/nbgrader/apps/assignapp.py
index 85fcf0e8..ebac9e3b 100644
--- a/nbgrader/apps/assignapp.py
+++ b/nbgrader/apps/assignapp.py
@@ -40,6 +40,10 @@ flags.update({
{'BaseConverter': {'force': True}},
"Overwrite an assignment/submission if it already exists."
),
+ 'f': (
+ {'BaseConverter': {'force': True}},
+ "Overwrite an assignment/submission if it already exists."
+ ),
})
diff --git a/nbgrader/apps/autogradeapp.py b/nbgrader/apps/autogradeapp.py
index 64ef3320..187df53b 100644
--- a/nbgrader/apps/autogradeapp.py
+++ b/nbgrader/apps/autogradeapp.py
@@ -30,6 +30,10 @@ flags.update({
{'BaseConverter': {'force': True}},
"Overwrite an assignment/submission if it already exists."
),
+ 'f': (
+ {'BaseConverter': {'force': True}},
+ "Overwrite an assignment/submission if it already exists."
+ ),
})
diff --git a/nbgrader/apps/dbapp.py b/nbgrader/apps/dbapp.py
index fa0e2c50..0ac5e83c 100644
--- a/nbgrader/apps/dbapp.py
+++ b/nbgrader/apps/dbapp.py
@@ -78,6 +78,10 @@ student_remove_flags.update({
{'DbStudentRemoveApp': {'force': True}},
"Complete the operation, even if it means grades will be deleted."
),
+ 'f': (
+ {'DbStudentRemoveApp': {'force': True}},
+ "Complete the operation, even if it means grades will be deleted."
+ ),
})
class DbStudentRemoveApp(NbGrader):
@@ -314,6 +318,10 @@ assignment_remove_flags.update({
{'DbAssignmentRemoveApp': {'force': True}},
"Complete the operation, even if it means grades will be deleted."
),
+ 'f': (
+ {'DbAssignmentRemoveApp': {'force': True}},
+ "Complete the operation, even if it means grades will be deleted."
+ ),
})
class DbAssignmentRemoveApp(NbGrader):
diff --git a/nbgrader/apps/feedbackapp.py b/nbgrader/apps/feedbackapp.py
index f4bde288..b25a9578 100644
--- a/nbgrader/apps/feedbackapp.py
+++ b/nbgrader/apps/feedbackapp.py
@@ -19,6 +19,10 @@ flags.update({
{'BaseConverter': {'force': True}},
"Overwrite an assignment/submission if it already exists."
),
+ 'f': (
+ {'BaseConverter': {'force': True}},
+ "Overwrite an assignment/submission if it already exists."
+ ),
})
class FeedbackApp(NbGrader):
diff --git a/nbgrader/apps/quickstartapp.py b/nbgrader/apps/quickstartapp.py
index 77154df3..462e1cd7 100644
--- a/nbgrader/apps/quickstartapp.py
+++ b/nbgrader/apps/quickstartapp.py
@@ -26,6 +26,20 @@ flags = {
"""
)
),
+ 'f': (
+ {'QuickStartApp': {'force': True}},
+ dedent(
+ """
+ Overwrite existing files if they already exist. WARNING: this is
+ equivalent to doing:
+
+ rm -r <course_id>
+ nbgrader quickstart <course_id>
+
+ So be careful when using this flag!
+ """
+ )
+ ),
}
class QuickStartApp(NbGrader):
diff --git a/nbgrader/apps/releaseapp.py b/nbgrader/apps/releaseapp.py
index 0968ef4b..c44270cd 100644
--- a/nbgrader/apps/releaseapp.py
+++ b/nbgrader/apps/releaseapp.py
@@ -20,6 +20,10 @@ flags.update({
{'ExchangeRelease' : {'force' : True}},
"Force overwrite of existing files in the exchange."
),
+ 'f': (
+ {'ExchangeRelease' : {'force' : True}},
+ "Force overwrite of existing files in the exchange."
+ ),
})
class ReleaseApp(NbGrader):
diff --git a/nbgrader/apps/zipcollectapp.py b/nbgrader/apps/zipcollectapp.py
index 1183667f..2cac325e 100644
--- a/nbgrader/apps/zipcollectapp.py
+++ b/nbgrader/apps/zipcollectapp.py
@@ -35,6 +35,13 @@ flags = {
},
"Force overwrite of existing files."
),
+ 'f': (
+ {
+ 'ZipCollectApp': {'force': True},
+ 'ExtractorPlugin': {'force': True}
+ },
+ "Force overwrite of existing files."
+ ),
'strict': (
{'ZipCollectApp': {'strict': True}},
"Skip submitted notebooks with invalid names."
Allow nbgrader apps to use -f and --force
Currently only --force is supported, which means you have to do:
```
nbgrader autograde ps1 --force
```
rather than
```
nbgrader autograde ps1 -f
```
Both should be supported flags.
jupyter/nbgrader
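The diffs above boil down to registering the same traitlets flag under a long and a short name; a minimal standalone sketch (the app and trait names are made up):

```python
from traitlets import Bool
from traitlets.config import Application


class DemoApp(Application):
    force = Bool(False, help="Overwrite existing files.").tag(config=True)

    # Single-character keys become short flags (-f), longer keys become
    # long flags (--force); both entries set the same trait.
    flags = {
        'force': ({'DemoApp': {'force': True}}, "Overwrite existing files."),
        'f': ({'DemoApp': {'force': True}}, "Overwrite existing files."),
    }

    def start(self):
        print("force =", self.force)


if __name__ == '__main__':
    DemoApp.launch_instance()  # both `-f` and `--force` now work
```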
diff --git a/nbgrader/tests/apps/test_nbgrader_assign.py b/nbgrader/tests/apps/test_nbgrader_assign.py
index c39d91db..0af3eb6a 100644
--- a/nbgrader/tests/apps/test_nbgrader_assign.py
+++ b/nbgrader/tests/apps/test_nbgrader_assign.py
@@ -126,6 +126,38 @@ class TestNbGraderAssign(BaseTestApp):
assert not os.path.isfile(join(course_dir, "release", "ps1", "foo.txt"))
assert not os.path.isfile(join(course_dir, "release", "ps1", "blah.pyc"))
+ def test_force_f(self, course_dir):
+ """Ensure the force option works properly"""
+ self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb'))
+ self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), "foo")
+ self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.txt'), "bar")
+ self._make_file(join(course_dir, 'source', 'ps1', 'blah.pyc'), "asdf")
+ with open("nbgrader_config.py", "a") as fh:
+ fh.write("""c.CourseDirectory.db_assignments = [dict(name="ps1")]\n""")
+
+ run_nbgrader(["assign", "ps1"])
+ assert os.path.isfile(join(course_dir, 'release', 'ps1', 'test.ipynb'))
+ assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
+ assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'bar.txt'))
+ assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'blah.pyc'))
+
+ # check that it skips the existing directory
+ os.remove(join(course_dir, 'release', 'ps1', 'foo.txt'))
+ run_nbgrader(["assign", "ps1"])
+ assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
+
+ # force overwrite the supplemental files
+ run_nbgrader(["assign", "ps1", "-f"])
+ assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
+
+ # force overwrite
+ os.remove(join(course_dir, 'source', 'ps1', 'foo.txt'))
+ run_nbgrader(["assign", "ps1", "-f"])
+ assert os.path.isfile(join(course_dir, "release", "ps1", "test.ipynb"))
+ assert os.path.isfile(join(course_dir, "release", "ps1", "data", "bar.txt"))
+ assert not os.path.isfile(join(course_dir, "release", "ps1", "foo.txt"))
+ assert not os.path.isfile(join(course_dir, "release", "ps1", "blah.pyc"))
+
def test_permissions(self, course_dir):
"""Are permissions properly set?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
diff --git a/nbgrader/tests/apps/test_nbgrader_autograde.py b/nbgrader/tests/apps/test_nbgrader_autograde.py
index ba44d44b..02cfbcbd 100644
--- a/nbgrader/tests/apps/test_nbgrader_autograde.py
+++ b/nbgrader/tests/apps/test_nbgrader_autograde.py
@@ -335,6 +335,46 @@ class TestNbGraderAutograde(BaseTestApp):
assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
+ def test_force_f(self, db, course_dir):
+ """Ensure the force option works properly"""
+ with open("nbgrader_config.py", "a") as fh:
+ fh.write("""c.CourseDirectory.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
+ fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]""")
+
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
+ self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo")
+ self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar")
+ run_nbgrader(["assign", "ps1", "--db", db])
+
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo")
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar")
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "blah.pyc"), "asdf")
+ run_nbgrader(["autograde", "ps1", "--db", db])
+
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
+ assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
+
+ # check that it skips the existing directory
+ remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+ run_nbgrader(["autograde", "ps1", "--db", db])
+ assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+
+ # force overwrite the supplemental files
+ run_nbgrader(["autograde", "ps1", "--db", db, "-f"])
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+
+ # force overwrite
+ remove(join(course_dir, "source", "ps1", "foo.txt"))
+ remove(join(course_dir, "submitted", "foo", "ps1", "foo.txt"))
+ run_nbgrader(["autograde", "ps1", "--db", db, "-f"])
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
+ assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
+ assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
+
def test_filter_notebook(self, db, course_dir):
"""Does autograding filter by notebook properly?"""
with open("nbgrader_config.py", "a") as fh:
diff --git a/nbgrader/tests/apps/test_nbgrader_db.py b/nbgrader/tests/apps/test_nbgrader_db.py
index 5b7789da..9576ecae 100644
--- a/nbgrader/tests/apps/test_nbgrader_db.py
+++ b/nbgrader/tests/apps/test_nbgrader_db.py
@@ -105,7 +105,33 @@ class TestNbGraderDb(BaseTestApp):
# now force it to complete
run_nbgrader(["db", "student", "remove", "foo", "--force", "--db", db])
- # student should be gone
+ # student should be gone
+ with Gradebook(db) as gb:
+ with pytest.raises(MissingEntry):
+ gb.find_student("foo")
+
+ def test_student_remove_with_submissions_f(self, db, course_dir):
+ run_nbgrader(["db", "student", "add", "foo", "--db", db])
+ run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
+ run_nbgrader(["assign", "ps1", "--db", db])
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
+ run_nbgrader(["autograde", "ps1", "--db", db])
+
+ with Gradebook(db) as gb:
+ gb.find_student("foo")
+
+ # it should fail if we don't run with --force
+ run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)
+
+ # make sure we can still find the student
+ with Gradebook(db) as gb:
+ gb.find_student("foo")
+
+ # now force it to complete
+ run_nbgrader(["db", "student", "remove", "foo", "-f", "--db", db])
+
+ # student should be gone
with Gradebook(db) as gb:
with pytest.raises(MissingEntry):
gb.find_student("foo")
@@ -249,6 +275,32 @@ class TestNbGraderDb(BaseTestApp):
with pytest.raises(MissingEntry):
gb.find_assignment("ps1")
+ def test_assignment_remove_with_submissions_f(self, db, course_dir):
+ run_nbgrader(["db", "student", "add", "foo", "--db", db])
+ run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
+ run_nbgrader(["assign", "ps1", "--db", db])
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
+ run_nbgrader(["autograde", "ps1", "--db", db])
+
+ with Gradebook(db) as gb:
+ gb.find_assignment("ps1")
+
+ # it should fail if we don't run with --force
+ run_nbgrader(["db", "assignment", "remove", "ps1", "--db", db], retcode=1)
+
+ # make sure we can still find the assignment
+ with Gradebook(db) as gb:
+ gb.find_assignment("ps1")
+
+ # now force it to complete
+ run_nbgrader(["db", "assignment", "remove", "ps1", "-f", "--db", db])
+
+ # assignment should be gone
+ with Gradebook(db) as gb:
+ with pytest.raises(MissingEntry):
+ gb.find_assignment("ps1")
+
def test_assignment_list(self, db):
run_nbgrader(["db", "assignment", "add", "foo", '--duedate="Sun Jan 8 2017 4:31:22 PM"', "--db", db])
run_nbgrader(["db", "assignment", "add", "bar", "--db", db])
diff --git a/nbgrader/tests/apps/test_nbgrader_feedback.py b/nbgrader/tests/apps/test_nbgrader_feedback.py
index 637f11d7..20fb7a75 100644
--- a/nbgrader/tests/apps/test_nbgrader_feedback.py
+++ b/nbgrader/tests/apps/test_nbgrader_feedback.py
@@ -67,6 +67,46 @@ class TestNbGraderFeedback(BaseTestApp):
assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
+ def test_force_f(self, db, course_dir):
+ """Ensure the force option works properly"""
+ with open("nbgrader_config.py", "a") as fh:
+ fh.write("""c.CourseDirectory.db_assignments = [dict(name="ps1")]\n""")
+ fh.write("""c.CourseDirectory.db_students = [dict(id="foo")]\n""")
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
+ self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo")
+ self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar")
+ run_nbgrader(["assign", "ps1", "--db", db])
+
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo")
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar")
+ run_nbgrader(["autograde", "ps1", "--db", db])
+
+ self._make_file(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"), "asdf")
+ run_nbgrader(["feedback", "ps1", "--db", db])
+
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
+ assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
+
+ # check that it skips the existing directory
+ remove(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
+ run_nbgrader(["feedback", "ps1", "--db", db])
+ assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
+
+ # force overwrite the supplemental files
+ run_nbgrader(["feedback", "ps1", "--db", db, "-f"])
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
+
+ # force overwrite
+ remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+ run_nbgrader(["feedback", "ps1", "--db", db, "--force"])
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
+ assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
+ assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
+
def test_filter_notebook(self, db, course_dir):
"""Does feedback filter by notebook properly?"""
with open("nbgrader_config.py", "a") as fh:
diff --git a/nbgrader/tests/apps/test_nbgrader_quickstart.py b/nbgrader/tests/apps/test_nbgrader_quickstart.py
index 1189933c..d9a9f705 100644
--- a/nbgrader/tests/apps/test_nbgrader_quickstart.py
+++ b/nbgrader/tests/apps/test_nbgrader_quickstart.py
@@ -39,3 +39,28 @@ class TestNbGraderQuickStart(BaseTestApp):
# nbgrader assign should work
run_nbgrader(["assign", "ps1"])
+ def test_quickstart_f(self):
+ """Is the quickstart example properly generated?"""
+
+ run_nbgrader(["quickstart", "example"])
+
+ # it should fail if it already exists
+ run_nbgrader(["quickstart", "example"], retcode=1)
+
+ # it should succeed if --force is given
+ os.remove(os.path.join("example", "nbgrader_config.py"))
+ run_nbgrader(["quickstart", "example", "-f"])
+ assert os.path.exists(os.path.join("example", "nbgrader_config.py"))
+
+ # nbgrader validate should work
+ os.chdir("example")
+ for nb in os.listdir(os.path.join("source", "ps1")):
+ if not nb.endswith(".ipynb"):
+ continue
+ output = run_nbgrader(["validate", os.path.join("source", "ps1", nb)], stdout=True)
+ assert output.strip() == "Success! Your notebook passes all the tests."
+
+ # nbgrader assign should work
+ run_nbgrader(["assign", "ps1"])
+
+
diff --git a/nbgrader/tests/apps/test_nbgrader_release.py b/nbgrader/tests/apps/test_nbgrader_release.py
index 0d8bf2dc..830f5955 100644
--- a/nbgrader/tests/apps/test_nbgrader_release.py
+++ b/nbgrader/tests/apps/test_nbgrader_release.py
@@ -53,6 +53,19 @@ class TestNbGraderRelease(BaseTestApp):
self._release("ps1", exchange, flags=["--force"])
assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
+ def test_force_release_f(self, exchange, course_dir):
+ self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
+ self._release("ps1", exchange)
+ assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
+
+ self._release("ps1", exchange, retcode=1)
+
+ os.remove(join(exchange, join("abc101", "outbound", "ps1", "p1.ipynb")))
+ self._release("ps1", exchange, retcode=1)
+
+ self._release("ps1", exchange, flags=["-f"])
+ assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
+
def test_release_with_assignment_flag(self, exchange, course_dir):
self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
self._release("--assignment=ps1", exchange)
diff --git a/nbgrader/tests/apps/test_nbgrader_zip_collect.py b/nbgrader/tests/apps/test_nbgrader_zip_collect.py
index 58343a55..9f5dca14 100644
--- a/nbgrader/tests/apps/test_nbgrader_zip_collect.py
+++ b/nbgrader/tests/apps/test_nbgrader_zip_collect.py
@@ -72,6 +72,25 @@ class TestNbGraderZipCollect(BaseTestApp):
assert os.path.isdir(extracted_dir)
assert len(os.listdir(extracted_dir)) == 1
+ def test_extract_single_notebook_f(self, course_dir, archive_dir):
+ extracted_dir = join(archive_dir, "..", "extracted")
+ self._make_notebook(archive_dir,
+ 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1')
+
+ run_nbgrader(["zip_collect", "ps1"])
+ assert os.path.isdir(extracted_dir)
+ assert len(os.listdir(extracted_dir)) == 1
+
+ # Run again should fail
+ run_nbgrader(["zip_collect", "ps1"], retcode=1)
+ assert os.path.isdir(extracted_dir)
+ assert len(os.listdir(extracted_dir)) == 1
+
+ # Run again with --force flag should pass
+ run_nbgrader(["zip_collect", "-f", "ps1"])
+ assert os.path.isdir(extracted_dir)
+ assert len(os.listdir(extracted_dir)) == 1
+
def test_extract_sub_dir_single_notebook(self, course_dir, archive_dir):
extracted_dir = join(archive_dir, "..", "extracted")
self._make_notebook(join(archive_dir, 'hacker'),
diff --git a/longbow/applications.py b/longbow/applications.py
index c693b42..8f99c1e 100755
--- a/longbow/applications.py
+++ b/longbow/applications.py
@@ -353,13 +353,15 @@ def _procfiles(job, arg, filelist, foundflags, substitution):
# Otherwise we have a replicate job so check these.
else:
- # Add the repX dir
- if ("rep" + str(rep)) not in filelist:
+ repx = str(job["replicate-naming"]) + str(rep)
- filelist.append("rep" + str(rep))
+ # Add the repx dir
+ if (repx) not in filelist:
+
+ filelist.append(repx)
fileitem = _procfilesreplicatejobs(
- app, arg, job["localworkdir"], initargs, rep)
+ app, arg, job["localworkdir"], initargs, repx)
job["executableargs"] = initargs
@@ -407,21 +409,21 @@ def _procfilessinglejob(app, arg, cwd):
return fileitem
-def _procfilesreplicatejobs(app, arg, cwd, initargs, rep):
+def _procfilesreplicatejobs(app, arg, cwd, initargs, repx):
"""Processor for replicate jobs."""
fileitem = ""
tmpitem = ""
# We should check that the replicate directory structure exists.
- if os.path.isdir(os.path.join(cwd, "rep" + str(rep))) is False:
+ if os.path.isdir(os.path.join(cwd, repx)) is False:
- os.mkdir(os.path.join(cwd, "rep" + str(rep)))
+ os.mkdir(os.path.join(cwd, repx))
# If we have a replicate job then we should check if the file resides
# within ./rep{i} or if it is a global (common to each replicate) file.
- if os.path.isfile(os.path.join(cwd, "rep" + str(rep), arg)):
+ if os.path.isfile(os.path.join(cwd, repx, arg)):
- fileitem = os.path.join("rep" + str(rep), arg)
+ fileitem = os.path.join(repx, arg)
# Otherwise do we have a file in cwd
elif os.path.isfile(os.path.join(cwd, arg)):
@@ -440,7 +442,7 @@ def _procfilesreplicatejobs(app, arg, cwd, initargs, rep):
try:
tmpitem, _ = getattr(apps, app.lower()).defaultfilename(
- cwd, os.path.join("rep" + str(rep), arg), "")
+ cwd, os.path.join(repx, arg), "")
except AttributeError:
diff --git a/longbow/configuration.py b/longbow/configuration.py
index 2e82db1..b35c5c8 100755
--- a/longbow/configuration.py
+++ b/longbow/configuration.py
@@ -103,6 +103,7 @@ JOBTEMPLATE = {
"remoteworkdir": "",
"resource": "",
"replicates": "1",
+ "replicate-naming": "rep",
"scheduler": "",
"scripts": "",
"slurm-gres": "",
Allow replicate directory naming schemes
At the moment, users must follow a specific fixed directory structure for replicates, rep[x], where the 'rep' part is fixed and the number is incremented. Users have requested that the 'rep' part be configurable so that Longbow can be chained more easily with other tools.
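A sketch of what the `replicate-naming` key added in the diff enables; the job dictionary is reduced to the relevant fields:

```python
# Reduced job dictionary: only the fields relevant to replicate naming.
job = {"replicates": "3", "replicate-naming": "run"}  # default is "rep"

dirs = [job["replicate-naming"] + str(i)
        for i in range(1, int(job["replicates"]) + 1)]
print(dirs)  # ['run1', 'run2', 'run3'] -- previously always rep1, rep2, ...
```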
diff --git a/conans/client/build/cmake.py b/conans/client/build/cmake.py
index 2702d2b9e..3a2494873 100644
--- a/conans/client/build/cmake.py
+++ b/conans/client/build/cmake.py
@@ -115,8 +115,11 @@ class CMake(object):
return os.environ["CONAN_CMAKE_GENERATOR"]
if not self._compiler or not self._compiler_version or not self._arch:
- raise ConanException("You must specify compiler, compiler.version and arch in "
- "your settings to use a CMake generator")
+ if self._os_build == "Windows":
+ raise ConanException("You must specify compiler, compiler.version and arch in "
+ "your settings to use a CMake generator. You can also declare "
+ "the env variable CONAN_CMAKE_GENERATOR.")
+ return "Unix Makefiles"
if self._compiler == "Visual Studio":
_visuals = {'8': '8 2005',
Settings friction review
I've been experimenting with non-common settings for embedded devices and I've found a couple of issues:
1. We have to review why the CMake() build helper raises when it doesn't have a compiler or architecture. It is very uncomfortable to specify the settings and then delete them for the package_id just to avoid the error, and it makes no sense.
2. If I install a `conanfile.txt` without specifying the `os` setting, it crashes in the settings.py validate() method, because somehow it has the os in the fields but not in the data.
conan-io/conan
diff --git a/conans/test/build_helpers/cmake_test.py b/conans/test/build_helpers/cmake_test.py
index 85d1ba227..ff1802915 100644
--- a/conans/test/build_helpers/cmake_test.py
+++ b/conans/test/build_helpers/cmake_test.py
@@ -773,6 +773,29 @@ build_type: [ Release]
cmake = CMake(conan_file)
self.assertIn('-T "v140"', cmake.command_line)
+
+ def test_missing_settings(self):
+ def instance_with_os_build(os_build):
+ settings = Settings.loads(default_settings_yml)
+ settings.os_build = os_build
+ conan_file = ConanFileMock()
+ conan_file.settings = settings
+ return CMake(conan_file)
+
+ cmake = instance_with_os_build("Linux")
+ self.assertEquals(cmake.generator, "Unix Makefiles")
+
+ cmake = instance_with_os_build("Macos")
+ self.assertEquals(cmake.generator, "Unix Makefiles")
+
+ with self.assertRaisesRegexp(ConanException, "You must specify compiler, "
+ "compiler.version and arch"):
+ instance_with_os_build("Windows")
+
+ with tools.environment_append({"CONAN_CMAKE_GENERATOR": "MyCoolGenerator"}):
+ cmake = instance_with_os_build("Windows")
+ self.assertEquals(cmake.generator, "MyCoolGenerator")
+
def test_cmake_system_version_android(self):
with tools.environment_append({"CONAN_CMAKE_SYSTEM_NAME": "SomeSystem",
"CONAN_CMAKE_GENERATOR": "SomeGenerator"}):
diff --git a/conans/test/integration/cmake_flags_test.py b/conans/test/integration/cmake_flags_test.py
index a74a320ba..2af97aeff 100644
--- a/conans/test/integration/cmake_flags_test.py
+++ b/conans/test/integration/cmake_flags_test.py
@@ -254,10 +254,13 @@ class MyLib(ConanFile):
client = TestClient()
client.save({"conanfile.py": conanfile % settings_line})
client.run("install .")
- client.run("build .", ignore_error=True)
-
- self.assertIn("You must specify compiler, compiler.version and arch in "
- "your settings to use a CMake generator", client.user_io.out,)
+ error = client.run("build .", ignore_error=True)
+ if platform.system() == "Windows":
+ self.assertTrue(error)
+ self.assertIn("You must specify compiler, compiler.version and arch in "
+ "your settings to use a CMake generator", client.user_io.out,)
+ else:
+ self.assertFalse(error)
def cmake_shared_flag_test(self):
conanfile = """
diff --git a/pyclarity_lims/lims.py b/pyclarity_lims/lims.py
index c00b1a1..532b315 100644
--- a/pyclarity_lims/lims.py
+++ b/pyclarity_lims/lims.py
@@ -210,7 +210,8 @@ class Lims(object):
root = ElementTree.fromstring(response.content)
return root
- def get_udfs(self, name=None, attach_to_name=None, attach_to_category=None, start_index=None, add_info=False):
+ def get_udfs(self, name=None, attach_to_name=None, attach_to_category=None, start_index=None, nb_pages=-1,
+ add_info=False):
"""Get a list of udfs, filtered by keyword arguments.
:param name: name of udf
@@ -218,7 +219,9 @@ class Lims(object):
Sample, Project, Container, or the name of a process.
:param attach_to_category: If 'attach_to_name' is the name of a process, such as 'CaliperGX QC (DNA)',
then you need to set attach_to_category='ProcessType'. Must not be provided otherwise.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
"""
@@ -226,21 +229,23 @@ class Lims(object):
attach_to_name=attach_to_name,
attach_to_category=attach_to_category,
start_index=start_index)
- return self._get_instances(Udfconfig, add_info=add_info, params=params)
+ return self._get_instances(Udfconfig, add_info=add_info, nb_pages=nb_pages, params=params)
- def get_reagent_types(self, name=None, start_index=None):
+ def get_reagent_types(self, name=None, start_index=None, nb_pages=-1):
"""
Get a list of reagent types, filtered by keyword arguments.
:param name: Reagent type name, or list of names.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
"""
params = self._get_params(name=name,
start_index=start_index)
- return self._get_instances(ReagentType, params=params)
+ return self._get_instances(ReagentType, nb_pages=nb_pages, params=params)
def get_labs(self, name=None, last_modified=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None, add_info=False):
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1, add_info=False):
"""Get a list of labs, filtered by keyword arguments.
:param name: Lab name, or list of names.
@@ -249,7 +254,9 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
"""
@@ -257,11 +264,11 @@ class Lims(object):
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Lab, add_info=add_info, params=params)
+ return self._get_instances(Lab, add_info=add_info, nb_pages=nb_pages, params=params)
def get_researchers(self, firstname=None, lastname=None, username=None,
last_modified=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None,
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1,
add_info=False):
"""Get a list of researchers, filtered by keyword arguments.
@@ -273,7 +280,9 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
@@ -284,10 +293,10 @@ class Lims(object):
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Researcher, add_info=add_info, params=params)
+ return self._get_instances(Researcher, add_info=add_info, nb_pages=nb_pages, params=params)
def get_projects(self, name=None, open_date=None, last_modified=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None,
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1,
add_info=False):
"""Get a list of projects, filtered by keyword arguments.
@@ -298,7 +307,9 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
@@ -308,14 +319,16 @@ class Lims(object):
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Project, add_info=add_info, params=params)
+ return self._get_instances(Project, add_info=add_info, nb_pages=nb_pages, params=params)
def get_sample_number(self, name=None, projectname=None, projectlimsid=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None):
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1):
"""
Gets the number of samples matching the query without fetching every
sample, so it should be faster than len(get_samples())
"""
+ # TODO: I doubt that this makes any difference in terms of speed, since the only thing it saves is the Sample
+ # construction. We should test and replace it with len(get_samples())
params = self._get_params(name=name,
projectname=projectname,
projectlimsid=projectlimsid,
@@ -331,7 +344,7 @@ class Lims(object):
return total
def get_samples(self, name=None, projectname=None, projectlimsid=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None):
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1):
"""Get a list of samples, filtered by keyword arguments.
:param name: Sample name, or list of names.
@@ -341,21 +354,22 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
-
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
"""
params = self._get_params(name=name,
projectname=projectname,
projectlimsid=projectlimsid,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Sample, params=params)
+ return self._get_instances(Sample, nb_pages=nb_pages, params=params)
def get_artifacts(self, name=None, type=None, process_type=None,
artifact_flag_name=None, working_flag=None, qc_flag=None,
sample_name=None, samplelimsid=None, artifactgroup=None, containername=None,
containerlimsid=None, reagent_label=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None,
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1,
resolve=False):
"""Get a list of artifacts, filtered by keyword arguments.
@@ -375,9 +389,10 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
:param resolve: Send a batch query to the lims to get the content of all artifacts retrieved
-
"""
params = self._get_params(name=name,
type=type,
@@ -394,13 +409,13 @@ class Lims(object):
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
if resolve:
- return self.get_batch(self._get_instances(Artifact, params=params))
+ return self.get_batch(self._get_instances(Artifact, nb_pages=nb_pages, params=params))
else:
- return self._get_instances(Artifact, params=params)
+ return self._get_instances(Artifact, nb_pages=nb_pages, params=params)
def get_containers(self, name=None, type=None,
state=None, last_modified=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None,
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1,
add_info=False):
"""Get a list of containers, filtered by keyword arguments.
@@ -412,10 +427,11 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
-
"""
params = self._get_params(name=name,
type=type,
@@ -423,24 +439,25 @@ class Lims(object):
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Container, add_info=add_info, params=params)
+ return self._get_instances(Container, add_info=add_info, nb_pages=nb_pages, params=params)
- def get_container_types(self, name=None, start_index=None, add_info=False):
+ def get_container_types(self, name=None, start_index=None, nb_pages=-1, add_info=False):
"""Get a list of container types, filtered by keyword arguments.
:param name: name of the container type or list of names.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
-
"""
params = self._get_params(name=name, start_index=start_index)
- return self._get_instances(Containertype, add_info=add_info, params=params)
+ return self._get_instances(Containertype, add_info=add_info, nb_pages=nb_pages, params=params)
def get_processes(self, last_modified=None, type=None,
inputartifactlimsid=None,
techfirstname=None, techlastname=None, projectname=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None):
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1):
"""Get a list of processes, filtered by keyword arguments.
:param last_modified: Since the given ISO format datetime.
@@ -453,7 +470,9 @@ class Lims(object):
:param techfirstname: First name of researcher, or list of.
:param techlastname: Last name of researcher, or list of.
:param projectname: Name of project, or list of.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
"""
params = self._get_params(last_modified=last_modified,
type=type,
@@ -463,7 +482,7 @@ class Lims(object):
projectname=projectname,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Process, params=params)
+ return self._get_instances(Process, nb_pages=nb_pages, params=params)
def get_workflows(self, name=None, add_info=False):
"""
@@ -513,32 +532,35 @@ class Lims(object):
params = self._get_params(name=name)
return self._get_instances(Protocol, add_info=add_info, params=params)
- def get_reagent_kits(self, name=None, start_index=None, add_info=False):
+ def get_reagent_kits(self, name=None, start_index=None, nb_pages=-1, add_info=False):
"""Get a list of reagent kits, filtered by keyword arguments.
:param name: reagent kit name, or list of names.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
"""
params = self._get_params(name=name,
start_index=start_index)
- return self._get_instances(ReagentKit, add_info=add_info, params=params)
+ return self._get_instances(ReagentKit, add_info=add_info, nb_pages=nb_pages, params=params)
def get_reagent_lots(self, name=None, kitname=None, number=None,
- start_index=None):
+ start_index=None, nb_pages=-1):
"""Get a list of reagent lots, filtered by keyword arguments.
:param name: reagent kit name, or list of names.
:param kitname: name of the kit these lots belong to
:param number: lot number or list of lot number
- :param start_index: Page to retrieve; all if None.
-
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of pages to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative values return all pages.
"""
params = self._get_params(name=name, kitname=kitname, number=number,
start_index=start_index)
- return self._get_instances(ReagentLot, params=params)
+ return self._get_instances(ReagentLot, nb_pages=nb_pages, params=params)
def _get_params(self, **kwargs):
"""Convert keyword arguments to a kwargs dictionary."""
@@ -560,14 +582,15 @@ class Lims(object):
result["udt.%s" % key] = value
return result
- def _get_instances(self, klass, add_info=None, params=dict()):
+ def _get_instances(self, klass, add_info=None, nb_pages=-1, params=dict()):
results = []
additionnal_info_dicts = []
tag = klass._TAG
if tag is None:
tag = klass.__name__.lower()
root = self.get(self.get_uri(klass._URI), params=params)
- while params.get('start-index') is None: # Loop over all pages.
+ while root: # Loop over all requested pages.
+ nb_pages -= 1
for node in root.findall(tag):
results.append(klass(self, uri=node.attrib['uri']))
info_dict = {}
@@ -577,9 +600,10 @@ class Lims(object):
info_dict[subnode.tag] = subnode.text
additionnal_info_dicts.append(info_dict)
node = root.find('next-page')
- if node is None:
- break
- root = self.get(node.attrib['uri'], params=params)
+ if node is None or nb_pages == 0:
+ root = None
+ else:
+ root = self.get(node.attrib['uri'], params=params)
if add_info:
return results, additionnal_info_dicts
else:
Lims _get_instances() returns empty array when start_index is set.
An empty array is always returned when retrieving a list of entities using the `get_*` methods of the `Lims` class with `start_index` set. For example:
```
samples = l.get_samples(start_index=500)
# samples == []
```
The problem is in the [_get_instances()](https://github.com/EdinburghGenomics/pyclarity-lims/blob/master/pyclarity_lims/lims.py#L563-L586) method. The response is only parsed when `start_index` is `None`.
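A standalone sketch of the fixed loop, assuming a `get_page` callable that fetches and parses one page (the real method also collects per-node info dicts):
```python
import xml.etree.ElementTree as ElementTree

def iter_pages(first_root, get_page, nb_pages=-1):
    """Yield every requested page, following 'next-page' links."""
    root = first_root
    while root is not None:
        nb_pages -= 1
        yield root                       # parse *every* fetched page
        node = root.find('next-page')
        if node is None or nb_pages == 0:
            root = None
        else:
            root = get_page(node.attrib['uri'])

# Two fake pages stand in for the LIMS responses.
pages = {
    'p1': '<r><sample uri="s1"/><next-page uri="p2"/></r>',
    'p2': '<r><sample uri="s2"/></r>',
}
get_page = lambda uri: ElementTree.fromstring(pages[uri])
for page in iter_pages(get_page('p1'), get_page):
    print([n.attrib['uri'] for n in page.findall('sample')])
# ['s1'] then ['s2'] -- results come back even when start-index was set
```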
diff --git a/mobly/controllers/android_device_lib/adb.py b/mobly/controllers/android_device_lib/adb.py
index 12c14bd..cb5b6b6 100644
--- a/mobly/controllers/android_device_lib/adb.py
+++ b/mobly/controllers/android_device_lib/adb.py
@@ -237,7 +237,8 @@ class AdbProxy(object):
def forward(self, args=None, shell=False):
with ADB_PORT_LOCK:
- return self._exec_adb_cmd('forward', args, shell, timeout=None)
+ return self._exec_adb_cmd(
+ 'forward', args, shell, timeout=None, stderr=None)
def instrument(self, package, options=None, runner=None):
"""Runs an instrumentation command on the device.
`current_test_info` should exist between `setup_class` and `setup_test`
Right now `current_test_info` is None between `setup_class` and `setup_test`, which makes it difficult to use this field consistently.
E.g. if a test relies on this field in `on_fail` and `setup_class` fails, the logic in `on_fail` would raise an exception for any call to `current_test_info`.
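For illustration, a hedged sketch of what the change enables (requires mobly installed; the class name and print body are made up, while the attributes follow the diff and test below):
```python
from mobly import base_test

class EnvironmentTest(base_test.BaseTestClass):
    def setup_class(self):
        raise Exception('environment setup failed')

    def on_fail(self, record):
        # Before the fix, current_test_info was None when setup_class
        # failed, so this attribute access raised instead of logging.
        info = self.current_test_info
        print('failed stage: %s, output path: %s'
              % (info.name, info.output_path))
```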
google/mobly
diff --git a/mobly/base_test.py b/mobly/base_test.py
index e4e047b..13a79b0 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -624,6 +624,10 @@ class BaseTestClass(object):
tests = self._get_test_methods(test_names)
try:
# Setup for the class.
+ class_record = records.TestResultRecord('setup_class', self.TAG)
+ class_record.test_begin()
+ self.current_test_info = runtime_test_info.RuntimeTestInfo(
+ 'setup_class', self.log_path, class_record)
try:
self._setup_class()
except signals.TestAbortSignal:
@@ -633,9 +637,6 @@ class BaseTestClass(object):
# Setup class failed for unknown reasons.
# Fail the class and skip all tests.
logging.exception('Error in setup_class %s.', self.TAG)
- class_record = records.TestResultRecord(
- 'setup_class', self.TAG)
- class_record.test_begin()
class_record.test_error(e)
self._exec_procedure_func(self._on_fail, class_record)
self.results.add_class_error(class_record)
diff --git a/mobly/runtime_test_info.py b/mobly/runtime_test_info.py
index f4eea99..57b0742 100644
--- a/mobly/runtime_test_info.py
+++ b/mobly/runtime_test_info.py
@@ -19,10 +19,13 @@ from mobly import utils
class RuntimeTestInfo(object):
- """Container class for runtime information of a test.
+ """Container class for runtime information of a test or test stage.
One object corresponds to one test. This is meant to be a read-only class.
+ This also applies to test stages like `setup_class`, which has its own
+ runtime info but is not part of any single test.
+
Attributes:
name: string, name of the test.
signature: string, an identifier of the test, a combination of test
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index d78a640..a38b532 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -91,6 +91,25 @@ class BaseTestTest(unittest.TestCase):
self.assertIsNone(actual_record.details)
self.assertIsNone(actual_record.extras)
+ def test_current_test_info_in_setup_class(self):
+ class MockBaseTest(base_test.BaseTestClass):
+ def setup_class(self):
+ asserts.assert_true(
+ self.current_test_info.name == 'setup_class',
+ 'Got unexpected test name %s.' %
+ self.current_test_info.name)
+ output_path = self.current_test_info.output_path
+ asserts.assert_true(
+ os.path.exists(output_path), 'test output path missing')
+ raise Exception(MSG_EXPECTED_EXCEPTION)
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.run()
+ actual_record = bt_cls.results.error[0]
+ self.assertEqual(actual_record.test_name, 'setup_class')
+ self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
+ self.assertIsNone(actual_record.extras)
+
def test_self_tests_list(self):
class MockBaseTest(base_test.BaseTestClass):
def __init__(self, controllers):
diff --git a/tests/mobly/controllers/android_device_lib/adb_test.py b/tests/mobly/controllers/android_device_lib/adb_test.py
index 7bf61ab..cf699ce 100755
--- a/tests/mobly/controllers/android_device_lib/adb_test.py
+++ b/tests/mobly/controllers/android_device_lib/adb_test.py
@@ -173,6 +173,10 @@ class AdbTest(unittest.TestCase):
self.assertEqual(MOCK_DEFAULT_STDERR,
stderr_redirect.getvalue().decode('utf-8'))
+ def test_forward(self):
+ with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ adb.AdbProxy().forward(MOCK_SHELL_COMMAND)
+
def test_instrument_without_parameters(self):
"""Verifies the AndroidDevice object's instrument command is correct in
the basic case.
hugovk: Passing CI build: https://travis-ci.org/hugovk/pypinfo/builds/379256544
ofek: Thanks so much!
diff --git a/pypinfo/core.py b/pypinfo/core.py
index f1ba663..5c633a4 100644
--- a/pypinfo/core.py
+++ b/pypinfo/core.py
@@ -12,13 +12,17 @@ FROM = """\
FROM
TABLE_DATE_RANGE(
[the-psf:pypi.downloads],
- DATE_ADD(CURRENT_TIMESTAMP(), {}, "day"),
- DATE_ADD(CURRENT_TIMESTAMP(), {}, "day")
+ {},
+ {}
)
"""
+DATE_ADD = 'DATE_ADD(CURRENT_TIMESTAMP(), {}, "day")'
+START_TIMESTAMP = 'TIMESTAMP("{} 00:00:00")'
+END_TIMESTAMP = 'TIMESTAMP("{} 23:59:59")'
START_DATE = '-31'
END_DATE = '-1'
DEFAULT_LIMIT = '10'
+YYYY_MM_DD = re.compile("^[0-9]{4}-[01][0-9]-[0-3][0-9]$")
def create_config():
@@ -42,6 +46,28 @@ def create_client(creds_file=None):
return Client.from_service_account_json(creds_file, project=project)
+def validate_date(date):
+ valid = False
+ try:
+ if int(date) < 0:
+ valid = True
+ except ValueError:
+ if YYYY_MM_DD.match(date):
+ valid = True
+
+ if not valid:
+ raise ValueError('Dates must be negative integers or YYYY-MM-DD in the past.')
+ return valid
+
+
+def format_date(date, timestamp_format):
+ try:
+ date = DATE_ADD.format(int(date))
+ except ValueError:
+ date = timestamp_format.format(date)
+ return date
+
+
def build_query(project, all_fields, start_date=None, end_date=None,
days=None, limit=None, where=None, order=None, pip=None):
project = normalize(project)
@@ -53,11 +79,18 @@ def build_query(project, all_fields, start_date=None, end_date=None,
if days:
start_date = str(int(end_date) - int(days))
- if int(start_date) > 0 or int(end_date) > 0:
- raise ValueError('Dates must be in the past (negative).')
+ validate_date(start_date)
+ validate_date(end_date)
+
+ try:
+ if int(start_date) >= int(end_date):
+ raise ValueError('End date must be greater than start date.')
+ except ValueError:
+ # Not integers, must be yyyy-mm-dd
+ pass
- if int(start_date) >= int(end_date):
- raise ValueError('End date must be greater than start date.')
+ start_date = format_date(start_date, START_TIMESTAMP)
+ end_date = format_date(end_date, END_TIMESTAMP)
fields = []
used_fields = set()
Allow YYYY-MM-DD dates in --start-date and --end-date
It'd be handy to be able to use `YYYY-MM-DD` dates as the start and end date. For example:
```console
$ pypinfo --start-date 2018-01-01 --end-date 2018-01-31 pillow pyversion
```
Rather than having to work it out:
```console
$ pypinfo --start-date -43 --end-date -14 pillow pyversion
```
It wouldn't necessarily have to reuse `--start-date` and `--end-date`, but that's probably clearest and easiest (if it's not a negative integer, it's a date).
What do you think?
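A self-contained sketch of how the patch above normalizes the two accepted forms (the constants are copied from the diff; `validate_date`/`format_date` mirror the new helpers):
```python
import re

DATE_ADD = 'DATE_ADD(CURRENT_TIMESTAMP(), {}, "day")'
START_TIMESTAMP = 'TIMESTAMP("{} 00:00:00")'
YYYY_MM_DD = re.compile("^[0-9]{4}-[01][0-9]-[0-3][0-9]$")

def validate_date(date):
    """Accept negative day offsets or literal YYYY-MM-DD dates."""
    try:
        if int(date) < 0:
            return True
    except ValueError:
        if YYYY_MM_DD.match(date):
            return True
    raise ValueError('Dates must be negative integers or YYYY-MM-DD in the past.')

def format_date(date, timestamp_format):
    """Render a date as a BigQuery DATE_ADD or TIMESTAMP expression."""
    try:
        return DATE_ADD.format(int(date))
    except ValueError:
        return timestamp_format.format(date)

for d in ('-31', '2018-01-01'):
    validate_date(d)
    print(format_date(d, START_TIMESTAMP))
# DATE_ADD(CURRENT_TIMESTAMP(), -31, "day")
# TIMESTAMP("2018-01-01 00:00:00")
```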
LCI not covered by TF1 home page anymore
### Checklist
- [ ] This is a bug report.
- [ ] This is a feature request.
- [x] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
### Description
I noticed a while ago that the tf1 plugin no longer covers LCI from the tf1.fr website. It did before.
### Expected / Actual behavior
Here is the result with the older tf1 plugin:
```
streamlink "https://www.tf1.fr/lci/direct"
[cli][info] Found matching plugin tf1 for URL https://www.tf1.fr/lci/direct
error: Unable to open URL: http://lcilivhlshdslive-lh.akamaihd.net/z/lci_1@301585/manifest.f4m?hdnea=st=1526479986~exp=1526481786~acl=/*~hmac=207f41547435bb3422e9f51af166cae855bdbb387ac875524827deb528999d9e (403 Client Error: Forbidden for url: http://lcilivhlshdslive-lh.akamaihd.net/z/lci_1@301585/manifest.f4m?hdnea=st=1526479986~exp=1526481786~acl=/*~hmac=207f41547435bb3422e9f51af166cae855bdbb387ac875524827deb528999d9e&g=DSCLJVQYJHGR&hdcore=3.1.0)
```
The latest tf1 plugin gives this result:
```
streamlink "https://www.tf1.fr/lci/direct"
[cli][info] Found matching plugin resolve for URL https://www.tf1.fr/lci/direct
[plugin.resolve][info] Found iframes:
Traceback (most recent call last):
  File "C:\Program Files\Python27\Scripts\streamlink-script.py", line 11, in <module>
    load_entry_point('streamlink==0.12.1+8.ge2a5546', 'console_scripts', 'streamlink')()
  File "c:\program files\python27\lib\site-packages\streamlink_cli\main.py", line 1113, in main
    handle_url()
  File "c:\program files\python27\lib\site-packages\streamlink_cli\main.py", line 505, in handle_url
    streams = fetch_streams(plugin)
  File "c:\program files\python27\lib\site-packages\streamlink_cli\main.py", line 402, in fetch_streams
    sorting_excludes=args.stream_sorting_excludes)
  File "c:\program files\python27\lib\site-packages\streamlink\plugin\plugin.py", line 385, in get_streams
    return self.streams(*args, **kwargs)
  File "c:\program files\python27\lib\site-packages\streamlink\plugin\plugin.py", line 288, in streams
    ostreams = self._get_streams()
  File "c:\program files\python27\lib\site-packages\streamlink\plugins\resolve.py", line 480, in _get_streams
IndexError: list index out of range
```
### Reproduction steps / Explicit stream URLs to test
1. ` streamlink "https://www.tf1.fr/lci/direct"`
### Logs
```
streamlink -l debug
[cli][debug] OS: Windows 7
[cli][debug] Python: 2.7.13
[cli][debug] Streamlink: 0.12.1+8.ge2a5546
[cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.46.0)
```
Both tests were made with the latest streamlink build; I just replaced the newer tf1 plugin with the older one.
Thanks for looking into it.
streamlink/streamlink
diff --git a/tests/test_plugin_tf1.py b/tests/test_plugin_tf1.py
index 77afd8d8..f8e48790 100644
--- a/tests/test_plugin_tf1.py
+++ b/tests/test_plugin_tf1.py
@@ -12,11 +12,11 @@ class TestPluginTF1(unittest.TestCase):
self.assertTrue(TF1.can_handle_url("http://lci.fr/direct"))
self.assertTrue(TF1.can_handle_url("http://www.lci.fr/direct"))
self.assertTrue(TF1.can_handle_url("http://tf1.fr/tmc/direct"))
+ self.assertTrue(TF1.can_handle_url("http://tf1.fr/lci/direct"))
+ def test_can_handle_url_negative(self):
# shouldn't match
self.assertFalse(TF1.can_handle_url("http://tf1.fr/direct"))
-# self.assertFalse(TF1.can_handle_url("http://tf1.fr/nt1/direct")) NOTE : TF1 redirect old channel names to new ones (for now).
-# self.assertFalse(TF1.can_handle_url("http://tf1.fr/hd1/direct"))
self.assertFalse(TF1.can_handle_url("http://www.tf1.fr/direct"))
self.assertFalse(TF1.can_handle_url("http://www.tvcatchup.com/"))
self.assertFalse(TF1.can_handle_url("http://www.youtube.com/"))
diff --git a/swagger_spec_validator/validator20.py b/swagger_spec_validator/validator20.py
index fe17ded..77920c1 100644
--- a/swagger_spec_validator/validator20.py
+++ b/swagger_spec_validator/validator20.py
@@ -7,6 +7,7 @@ from __future__ import unicode_literals
import functools
import logging
import string
+from collections import defaultdict
from jsonschema.validators import Draft4Validator
from jsonschema.validators import RefResolver
@@ -196,6 +197,8 @@ def validate_apis(apis, deref):
:raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
:raises: :py:class:`jsonschema.exceptions.ValidationError`
"""
+ operation_tag_to_operation_id_set = defaultdict(set)
+
for api_name, api_body in iteritems(apis):
api_body = deref(api_body)
api_params = deref(api_body.get('parameters', []))
@@ -206,6 +209,20 @@ def validate_apis(apis, deref):
if oper_name == 'parameters' or oper_name.startswith('x-'):
continue
oper_body = deref(api_body[oper_name])
+ oper_tags = deref(oper_body.get('tags', [None]))
+
+ # Check that, if this operation has an operationId defined,
+ # no other operation with a same tag also has that
+ # operationId.
+ operation_id = oper_body.get('operationId')
+ if operation_id is not None:
+ for oper_tag in oper_tags:
+ if operation_id in operation_tag_to_operation_id_set[oper_tag]:
+ raise SwaggerValidationError(
+ "Duplicate operationId: {}".format(operation_id)
+ )
+ operation_tag_to_operation_id_set[oper_tag].add(operation_id)
+
oper_params = deref(oper_body.get('parameters', []))
validate_duplicate_param(oper_params, deref)
all_path_params = list(set(
Validator does not check uniqueness of operation ids
According to the Swagger spec, the `operationId` of an operation object is:
> Unique string used to identify the operation. The id MUST be unique among all operations described in the API. Tools and libraries MAY use the operationId to uniquely identify an operation, therefore, it is recommended to follow common programming naming conventions.
The validator does not currently check that `operationId`s are unique across the API. This would be a helpful feature because some codegen tools fail if this constraint is not met.
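A minimal sketch of the per-tag uniqueness check the patch adds, using a plain `ValueError` in place of `SwaggerValidationError` and a trimmed-down stand-in for the `paths` object:
```python
from collections import defaultdict

def check_unique_operation_ids(apis):
    seen = defaultdict(set)  # tag -> operationIds already encountered
    for path, operations in apis.items():
        for verb, body in operations.items():
            op_id = body.get('operationId')
            if op_id is None:
                continue
            # An id may repeat across tags, but not within one tag.
            for tag in body.get('tags', [None]):
                if op_id in seen[tag]:
                    raise ValueError('Duplicate operationId: %s' % op_id)
                seen[tag].add(op_id)

check_unique_operation_ids({
    '/pets': {'get': {'operationId': 'list', 'tags': ['pets']}},
    '/owners': {'get': {'operationId': 'list', 'tags': ['owners']}},
})  # passes: same id, different tags
```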
diff --git a/hapic/context.py b/hapic/context.py
index 97aa0c4..7b5d9b9 100644
--- a/hapic/context.py
+++ b/hapic/context.py
@@ -135,6 +135,15 @@ class ContextInterface(object):
"""
raise NotImplementedError()
+ def is_debug(self) -> bool:
+ """
+ Method called to know if Hapic has been called in debug mode.
+ Debug mode provide some informations like debug trace and error
+ message in body when internal error happen.
+ :return: True if in debug mode
+ """
+ raise NotImplementedError()
+
class HandledException(object):
"""
diff --git a/hapic/decorator.py b/hapic/decorator.py
index 10c6036..8d7f284 100644
--- a/hapic/decorator.py
+++ b/hapic/decorator.py
@@ -420,7 +420,10 @@ class ExceptionHandlerControllerWrapper(ControllerWrapper):
func_kwargs,
)
except self.handled_exception_class as exc:
- response_content = self.error_builder.build_from_exception(exc)
+ response_content = self.error_builder.build_from_exception(
+ exc,
+ include_traceback=self.context.is_debug(),
+ )
# Check error format
dumped = self.error_builder.dump(response_content).data
diff --git a/hapic/error.py b/hapic/error.py
index 9157657..073b849 100644
--- a/hapic/error.py
+++ b/hapic/error.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
+import traceback
+
import marshmallow
from hapic.processor import ProcessValidationError
@@ -9,7 +11,11 @@ class ErrorBuilderInterface(marshmallow.Schema):
ErrorBuilder is a class who represent a Schema (marshmallow.Schema) and
can generate a response content from exception (build_from_exception)
"""
- def build_from_exception(self, exception: Exception) -> dict:
+ def build_from_exception(
+ self,
+ exception: Exception,
+ include_traceback: bool = False,
+ ) -> dict:
"""
Build the error response content from given exception
:param exception: Original exception who invoke this method
@@ -34,14 +40,28 @@ class DefaultErrorBuilder(ErrorBuilderInterface):
details = marshmallow.fields.Dict(required=False, missing={})
code = marshmallow.fields.Raw(missing=None)
- def build_from_exception(self, exception: Exception) -> dict:
+ def build_from_exception(
+ self,
+ exception: Exception,
+ include_traceback: bool = False,
+ ) -> dict:
"""
See hapic.error.ErrorBuilderInterface#build_from_exception docstring
"""
# TODO: "error_detail" attribute name should be configurable
+ message = str(exception)
+ if not message:
+ message = type(exception).__name__
+
+ details = {
+ 'error_detail': getattr(exception, 'error_detail', {}),
+ }
+ if include_traceback:
+ details['traceback'] = traceback.format_exc()
+
return {
- 'message': str(exception),
- 'details': getattr(exception, 'error_detail', {}),
+ 'message': message,
+ 'details': details,
'code': None,
}
diff --git a/hapic/ext/bottle/context.py b/hapic/ext/bottle/context.py
index c5090b8..ba8d75a 100644
--- a/hapic/ext/bottle/context.py
+++ b/hapic/ext/bottle/context.py
@@ -33,12 +33,14 @@ class BottleContext(BaseContext):
self,
app: bottle.Bottle,
default_error_builder: ErrorBuilderInterface=None,
+ debug: bool = False,
):
self._handled_exceptions = [] # type: typing.List[HandledException] # nopep8
self._exceptions_handler_installed = False
self.app = app
self.default_error_builder = \
default_error_builder or DefaultErrorBuilder() # FDV
+ self.debug = debug
def get_request_parameters(self, *args, **kwargs) -> RequestParameters:
path_parameters = dict(bottle.request.url_args)
@@ -164,3 +166,6 @@ class BottleContext(BaseContext):
See hapic.context.BaseContext#_get_handled_exception_class_and_http_codes # nopep8
"""
return self._handled_exceptions
+
+ def is_debug(self) -> bool:
+ return self.debug
diff --git a/hapic/ext/flask/context.py b/hapic/ext/flask/context.py
index 0908dc2..b548d11 100644
--- a/hapic/ext/flask/context.py
+++ b/hapic/ext/flask/context.py
@@ -32,11 +32,13 @@ class FlaskContext(BaseContext):
self,
app: Flask,
default_error_builder: ErrorBuilderInterface=None,
+ debug: bool = False,
):
self._handled_exceptions = [] # type: typing.List[HandledException] # nopep8
self.app = app
self.default_error_builder = \
default_error_builder or DefaultErrorBuilder() # FDV
+ self.debug = debug
def get_request_parameters(self, *args, **kwargs) -> RequestParameters:
from flask import request
@@ -165,3 +167,6 @@ class FlaskContext(BaseContext):
http_code: int,
) -> None:
raise NotImplementedError('TODO')
+
+ def is_debug(self) -> bool:
+ return self.debug
diff --git a/hapic/ext/pyramid/context.py b/hapic/ext/pyramid/context.py
index d39b615..6fcde49 100644
--- a/hapic/ext/pyramid/context.py
+++ b/hapic/ext/pyramid/context.py
@@ -31,11 +31,13 @@ class PyramidContext(BaseContext):
self,
configurator: 'Configurator',
default_error_builder: ErrorBuilderInterface = None,
+ debug: bool = False,
):
self._handled_exceptions = [] # type: typing.List[HandledException] # nopep8
self.configurator = configurator
self.default_error_builder = \
default_error_builder or DefaultErrorBuilder() # FDV
+ self.debug = debug
def get_request_parameters(self, *args, **kwargs) -> RequestParameters:
req = args[-1] # TODO : Check
@@ -189,3 +191,6 @@ class PyramidContext(BaseContext):
http_code: int,
) -> None:
raise NotImplementedError('TODO')
+
+ def is_debug(self) -> bool:
+ return self.debug
Error catching: error details must be hidden by default
When an error is caught and transformed into a response, detailed error information must be hidden by default. A parameter like "debug" should make it possible to return it.
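A stripped-down sketch of the gated behaviour the patch introduces, with hapic's marshmallow machinery replaced by plain dicts:
```python
import traceback

def build_from_exception(exception, include_traceback=False):
    """Build an error payload; only expose the traceback in debug mode."""
    message = str(exception) or type(exception).__name__
    details = {'error_detail': getattr(exception, 'error_detail', {})}
    if include_traceback:
        details['traceback'] = traceback.format_exc()
    return {'message': message, 'details': details, 'code': None}

try:
    raise RuntimeError('boom')
except RuntimeError as exc:
    print(build_from_exception(exc))                          # safe for production
    print(build_from_exception(exc, include_traceback=True))  # debug only
```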
diff --git a/serpentTools/objects/containers.py b/serpentTools/objects/containers.py
index 8c30e1b..bcc9103 100644
--- a/serpentTools/objects/containers.py
+++ b/serpentTools/objects/containers.py
@@ -35,8 +35,8 @@ for xsSpectrum, xsType in product({'INF', 'B1'},
for xx in range(SCATTER_ORDERS)})
HOMOG_VAR_TO_ATTR = {
- 'MICRO_E': 'microGroups', 'MICRO_NG': '_numMicroGroups',
- 'MACRO_E': 'groups', 'MACRO_NG': '_numGroups'}
+ 'MICRO_E': 'microGroups', 'MICRO_NG': 'numMicroGroups',
+ 'MACRO_E': 'groups', 'MACRO_NG': 'numGroups'}
__all__ = ('DET_COLS', 'HomogUniv', 'BranchContainer', 'Detector',
'DetectorBase', 'SCATTER_MATS', 'SCATTER_ORDERS')
@@ -147,12 +147,22 @@ class HomogUniv(NamedObject):
self._numGroups = self.groups.size - 1
return self._numGroups
+ @numGroups.setter
+ def numGroups(self, value):
+ value = value if isinstance(value, int) else int(value)
+ self._numGroups = value
+
@property
def numMicroGroups(self):
if self._numMicroGroups is None and self.microGroups is not None:
self._numMicroGroups = self.microGroups.size - 1
return self._numMicroGroups
+ @numMicroGroups.setter
+ def numMicroGroups(self, value):
+ value = value if isinstance(value, int) else int(value)
+ self._numMicroGroups = value
+
def __str__(self):
extras = []
if self.bu is not None:
[BUG] number of groups stored as a float; causes reshaping of scatter matrices to fail
## Summary of issue
The `addData` routine stores the number of energy groups as a float. This causes numpy to fail during the reshaping of scattering matrices.
## Code for reproducing the issue
```
import serpentTools
from serpentTools.settings import rc
rc['xs.reshapeScatter'] = True
r = serpentTools.read('bwr_res.m')
```
## Actual outcome including console output and error traceback if applicable
```
~/.local/lib/python3.5/site-packages/serpentTools-0.4.0+9.g277cb89-py3.5.egg/serpentTools/objects/containers.py in addData(self, variableName, variableValue, uncertainty)
200 'should be boolean.'.format(type(uncertainty)))
201
--> 202 value = self._cleanData(variableName, variableValue)
203 if variableName in HOMOG_VAR_TO_ATTR:
204 value = value if variableValue.size > 1 else value[0]
~/.local/lib/python3.5/site-packages/serpentTools-0.4.0+9.g277cb89-py3.5.egg/serpentTools/objects/containers.py in _cleanData(self, name, value)
233 .format(name))
234 else:
--> 235 value = value.reshape(ng, ng)
236 return value
237
TypeError: 'numpy.float64' object cannot be interpreted as an integer
```
## Expected outcome
No error and scattering matrices are reshaped properly
## Versions
* Version from ``serpentTools.__version__`` `0.4.0+9.g277cb89`
* Python version - ``python --version`` `3.5`
* IPython or Jupyter version if applicable - `ipython 6.2.1`
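The fix (see the diff above) is to coerce the value in a property setter; a reduced sketch follows, with `HomogUniv` cut down to the one attribute that matters here:
```python
from numpy import arange, float64

class Univ(object):
    def __init__(self):
        self._numGroups = None

    @property
    def numGroups(self):
        return self._numGroups

    @numGroups.setter
    def numGroups(self, value):
        # Parsed values may arrive as numpy floats; store plain ints so
        # later reshape calls do not choke.
        self._numGroups = value if isinstance(value, int) else int(value)

univ = Univ()
univ.numGroups = float64(2.0)
print(arange(4).reshape(univ.numGroups, univ.numGroups))  # no TypeError
```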
CORE-GATECH-GROUP/serpent-tools
diff --git a/serpentTools/tests/test_container.py b/serpentTools/tests/test_container.py
index 721dd1d..ded8988 100644
--- a/serpentTools/tests/test_container.py
+++ b/serpentTools/tests/test_container.py
@@ -4,7 +4,7 @@ import unittest
from itertools import product
from six import iteritems
-from numpy import array, arange, ndarray
+from numpy import array, arange, ndarray, float64
from numpy.testing import assert_array_equal
from serpentTools.settings import rc
@@ -171,6 +171,37 @@ class UnivTruthTester(unittest.TestCase):
self.assertTrue(univ.hasData, msg=msg)
+class HomogUnivIntGroupsTester(unittest.TestCase):
+ """Class that ensures number of groups is stored as ints."""
+
+ def setUp(self):
+ self.univ = HomogUniv('intGroups', 0, 0, 0)
+ self.numGroups = 2
+ self.numMicroGroups = 4
+
+ def test_univGroupsFromFloats(self):
+ """Vefify integer groups are stored when passed as floats."""
+ self.setAs(float)
+ self._tester()
+
+ def test_univGroupsFromNPFloats(self):
+ """Vefify integer groups are stored when passed as numpy floats."""
+ self.setAs(float64)
+ self._tester()
+
+ def _tester(self):
+ for attr in {'numGroups', 'numMicroGroups'}:
+ actual = getattr(self.univ, attr)
+ msg = 'Attribute: {}'.format(attr)
+ self.assertIsInstance(actual, int, msg=msg)
+ expected = getattr(self, attr)
+ self.assertEqual(expected, actual, msg=msg)
+
+ def setAs(self, func):
+ """Set the number of groups to be as specific type."""
+ for attr in {'numGroups', 'numMicroGroups'}:
+ expected = getattr(self, attr)
+ setattr(self.univ, attr, func(expected))
if __name__ == '__main__':
unittest.main()
codecov[bot]: # [Codecov](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=h1) Report
> Merging [#369](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=desc) into [master](https://codecov.io/gh/pypa/twine/commit/34c08ef97d05d219ae018f041cd37e1d409b7a4d?src=pr&el=desc) will **decrease** coverage by `0.45%`.
> The diff coverage is `100%`.
```diff
@@ Coverage Diff @@
## master #369 +/- ##
=========================================
- Coverage 74.55% 74.1% -0.46%
=========================================
Files 13 13
Lines 672 668 -4
Branches 101 100 -1
=========================================
- Hits 501 495 -6
- Misses 143 145 +2
Partials 28 28
```
| [Impacted Files](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [twine/utils.py](https://codecov.io/gh/pypa/twine/pull/369/diff?src=pr&el=tree#diff-dHdpbmUvdXRpbHMucHk=) | `82.25% <100%> (-2.12%)` | :arrow_down: |
| [twine/wininst.py](https://codecov.io/gh/pypa/twine/pull/369/diff?src=pr&el=tree#diff-dHdpbmUvd2luaW5zdC5weQ==) | `29.72% <0%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=footer). Last update [34c08ef...6e1a1ea](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
anlutro: I wonder if I could remove this line and just always configure `testpypi`, regardless of `index-servers`? I left it in to stay consistent with old behaviour. https://github.com/pypa/twine/pull/369/files#diff-547bab308763f89cacec226151fcbb80R83
anlutro: docs failed in Travis - unrelated, I guess?
theacodes: Yes, unrelated and up to me to fix. Can you get lint passing?
anlutro: Amended, lint should pass now.
diff --git a/twine/utils.py b/twine/utils.py
index d83e080..4feca1b 100644
--- a/twine/utils.py
+++ b/twine/utils.py
@@ -21,6 +21,7 @@ import getpass
import sys
import argparse
import warnings
+import collections
from requests.exceptions import HTTPError
@@ -48,68 +49,52 @@ TEST_REPOSITORY = "https://test.pypi.org/legacy/"
def get_config(path="~/.pypirc"):
+ # even if the config file does not exist, set up the parser
+ # variable to reduce the number of if/else statements
+ parser = configparser.RawConfigParser()
+
+ # this list will only be used if index-servers
+ # is not defined in the config file
+ index_servers = ["pypi", "testpypi"]
+
+ # default configuration for each repository
+ defaults = {"username": None, "password": None}
+
# Expand user strings in the path
path = os.path.expanduser(path)
- if not os.path.isfile(path):
- return {"pypi": {"repository": DEFAULT_REPOSITORY,
- "username": None,
- "password": None
- },
- "pypitest": {"repository": TEST_REPOSITORY,
- "username": None,
- "password": None
- },
- }
-
# Parse the rc file
- parser = configparser.RawConfigParser()
- parser.read(path)
-
- # Get a list of repositories from the config file
- # format: https://docs.python.org/3/distutils/packageindex.html#pypirc
- if (parser.has_section("distutils") and
- parser.has_option("distutils", "index-servers")):
- repositories = parser.get("distutils", "index-servers").split()
- elif parser.has_section("pypi"):
- # Special case: if the .pypirc file has a 'pypi' section,
- # even if there's no list of index servers,
- # be lenient and include that in our list of repositories.
- repositories = ['pypi']
- else:
- repositories = []
+ if os.path.isfile(path):
+ parser.read(path)
- config = {}
+ # Get a list of index_servers from the config file
+ # format: https://docs.python.org/3/distutils/packageindex.html#pypirc
+ if parser.has_option("distutils", "index-servers"):
+ index_servers = parser.get("distutils", "index-servers").split()
- defaults = {"username": None, "password": None}
- if parser.has_section("server-login"):
for key in ["username", "password"]:
if parser.has_option("server-login", key):
defaults[key] = parser.get("server-login", key)
- for repository in repositories:
- # Skip this repository if it doesn't exist in the config file
- if not parser.has_section(repository):
- continue
+ config = collections.defaultdict(lambda: defaults.copy())
- # Mandatory configuration and defaults
- config[repository] = {
- "repository": DEFAULT_REPOSITORY,
- "username": None,
- "password": None,
- }
+ # don't require users to manually configure URLs for these repositories
+ config["pypi"]["repository"] = DEFAULT_REPOSITORY
+ if "testpypi" in index_servers:
+ config["testpypi"]["repository"] = TEST_REPOSITORY
- # Optional configuration values
+ # optional configuration values for individual repositories
+ for repository in index_servers:
for key in [
"username", "repository", "password",
"ca_cert", "client_cert",
]:
if parser.has_option(repository, key):
config[repository][key] = parser.get(repository, key)
- elif defaults.get(key):
- config[repository][key] = defaults[key]
- return config
+ # convert the defaultdict to a regular dict at this point
+ # to prevent surprising behavior later on
+ return dict(config)
def get_repository_from_config(config_file, repository, repository_url=None):
Twine should have a built-in alias for testpypi
Instead of needing to specify the full upload URL for Test PyPI we should always have an alias ready, for example:
```
twine upload --repository=testpypi dist/*
```
Should work even without a `~/.pypirc`. If `testpypi` is defined in `~/.pypirc`, it should take precedence.
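A condensed sketch of the new `get_config()` shape from the diff above (file parsing elided; `TEST_REPOSITORY` is copied from the patch, and the pypi URL is twine's usual default):
```python
import collections

DEFAULT_REPOSITORY = "https://upload.pypi.org/legacy/"
TEST_REPOSITORY = "https://test.pypi.org/legacy/"

def get_config(index_servers=("pypi", "testpypi")):
    defaults = {"username": None, "password": None}
    # Unknown repositories get a fresh copy of the defaults on access.
    config = collections.defaultdict(lambda: defaults.copy())
    config["pypi"]["repository"] = DEFAULT_REPOSITORY
    if "testpypi" in index_servers:
        config["testpypi"]["repository"] = TEST_REPOSITORY
    # Freeze to a plain dict so lookups stop auto-creating entries.
    return dict(config)

print(get_config()["testpypi"]["repository"])
# https://test.pypi.org/legacy/ -- works with no ~/.pypirc at all
```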
diff --git a/tornado/autoreload.py b/tornado/autoreload.py
index 2f911270..7d69474a 100644
--- a/tornado/autoreload.py
+++ b/tornado/autoreload.py
@@ -107,6 +107,9 @@ _watched_files = set()
_reload_hooks = []
_reload_attempted = False
_io_loops = weakref.WeakKeyDictionary() # type: ignore
+_autoreload_is_main = False
+_original_argv = None
+_original_spec = None
def start(check_time=500):
@@ -214,11 +217,15 @@ def _reload():
# __spec__ is not available (Python < 3.4), check instead if
# sys.path[0] is an empty string and add the current directory to
# $PYTHONPATH.
- spec = getattr(sys.modules['__main__'], '__spec__', None)
- if spec:
- argv = ['-m', spec.name] + sys.argv[1:]
+ if _autoreload_is_main:
+ spec = _original_spec
+ argv = _original_argv
else:
+ spec = getattr(sys.modules['__main__'], '__spec__', None)
argv = sys.argv
+ if spec:
+ argv = ['-m', spec.name] + argv[1:]
+ else:
path_prefix = '.' + os.pathsep
if (sys.path[0] == '' and
not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
@@ -226,7 +233,7 @@ def _reload():
os.environ.get("PYTHONPATH", ""))
if not _has_execv:
subprocess.Popen([sys.executable] + argv)
- sys.exit(0)
+ os._exit(0)
else:
try:
os.execv(sys.executable, [sys.executable] + argv)
@@ -269,7 +276,17 @@ def main():
can catch import-time problems like syntax errors that would otherwise
prevent the script from reaching its call to `wait`.
"""
+ # Remember that we were launched with autoreload as main.
+ # The main module can be tricky; set the variables both in our globals
+ # (which may be __main__) and the real importable version.
+ import tornado.autoreload
+ global _autoreload_is_main
+ global _original_argv, _original_spec
+ tornado.autoreload._autoreload_is_main = _autoreload_is_main = True
original_argv = sys.argv
+ tornado.autoreload._original_argv = _original_argv = original_argv
+ original_spec = getattr(sys.modules['__main__'], '__spec__', None)
+ tornado.autoreload._original_spec = _original_spec = original_spec
sys.argv = sys.argv[:]
if len(sys.argv) >= 3 and sys.argv[1] == "-m":
mode = "module"
diff --git a/tornado/iostream.py b/tornado/iostream.py
index 89e1e234..63110a1a 100644
--- a/tornado/iostream.py
+++ b/tornado/iostream.py
@@ -1410,13 +1410,7 @@ class IOStream(BaseIOStream):
return future
def _handle_connect(self):
- try:
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- except socket.error as e:
- # Hurd doesn't allow SO_ERROR for loopback sockets because all
- # errors for such sockets are reported synchronously.
- if errno_from_exception(e) == errno.ENOPROTOOPT:
- err = 0
+ err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
diff --git a/tornado/netutil.py b/tornado/netutil.py
index e63683ad..08c9d886 100644
--- a/tornado/netutil.py
+++ b/tornado/netutil.py
@@ -138,12 +138,7 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC,
raise
set_close_exec(sock.fileno())
if os.name != 'nt':
- try:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except socket.error as e:
- if errno_from_exception(e) != errno.ENOPROTOOPT:
- # Hurd doesn't support SO_REUSEADDR.
- raise
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if reuse_port:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if af == socket.AF_INET6:
@@ -185,12 +180,7 @@ if hasattr(socket, 'AF_UNIX'):
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
set_close_exec(sock.fileno())
- try:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except socket.error as e:
- if errno_from_exception(e) != errno.ENOPROTOOPT:
- # Hurd doesn't support SO_REUSEADDR
- raise
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
try:
st = os.stat(file)
diff --git a/tornado/web.py b/tornado/web.py
index 6760b0b9..f970bd13 100644
--- a/tornado/web.py
+++ b/tornado/web.py
@@ -749,18 +749,7 @@ class RequestHandler(object):
self._write_buffer.append(chunk)
def render(self, template_name, **kwargs):
- """Renders the template with the given arguments as the response.
-
- ``render()`` calls ``finish()``, so no other output methods can be called
- after it.
-
- Returns a `.Future` with the same semantics as the one returned by `finish`.
- Awaiting this `.Future` is optional.
-
- .. versionchanged:: 5.1
-
- Now returns a `.Future` instead of ``None``.
- """
+ """Renders the template with the given arguments as the response."""
if self._finished:
raise RuntimeError("Cannot render() after finish()")
html = self.render_string(template_name, **kwargs)
@@ -821,7 +810,7 @@ class RequestHandler(object):
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
- return self.finish(html)
+ self.finish(html)
def render_linked_js(self, js_files):
"""Default method used to render the final js links for the
@@ -1004,20 +993,7 @@ class RequestHandler(object):
return future
def finish(self, chunk=None):
- """Finishes this response, ending the HTTP request.
-
- Passing a ``chunk`` to ``finish()`` is equivalent to passing that
- chunk to ``write()`` and then calling ``finish()`` with no arguments.
-
- Returns a `.Future` which may optionally be awaited to track the sending
- of the response to the client. This `.Future` resolves when all the response
- data has been sent, and raises an error if the connection is closed before all
- data can be sent.
-
- .. versionchanged:: 5.1
-
- Now returns a `.Future` instead of ``None``.
- """
+ """Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice")
@@ -1049,13 +1025,12 @@ class RequestHandler(object):
# are keepalive connections)
self.request.connection.set_close_callback(None)
- future = self.flush(include_footers=True)
+ self.flush(include_footers=True)
self.request.connection.finish()
self._log()
self._finished = True
self.on_finish()
self._break_cycles()
- return future
def detach(self):
"""Take control of the underlying stream.
autoreload: Fix argv preservation
`autoreload` currently has a wrapper mode (e.g. `python -m tornado.autoreload -m tornado.test`) for scripts, and an in-process mode (enabled by `Application(..., debug=True)`). It's useful to combine these, since the wrapper can catch syntax errors that cause the process to abort before entering its IOLoop. However, this doesn't work as well as it should, because the `main` wrapper only restores `sys.argv` if the process exits, meaning the `-m tornado.autoreload` flags are lost if the inner autoreload fires. The original argv needs to be stored in a global when `autoreload` is `__main__`, so that it can be used in `_reload()`.
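A minimal standalone sketch of that idea, assuming the wrapper's `main()` and a hypothetical helper standing in for the branch inside `_reload()`:
```python
import sys

_autoreload_is_main = False
_original_argv = None

def main():
    """Wrapper entry point: remember how we were really invoked."""
    global _autoreload_is_main, _original_argv
    _autoreload_is_main = True
    _original_argv = sys.argv        # still contains '-m tornado.autoreload ...'
    sys.argv = sys.argv[:]           # the wrapper rewrites this copy for the script

def _argv_for_reload():
    # What _reload() should re-exec: the original wrapper invocation if we
    # are the wrapper, otherwise whatever sys.argv currently holds.
    return _original_argv if _autoreload_is_main else sys.argv
```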
tornadoweb/tornado
diff --git a/tornado/test/autoreload_test.py b/tornado/test/autoreload_test.py
index 6a9729db..1ea53167 100644
--- a/tornado/test/autoreload_test.py
+++ b/tornado/test/autoreload_test.py
@@ -1,14 +1,19 @@
from __future__ import absolute_import, division, print_function
import os
+import shutil
import subprocess
from subprocess import Popen
import sys
from tempfile import mkdtemp
+import time
from tornado.test.util import unittest
-MAIN = """\
+class AutoreloadTest(unittest.TestCase):
+
+ def test_reload_module(self):
+ main = """\
import os
import sys
@@ -24,15 +29,13 @@ if 'TESTAPP_STARTED' not in os.environ:
autoreload._reload()
"""
-
-class AutoreloadTest(unittest.TestCase):
- def test_reload_module(self):
# Create temporary test application
path = mkdtemp()
+ self.addCleanup(shutil.rmtree, path)
os.mkdir(os.path.join(path, 'testapp'))
open(os.path.join(path, 'testapp/__init__.py'), 'w').close()
with open(os.path.join(path, 'testapp/__main__.py'), 'w') as f:
- f.write(MAIN)
+ f.write(main)
# Make sure the tornado module under test is available to the test
# application
@@ -46,3 +49,64 @@ class AutoreloadTest(unittest.TestCase):
universal_newlines=True)
out = p.communicate()[0]
self.assertEqual(out, 'Starting\nStarting\n')
+
+ def test_reload_wrapper_preservation(self):
+ # This test verifies that when `python -m tornado.autoreload`
+ # is used on an application that also has an internal
+ # autoreload, the reload wrapper is preserved on restart.
+ main = """\
+import os
+import sys
+
+# This import will fail if path is not set up correctly
+import testapp
+
+if 'tornado.autoreload' not in sys.modules:
+ raise Exception('started without autoreload wrapper')
+
+import tornado.autoreload
+
+print('Starting')
+sys.stdout.flush()
+if 'TESTAPP_STARTED' not in os.environ:
+ os.environ['TESTAPP_STARTED'] = '1'
+ # Simulate an internal autoreload (one not caused
+ # by the wrapper).
+ tornado.autoreload._reload()
+else:
+ # Exit directly so autoreload doesn't catch it.
+ os._exit(0)
+"""
+
+ # Create temporary test application
+ path = mkdtemp()
+ os.mkdir(os.path.join(path, 'testapp'))
+ self.addCleanup(shutil.rmtree, path)
+ init_file = os.path.join(path, 'testapp', '__init__.py')
+ open(init_file, 'w').close()
+ main_file = os.path.join(path, 'testapp', '__main__.py')
+ with open(main_file, 'w') as f:
+ f.write(main)
+
+ # Make sure the tornado module under test is available to the test
+ # application
+ pythonpath = os.getcwd()
+ if 'PYTHONPATH' in os.environ:
+ pythonpath += os.pathsep + os.environ['PYTHONPATH']
+
+ autoreload_proc = Popen(
+ [sys.executable, '-m', 'tornado.autoreload', '-m', 'testapp'],
+ stdout=subprocess.PIPE, cwd=path,
+ env=dict(os.environ, PYTHONPATH=pythonpath),
+ universal_newlines=True)
+
+ for i in range(20):
+ if autoreload_proc.poll() is not None:
+ break
+ time.sleep(0.1)
+ else:
+ autoreload_proc.kill()
+ raise Exception("subprocess failed to terminate")
+
+ out = autoreload_proc.communicate()[0]
+ self.assertEqual(out, 'Starting\n' * 2)
diff --git a/tornado/test/web_test.py b/tornado/test/web_test.py
index b77311df..45072aac 100644
--- a/tornado/test/web_test.py
+++ b/tornado/test/web_test.py
@@ -191,40 +191,6 @@ class SecureCookieV2Test(unittest.TestCase):
self.assertEqual(new_handler.get_secure_cookie('foo'), None)
-class FinalReturnTest(WebTestCase):
- def get_handlers(self):
- test = self
-
- class FinishHandler(RequestHandler):
- @gen.coroutine
- def get(self):
- test.final_return = self.finish()
-
- class RenderHandler(RequestHandler):
- def create_template_loader(self, path):
- return DictLoader({'foo.html': 'hi'})
-
- @gen.coroutine
- def get(self):
- test.final_return = self.render('foo.html')
-
- return [("/finish", FinishHandler),
- ("/render", RenderHandler)]
-
- def get_app_kwargs(self):
- return dict(template_path='FinalReturnTest')
-
- def test_finish_method_return_future(self):
- response = self.fetch(self.get_url('/finish'))
- self.assertEqual(response.code, 200)
- self.assertIsInstance(self.final_return, Future)
-
- def test_render_method_return_future(self):
- response = self.fetch(self.get_url('/render'))
- self.assertEqual(response.code, 200)
- self.assertIsInstance(self.final_return, Future)
-
-
class CookieTest(WebTestCase):
def get_handlers(self):
class SetCookieHandler(RequestHandler):
diff --git a/tornado/iostream.py b/tornado/iostream.py
index 89e1e234..63110a1a 100644
--- a/tornado/iostream.py
+++ b/tornado/iostream.py
@@ -1410,13 +1410,7 @@ class IOStream(BaseIOStream):
return future
def _handle_connect(self):
- try:
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- except socket.error as e:
- # Hurd doesn't allow SO_ERROR for loopback sockets because all
- # errors for such sockets are reported synchronously.
- if errno_from_exception(e) == errno.ENOPROTOOPT:
- err = 0
+ err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
diff --git a/tornado/netutil.py b/tornado/netutil.py
index e63683ad..08c9d886 100644
--- a/tornado/netutil.py
+++ b/tornado/netutil.py
@@ -138,12 +138,7 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC,
raise
set_close_exec(sock.fileno())
if os.name != 'nt':
- try:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except socket.error as e:
- if errno_from_exception(e) != errno.ENOPROTOOPT:
- # Hurd doesn't support SO_REUSEADDR.
- raise
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if reuse_port:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if af == socket.AF_INET6:
@@ -185,12 +180,7 @@ if hasattr(socket, 'AF_UNIX'):
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
set_close_exec(sock.fileno())
- try:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except socket.error as e:
- if errno_from_exception(e) != errno.ENOPROTOOPT:
- # Hurd doesn't support SO_REUSEADDR
- raise
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
try:
st = os.stat(file)
diff --git a/tornado/web.py b/tornado/web.py
index f970bd13..6760b0b9 100644
--- a/tornado/web.py
+++ b/tornado/web.py
@@ -749,7 +749,18 @@ class RequestHandler(object):
self._write_buffer.append(chunk)
def render(self, template_name, **kwargs):
- """Renders the template with the given arguments as the response."""
+ """Renders the template with the given arguments as the response.
+
+ ``render()`` calls ``finish()``, so no other output methods can be called
+ after it.
+
+ Returns a `.Future` with the same semantics as the one returned by `finish`.
+ Awaiting this `.Future` is optional.
+
+ .. versionchanged:: 5.1
+
+ Now returns a `.Future` instead of ``None``.
+ """
if self._finished:
raise RuntimeError("Cannot render() after finish()")
html = self.render_string(template_name, **kwargs)
@@ -810,7 +821,7 @@ class RequestHandler(object):
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
- self.finish(html)
+ return self.finish(html)
def render_linked_js(self, js_files):
"""Default method used to render the final js links for the
@@ -993,7 +1004,20 @@ class RequestHandler(object):
return future
def finish(self, chunk=None):
- """Finishes this response, ending the HTTP request."""
+ """Finishes this response, ending the HTTP request.
+
+ Passing a ``chunk`` to ``finish()`` is equivalent to passing that
+ chunk to ``write()`` and then calling ``finish()`` with no arguments.
+
+ Returns a `.Future` which may optionally be awaited to track the sending
+ of the response to the client. This `.Future` resolves when all the response
+ data has been sent, and raises an error if the connection is closed before all
+ data can be sent.
+
+ .. versionchanged:: 5.1
+
+ Now returns a `.Future` instead of ``None``.
+ """
if self._finished:
raise RuntimeError("finish() called twice")
@@ -1025,12 +1049,13 @@ class RequestHandler(object):
# are keepalive connections)
self.request.connection.set_close_callback(None)
- self.flush(include_footers=True)
+ future = self.flush(include_footers=True)
self.request.connection.finish()
self._log()
self._finished = True
self.on_finish()
self._break_cycles()
+ return future
def detach(self):
"""Take control of the underlying stream.
RequestHandler.finish should return a Future
`RequestHandler.finish` may call `flush()`, which returns a Future, but this Future is simply discarded. The main reason for that Future is flow control in streaming responses, which is no longer relevant by the time we are closing the connection, but it also carries an error if the stream is closed while the response is being streamed. If left uncaught, this error is logged as a stack trace, so some applications may wish to await their calls to `finish()` in order to catch it.
This logic also extends to `render()`, which calls `finish()`.
From https://github.com/tornadoweb/tornado/issues/2055#issuecomment-304456147
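For example, a handler can now opt in to catching that error (a hypothetical handler, using the `gen.coroutine` style seen elsewhere in this changeset; the exact exception type depends on the transport, `StreamClosedError` being the usual one):

```python
from tornado import gen, web
from tornado.iostream import StreamClosedError

class DownloadHandler(web.RequestHandler):
    @gen.coroutine
    def get(self):
        self.write(b"...large response body...")
        try:
            # finish() now returns a Future; yielding it surfaces a
            # closed-connection error instead of an unhandled stack trace.
            yield self.finish()
        except StreamClosedError:
            pass  # the client went away; nothing more to send
```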
diff --git a/marshmallow/fields.py b/marshmallow/fields.py
index ecfd28d4..737fbfb0 100755
--- a/marshmallow/fields.py
+++ b/marshmallow/fields.py
@@ -1134,6 +1134,15 @@ class Dict(Field):
'marshmallow.base.FieldABC')
self.key_container = keys
+ def _add_to_schema(self, field_name, schema):
+ super(Dict, self)._add_to_schema(field_name, schema)
+ if self.value_container:
+ self.value_container.parent = self
+ self.value_container.name = field_name
+ if self.key_container:
+ self.key_container.parent = self
+ self.key_container.name = field_name
+
def _serialize(self, value, attr, obj):
if value is None:
return None
Question: How can I pass the context in a nested field of a structured dict?
I noticed that if you use a nested field for values in a structured Dict, the context is not automatically given to the nested schema. Is there a way to pass it the context?
Example:
```python
class Inner(Schema):
foo = fields.String()
@validates('foo')
def validate_foo(self, value):
if 'foo_context' not in self.context:
raise ValidationError('no context!')
class Outer(Schema):
bar = fields.Dict(values=fields.Nested(Inner))
# gives no error:
Inner(context={'foo_context': 'foo'}).load({'foo': 'some foo'})
# gives 'no context!' error:
Outer(context={'foo_context': 'foo'}).load({'bar': { 'key': {'foo': 'some foo'}}})
```
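The patch above addresses this by binding the Dict's key/value containers to the schema in `_add_to_schema`; since a field's `context` is resolved through its `parent` chain, a `Nested` value field can then reach the outer schema's context. With the patch applied, the failing example passes:

```python
# The value container now has Outer as an ancestor, so the inner
# @validates hook finds 'foo_context' and no error is raised.
outer = Outer(context={'foo_context': 'foo'})
outer.load({'bar': {'key': {'foo': 'some foo'}}})
```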
marshmallow-code/marshmallow
diff --git a/tests/test_schema.py b/tests/test_schema.py
index 17c04300..9fee0d63 100755
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -2134,6 +2134,27 @@ class TestContext:
outer.context['foo_context'] = 'foo'
assert outer.load({'bars': [{'foo': 42}]})
+ # Regression test for https://github.com/marshmallow-code/marshmallow/issues/820
+ def test_nested_dict_fields_inherit_context(self):
+ class InnerSchema(Schema):
+ foo = fields.Field()
+
+ @validates('foo')
+ def validate_foo(self, value):
+ if 'foo_context' not in self.context:
+ raise ValidationError('Missing context')
+
+ class OuterSchema(Schema):
+ bars = fields.Dict(values=fields.Nested(InnerSchema()))
+
+ inner = InnerSchema()
+ inner.context['foo_context'] = 'foo'
+ assert inner.load({'foo': 42})
+
+ outer = OuterSchema()
+ outer.context['foo_context'] = 'foo'
+ assert outer.load({'bars': {'test': {'foo': 42}}})
+
def test_serializer_can_specify_nested_object_as_attribute(blog):
class BlogUsernameSchema(Schema):
diff --git a/tornado/curl_httpclient.py b/tornado/curl_httpclient.py
index 54fc5b36..ef98225c 100644
--- a/tornado/curl_httpclient.py
+++ b/tornado/curl_httpclient.py
@@ -348,8 +348,8 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
curl.setopt(pycurl.PROXY, request.proxy_host)
curl.setopt(pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
- credentials = '%s:%s' % (request.proxy_username,
- request.proxy_password)
+ credentials = httputil.encode_username_password(request.proxy_username,
+ request.proxy_password)
curl.setopt(pycurl.PROXYUSERPWD, credentials)
if (request.proxy_auth_mode is None or
@@ -441,8 +441,6 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
curl.setopt(pycurl.INFILESIZE, len(request.body or ''))
if request.auth_username is not None:
- userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
-
if request.auth_mode is None or request.auth_mode == "basic":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
elif request.auth_mode == "digest":
@@ -450,7 +448,9 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
else:
raise ValueError("Unsupported auth_mode %s" % request.auth_mode)
- curl.setopt(pycurl.USERPWD, native_str(userpwd))
+ userpwd = httputil.encode_username_password(request.auth_username,
+ request.auth_password)
+ curl.setopt(pycurl.USERPWD, userpwd)
curl_log.debug("%s %s (username: %r)", request.method, request.url,
request.auth_username)
else:
diff --git a/tornado/httputil.py b/tornado/httputil.py
index 22a64c31..d1ace5a8 100644
--- a/tornado/httputil.py
+++ b/tornado/httputil.py
@@ -29,11 +29,12 @@ import email.utils
import numbers
import re
import time
+import unicodedata
import warnings
from tornado.escape import native_str, parse_qs_bytes, utf8
from tornado.log import gen_log
-from tornado.util import ObjectDict, PY3
+from tornado.util import ObjectDict, PY3, unicode_type
if PY3:
import http.cookies as Cookie
@@ -949,6 +950,20 @@ def _encode_header(key, pdict):
return '; '.join(out)
+def encode_username_password(username, password):
+ """Encodes a username/password pair in the format used by HTTP auth.
+
+ The return value is a byte string in the form ``username:password``.
+
+ .. versionadded:: 5.1
+ """
+ if isinstance(username, unicode_type):
+ username = unicodedata.normalize('NFC', username)
+ if isinstance(password, unicode_type):
+ password = unicodedata.normalize('NFC', password)
+ return utf8(username) + b":" + utf8(password)
+
+
def doctests():
import doctest
return doctest.DocTestSuite()
diff --git a/tornado/simple_httpclient.py b/tornado/simple_httpclient.py
index 4df4898a..35c71936 100644
--- a/tornado/simple_httpclient.py
+++ b/tornado/simple_httpclient.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
-from tornado.escape import utf8, _unicode
+from tornado.escape import _unicode
from tornado import gen
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
from tornado import httputil
@@ -308,9 +308,9 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
if self.request.auth_mode not in (None, "basic"):
raise ValueError("unsupported auth_mode %s",
self.request.auth_mode)
- auth = utf8(username) + b":" + utf8(password)
- self.request.headers["Authorization"] = (b"Basic " +
- base64.b64encode(auth))
+ self.request.headers["Authorization"] = (
+ b"Basic " + base64.b64encode(
+ httputil.encode_username_password(username, password)))
if self.request.user_agent:
self.request.headers["User-Agent"] = self.request.user_agent
if not self.request.allow_nonstandard_methods:
Unable to use non-ascii characters in user/password for basic auth in curl_httpclient
Steps to reproduce (Python 3.4):
1. Create a tornado.httpclient.HTTPRequest whose auth_username or auth_password contains a non-ASCII character, for example the pound sterling sign £ (163, i.e. '\xa3', in Latin-1; it is not representable in ASCII at all).
2. Execute curl_httpclient fetch using that request
Expected result:
1. The request is successfully completed
Actual result:
1. HTTP 599 is returned and the internal exception is:
'ascii' codec can't encode character '\xa3' in position 55: ordinal not in range(128)
I may simply not be aware of the proper solution, but providing bytes as auth_password does not solve the issue either, because https://github.com/tornadoweb/tornado/blob/master/tornado/curl_httpclient.py#L438 internally uses string formatting. Reading through the pycurl docs (http://pycurl.io/docs/latest/unicode.html) suggests that on Python 3 a bytes object should be used when calling curl setopt. It seems like a Python 3 vs Python 2 issue?
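The fix routes both HTTP clients through the new `httputil.encode_username_password` helper shown in the diff above, which NFC-normalizes unicode input and encodes it as UTF-8 bytes before the credentials reach pycurl. A quick illustration:

```python
# -*- coding: utf-8 -*-
from tornado.httputil import encode_username_password

# Unicode credentials become UTF-8 bytes (after NFC normalization),
# so pycurl never attempts an implicit ascii encode.
print(encode_username_password(u'user', u'£'))
# b'user:\xc2\xa3'
```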
tornadoweb/tornado
diff --git a/tornado/test/curl_httpclient_test.py b/tornado/test/curl_httpclient_test.py
index b7a85952..4230d4cd 100644
--- a/tornado/test/curl_httpclient_test.py
+++ b/tornado/test/curl_httpclient_test.py
@@ -32,13 +32,15 @@ class CurlHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
class DigestAuthHandler(RequestHandler):
+ def initialize(self, username, password):
+ self.username = username
+ self.password = password
+
def get(self):
realm = 'test'
opaque = 'asdf'
# Real implementations would use a random nonce.
nonce = "1234"
- username = 'foo'
- password = 'bar'
auth_header = self.request.headers.get('Authorization', None)
if auth_header is not None:
@@ -53,9 +55,9 @@ class DigestAuthHandler(RequestHandler):
assert param_dict['realm'] == realm
assert param_dict['opaque'] == opaque
assert param_dict['nonce'] == nonce
- assert param_dict['username'] == username
+ assert param_dict['username'] == self.username
assert param_dict['uri'] == self.request.path
- h1 = md5(utf8('%s:%s:%s' % (username, realm, password))).hexdigest()
+ h1 = md5(utf8('%s:%s:%s' % (self.username, realm, self.password))).hexdigest()
h2 = md5(utf8('%s:%s' % (self.request.method,
self.request.path))).hexdigest()
digest = md5(utf8('%s:%s:%s' % (h1, nonce, h2))).hexdigest()
@@ -88,7 +90,8 @@ class CurlHTTPClientTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
- ('/digest', DigestAuthHandler),
+ ('/digest', DigestAuthHandler, {'username': 'foo', 'password': 'bar'}),
+ ('/digest_non_ascii', DigestAuthHandler, {'username': 'foo', 'password': 'barユ£'}),
('/custom_reason', CustomReasonHandler),
('/custom_fail_reason', CustomFailReasonHandler),
])
@@ -143,3 +146,8 @@ class CurlHTTPClientTestCase(AsyncHTTPTestCase):
# during the setup phase doesn't lead the request to
# be dropped on the floor.
response = self.fetch(u'/ユニコード', raise_error=True)
+
+ def test_digest_auth_non_ascii(self):
+ response = self.fetch('/digest_non_ascii', auth_mode='digest',
+ auth_username='foo', auth_password='barユ£')
+ self.assertEqual(response.body, b'ok')
diff --git a/tornado/test/httpclient_test.py b/tornado/test/httpclient_test.py
index 60c8f490..fb8b12d5 100644
--- a/tornado/test/httpclient_test.py
+++ b/tornado/test/httpclient_test.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import base64
@@ -8,6 +9,7 @@ import sys
import threading
import datetime
from io import BytesIO
+import unicodedata
from tornado.escape import utf8, native_str
from tornado import gen
@@ -237,6 +239,7 @@ Transfer-Encoding: chunked
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
+ # This test data appears in section 2 of RFC 7617.
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
@@ -247,6 +250,20 @@ Transfer-Encoding: chunked
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
+ def test_basic_auth_unicode(self):
+ # This test data appears in section 2.1 of RFC 7617.
+ self.assertEqual(self.fetch("/auth", auth_username="test",
+ auth_password="123£").body,
+ b"Basic dGVzdDoxMjPCow==")
+
+ # The standard mandates NFC. Give it a decomposed username
+ # and ensure it is normalized to composed form.
+ username = unicodedata.normalize("NFD", u"josé")
+ self.assertEqual(self.fetch("/auth",
+ auth_username=username,
+ auth_password="səcrət").body,
+ b"Basic am9zw6k6c8mZY3LJmXQ=")
+
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
ogrisel: > For the future, should we do the same as in joblib and run the tests of joblib as part of the CI?
I agree, but let's do that in another PR.
diff --git a/loky/backend/semaphore_tracker.py b/loky/backend/semaphore_tracker.py
index 79587f2..f494237 100644
--- a/loky/backend/semaphore_tracker.py
+++ b/loky/backend/semaphore_tracker.py
@@ -203,7 +203,6 @@ def main(fd):
try:
sem_unlink(name)
if VERBOSE: # pragma: no cover
- name = name.decode('ascii')
sys.stderr.write("[SemaphoreTracker] unlink {}\n"
.format(name))
sys.stderr.flush()
diff --git a/loky/backend/semlock.py b/loky/backend/semlock.py
index c94c4cd..2d35f6a 100644
--- a/loky/backend/semlock.py
+++ b/loky/backend/semlock.py
@@ -68,7 +68,7 @@ if sys.version_info[:2] < (3, 3):
def sem_unlink(name):
- if pthread.sem_unlink(name) < 0:
+ if pthread.sem_unlink(name.encode('ascii')) < 0:
raiseFromErrno()
@@ -153,8 +153,8 @@ class SemLock(object):
self.ident = 0
self.kind = kind
self.maxvalue = maxvalue
- self.name = name.encode('ascii')
- self.handle = _sem_open(self.name, value)
+ self.name = name
+ self.handle = _sem_open(self.name.encode('ascii'), value)
def __del__(self):
try:
@@ -265,7 +265,7 @@ class SemLock(object):
self.kind = kind
self.maxvalue = maxvalue
self.name = name
- self.handle = _sem_open(name)
+ self.handle = _sem_open(name.encode('ascii'))
return self
diff --git a/loky/backend/synchronize.py b/loky/backend/synchronize.py
index 2cdb43d..4773b9d 100644
--- a/loky/backend/synchronize.py
+++ b/loky/backend/synchronize.py
@@ -121,8 +121,7 @@ class SemLock(object):
@staticmethod
def _make_name():
# OSX does not support long names for semaphores
- name = '/loky-%i-%s' % (os.getpid(), next(SemLock._rand))
- return name
+ return '/loky-%i-%s' % (os.getpid(), next(SemLock._rand))
#
loky.backend.semaphore_tracker.sem_unlink does not have the same signature depending on whether it comes from ctypes or _multiprocessing
* `_multi_processing.sem_unlink` takes `str`
* `loky.backend.semlock.sem_unlink` comes from `ctypes` and take `bytes`.
It feels like some code was written with the ctypes variant in mind and raises an error when `_multiprocessing.sem_unlink` is called. The tests seem to only exercise `loky.backend.semlock.sem_unlink`.
#### Context
This is an error I just saw in a joblib Travis [build](https://travis-ci.org/joblib/joblib/jobs/346847911#L4044). Note this is with loky version 1.2.1.
```
E /home/travis/build/joblib/joblib/joblib/externals/loky/backend/semaphore_tracker.py:195: UserWarning: semaphore_tracker: There appear to be 6 leaked semaphores to clean up at shutdown
E len(cache))
E /home/travis/build/joblib/joblib/joblib/externals/loky/backend/semaphore_tracker.py:211: UserWarning: semaphore_tracker: b'/loky-5456-6haleho6': TypeError('argument 1 must be str, not bytes',)
E warnings.warn('semaphore_tracker: %r: %r' % (name, e))
```
Quickly looking at it, it seems like this is still in master. The code where the warning happens is here:
https://github.com/tomMoral/loky/blob/dec1c8144b12938dfe7bfc511009e12f25fd1cd9/loky/backend/semaphore_tracker.py#L203-L211
tomMoral/loky
diff --git a/tests/test_synchronize.py b/tests/test_synchronize.py
index 797070d..4794f17 100644
--- a/tests/test_synchronize.py
+++ b/tests/test_synchronize.py
@@ -22,7 +22,7 @@ if sys.version_info < (3, 3):
@pytest.mark.skipif(sys.platform == "win32", reason="UNIX test")
def test_semlock_failure():
from loky.backend.semlock import SemLock, sem_unlink
- name = "test1"
+ name = "loky-test-semlock"
sl = SemLock(0, 1, 1, name=name)
with pytest.raises(FileExistsError):
@@ -30,7 +30,7 @@ def test_semlock_failure():
sem_unlink(sl.name)
with pytest.raises(FileNotFoundError):
- SemLock._rebuild(None, 0, 0, name.encode('ascii'))
+ SemLock._rebuild(None, 0, 0, name)
def assert_sem_value_equal(sem, value):
xpconanfan: I don't see how this is related to logging stderr, as the issue describes.
Is one of the messages incorrect?
*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIjCGd2zhkKzp2ablI:b-x8c38) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*
> ```Python
> break
> finally:
> (unhandled_out, err) = proc.communicate()
> ```
wait, so this does happen?
shouldn't we call the handler with this out instead?
winterfroststrom:
*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIkOBz8aQLOE0ovB8O:brjczjz) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*
> Previously, xpconanfan (Ang Li) wrote: wait, so this does happen?
> shouldn't we call the handler with this out instead?
I'm not sure?
I'm adding logging here first to try to determine what the underlying problem is
xpconanfan:
*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIl2971UfZ3LqypsHv:b332s67) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*
> Previously, winterfroststrom wrote: I'm not sure?
> I'm adding logging here first to try to determine what the underlying problem is
seems like we should pipe all stdout content through the handler as this function promised?
you could add additional logging to signify the existence of stdout from `communicate`?
xpconanfan:
*[tests/mobly/controllers/android_device_lib/adb_test.py, line 156 at r1](https://reviewable.io/reviews/google/mobly/453#-LDImOOR-GL1gRhihxxl:-LDImOOR-GL1gRhihxxm:ba86vyn) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/tests/mobly/controllers/android_device_lib/adb_test.py#L156)):*
> ```Python
> def test_execute_and_process_stdout_logs_cmd(self, mock_debug_logger,
> mock_popen):
> self._mock_execute_and_process_stdout_process(mock_popen)
> ```
this test is relying on the default mock stdout value in `_mock_execute_and_process_stdout_process`, which is difficult to read.
Can we more explicitly set the mock value within the test?
winterfroststrom:
*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIsvuPD4jS9CVrpnr5:bcy1d3j) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*
> Previously, xpconanfan (Ang Li) wrote: seems like we should pipe all stdout content through the handler as this function promised?
> you could add additional logging to signify the existence of stdout from `communicate`?
So, I've never seen this output actually get populated, and I'm not sure it is in the case I'm debugging, but okay.
I'd prefer changing the logged command, because otherwise you'd get semi-duplicate log lines.
---
*[tests/mobly/controllers/android_device_lib/adb_test.py, line 156 at r1](https://reviewable.io/reviews/google/mobly/453#-LDImOOR-GL1gRhihxxl:-LDItG5yEIdICTRzP1c0:b-896fix) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/tests/mobly/controllers/android_device_lib/adb_test.py#L156)):*
> Previously, xpconanfan (Ang Li) wrote: this test is relying on the default mock stdout value in `_mock_execute_and_process_stdout_process`, which is difficult to read.
> Can we more explicitly set the mock value within the test?
Done.
xpconanfan: :lgtm:
diff --git a/mobly/controllers/android_device_lib/adb.py b/mobly/controllers/android_device_lib/adb.py
index 90dcd0b..95d1261 100644
--- a/mobly/controllers/android_device_lib/adb.py
+++ b/mobly/controllers/android_device_lib/adb.py
@@ -203,6 +203,7 @@ class AdbProxy(object):
stderr=subprocess.PIPE,
shell=shell,
bufsize=1)
+ out = '[elided, processed via handler]'
try:
while proc.poll() is None:
line = proc.stdout.readline()
@@ -211,16 +212,19 @@ class AdbProxy(object):
else:
break
finally:
- (_, err) = proc.communicate()
+ (unexpected_out, err) = proc.communicate()
+ if unexpected_out:
+ out = '[unexpected stdout] %s' % unexpected_out
+ for line in unexpected_out.splitlines():
+ handler(line)
+
ret = proc.returncode
+ logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s',
+ cli_cmd_to_string(args), out, err, ret)
if ret == 0:
return err
else:
- raise AdbError(
- cmd=args,
- stdout='[elided, processed via handler]',
- stderr=err,
- ret_code=ret)
+ raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)
def _construct_adb_cmd(self, raw_name, args, shell):
"""Constructs an adb command with arguments for a subprocess call.
diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
index e3e835d..03674ff 100644
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -125,8 +125,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
# Yaaay! We're done!
self.log.debug('Snippet %s started after %.1fs on host port %s',
- self.package,
- time.time() - start_time, self.host_port)
+ self.package, time.time() - start_time, self.host_port)
def restore_app_connection(self, port=None):
"""Restores the app after device got reconnected.
@@ -151,12 +150,13 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
try:
self.connect()
except:
- # Failed to connect to app, something went wrong.
+ # Log the original error and raise AppRestoreConnectionError.
+ self.log.exception('Failed to re-connect to app.')
raise jsonrpc_client_base.AppRestoreConnectionError(
- self._ad(
- 'Failed to restore app connection for %s at host port %s, '
- 'device port %s'), self.package, self.host_port,
- self.device_port)
+ self._ad,
+ ('Failed to restore app connection for %s at host port %s, '
+ 'device port %s') % (self.package, self.host_port,
+ self.device_port))
# Because the previous connection was lost, update self._proc
self._proc = None
diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py
index 08d357ff6..0a59aac26 100644
--- a/nipype/pipeline/engine/utils.py
+++ b/nipype/pipeline/engine/utils.py
@@ -1054,12 +1054,14 @@ def generate_expanded_graph(graph_in):
for src_id in list(old_edge_dict.keys()):
# Drop the original JoinNodes; only concerned with
# generated Nodes
- if hasattr(node, 'joinfield'):
+ if hasattr(node, 'joinfield') and node.itername == src_id:
continue
# Patterns:
# - src_id : Non-iterable node
- # - src_id.[a-z]\d+ : IdentityInterface w/ iterables
- # - src_id.[a-z]I.[a-z]\d+ : Non-IdentityInterface w/ iterables
+ # - src_id.[a-z]\d+ :
+ # IdentityInterface w/ iterables or nested JoinNode
+ # - src_id.[a-z]I.[a-z]\d+ :
+ # Non-IdentityInterface w/ iterables
# - src_idJ\d+ : JoinNode(IdentityInterface)
if re.match(src_id + r'((\.[a-z](I\.[a-z])?|J)\d+)?$',
node.itername):
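For reference, the four `itername` shapes the updated pattern is meant to accept can be checked directly (a small standalone illustration with a made-up `src_id`):

```python
import re

src_id = 'somenode'  # hypothetical source node id
pattern = src_id + r'((\.[a-z](I\.[a-z])?|J)\d+)?$'

iternames = [
    src_id,             # non-iterable node
    src_id + '.a0',     # IdentityInterface w/ iterables, or nested JoinNode
    src_id + '.aI.a0',  # non-IdentityInterface w/ iterables
    src_id + 'J1',      # JoinNode(IdentityInterface)
]
for name in iternames:
    assert re.match(pattern, name), name
```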
PR #2479 has broken my package
### Summary
PR #2479 has broken my package (https://pypi.org/project/arcana/)
I am not quite sure what the rationale behind the changes is, so it is difficult to know how to debug this, or whether there is something I can change in my package.
### Actual behavior
Workflow exits with error
```
File "/Users/tclose/git/ni/arcana/test/mwe/nipype_pr2479/test.py", line 71, in <module>
study.data('out')
File "/Users/tclose/git/ni/arcana/arcana/study/base.py", line 325, in data
visit_ids=visit_ids)
File "/Users/tclose/git/ni/arcana/arcana/runner/base.py", line 37, in run
return workflow.run(plugin=self._plugin)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/workflows.py", line 595, in run
runner.run(execgraph, updatehash=updatehash, config=self.config)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/plugins/linear.py", line 44, in run
node.run(updatehash=updatehash)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 480, in run
result = self._run_interface(execute=True)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 564, in _run_interface
return self._run_command(execute)
File "/Users/tclose/git/ni/arcana/arcana/node.py", line 59, in _run_command
result = self.nipype_cls._run_command(self, *args, **kwargs)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 888, in _run_command
self._collate_join_field_inputs()
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 898, in _collate_join_field_inputs
val = self._collate_input_value(field)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 928, in _collate_input_value
for idx in range(self._next_slot_index)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 947, in _slot_value
field, index, e))
AttributeError: The join node pipeline1.pipeline1_subject_session_outputs does not have a slot field subject_session_pairsJ1 to hold the subject_session_pairs value at index 0: 'DynamicTraitedSpec' object has no attribute 'subject_session_pairsJ1'
```
### Expected behavior
The workflow runs without error
### How to replicate the behavior
See script below
### Script/Workflow details
I have tried to come up with an MWE that doesn't use my package, but it proved difficult. However, you can now install my package with pip
`pip install arcana`
and run the following
```
import os.path
import shutil
from nipype import config
config.enable_debug_mode()
import nipype # @IgnorePep8
from nipype.interfaces.utility import IdentityInterface # @IgnorePep8
from arcana.dataset import DatasetMatch, DatasetSpec # @IgnorePep8
from arcana.data_format import text_format # @IgnorePep8
from arcana.study.base import Study, StudyMetaClass # @IgnorePep8
from arcana.archive.local import LocalArchive # @IgnorePep8
from arcana.runner import LinearRunner # @IgnorePep8
BASE_ARCHIVE_DIR = os.path.join(os.path.dirname(__file__), 'archives')
BASE_WORK_DIR = os.path.join(os.path.dirname(__file__), 'work')
print(nipype.get_info())
print(nipype.__version__)
class TestStudy(Study):
__metaclass__ = StudyMetaClass
add_data_specs = [
DatasetSpec('in', text_format),
DatasetSpec('out', text_format, 'pipeline')]
def pipeline(self, **kwargs):
pipeline = self.create_pipeline(
name='pipeline1',
inputs=[DatasetSpec('in', text_format)],
outputs=[DatasetSpec('out', text_format)],
desc="A dummy pipeline used to test 'run_pipeline' method",
version=1,
citations=[],
**kwargs)
ident = pipeline.create_node(IdentityInterface(['a']),
name="ident")
# Connect inputs
pipeline.connect_input('in', ident, 'a')
# Connect outputs
pipeline.connect_output('out', ident, 'a')
return pipeline
# Create archives
shutil.rmtree(BASE_ARCHIVE_DIR, ignore_errors=True)
shutil.rmtree(BASE_WORK_DIR, ignore_errors=True)
os.makedirs(BASE_ARCHIVE_DIR)
for sess in (['ARCHIVE1', 'SUBJECT', 'VISIT'],
['ARCHIVE2', 'SUBJECT1', 'VISIT1'],
['ARCHIVE2', 'SUBJECT1', 'VISIT2'],
['ARCHIVE2', 'SUBJECT2', 'VISIT1'],
['ARCHIVE2', 'SUBJECT2', 'VISIT2']):
sess_dir = os.path.join(*([BASE_ARCHIVE_DIR] + sess))
os.makedirs(sess_dir)
with open(os.path.join(sess_dir, 'in.txt'), 'w') as f:
f.write('in')
archive1_path = os.path.join(BASE_ARCHIVE_DIR, 'ARCHIVE1')
archive2_path = os.path.join(BASE_ARCHIVE_DIR, 'ARCHIVE2')
work1_path = os.path.join(BASE_WORK_DIR, 'WORK1')
work2_path = os.path.join(BASE_WORK_DIR, 'WORK2')
# Attempt to run with archive with 2 subjects and 2 visits
study = TestStudy('two',
LocalArchive(archive2_path),
LinearRunner(work2_path),
inputs=[DatasetMatch('in', text_format, 'in')])
# Fails here
study.data('out')
print("Ran study 2")
#
study1 = TestStudy('one',
LocalArchive(archive1_path),
LinearRunner(work1_path),
inputs=[DatasetMatch('in', text_format, 'in')])
study1.data('out')
print("Ran study 1")
```
to reproduce the error
### Platform details:
{'nibabel_version': '2.2.1', 'sys_executable': '/usr/local/opt/python@2/bin/python2.7', 'networkx_version': '1.9', 'numpy_version': '1.14.3', 'sys_platform': 'darwin', 'sys_version': '2.7.15 (default, May 1 2018, 16:44:08) \n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]', 'commit_source': 'repository', 'commit_hash': '5a96ea54a', 'pkg_path': '/Users/tclose/git/ni/nipype/nipype', 'nipype_version': '1.0.4-dev+g5a96ea54a', 'traits_version': '4.6.0', 'scipy_version': '1.1.0'}
1.0.4-dev+g5a96ea54a
(problem arose in 1.0.1)
### Execution environment
My Homebrew python 2 environment outside container
diff --git a/mysensors/__init__.py b/mysensors/__init__.py
index f57486b..784f988 100644
--- a/mysensors/__init__.py
+++ b/mysensors/__init__.py
@@ -49,7 +49,7 @@ class Gateway(object):
self.metric = True # if true - use metric, if false - use imperial
if persistence:
self.persistence = Persistence(
- self.sensors, persistence_file, persistence_scheduler)
+ self.sensors, persistence_scheduler, persistence_file)
else:
self.persistence = None
self.protocol_version = safe_is_version(protocol_version)
@@ -351,7 +351,8 @@ class ThreadingGateway(Gateway):
def __init__(self, *args, **kwargs):
"""Set up gateway instance."""
- super().__init__(*args, **kwargs)
+ super().__init__(
+ *args, persistence_scheduler=self._create_scheduler, **kwargs)
self.lock = threading.Lock()
self._stop_event = threading.Event()
self._cancel_save = None
@@ -373,12 +374,22 @@ class ThreadingGateway(Gateway):
continue
time.sleep(0.02)
+ def _create_scheduler(self, save_sensors):
+ """Return function to schedule saving sensors."""
+ def schedule_save():
+ """Save sensors and schedule a new save."""
+ save_sensors()
+ scheduler = threading.Timer(10.0, schedule_save)
+ scheduler.start()
+ self._cancel_save = scheduler.cancel
+ return schedule_save
+
def start_persistence(self):
"""Load persistence file and schedule saving of persistence file."""
if not self.persistence:
return
self.persistence.safe_load_sensors()
- self._cancel_save = self.persistence.schedule_save_sensors()
+ self.persistence.schedule_save_sensors()
def stop(self):
"""Stop the background thread."""
@@ -494,7 +505,7 @@ class BaseAsyncGateway(BaseTransportGateway):
"""Return function to schedule saving sensors."""
@asyncio.coroutine
def schedule_save():
- """Return a function to cancel the schedule."""
+ """Save sensors and schedule a new save."""
yield from self.loop.run_in_executor(None, save_sensors)
callback = partial(
ensure_future, schedule_save(), loop=self.loop)
diff --git a/mysensors/persistence.py b/mysensors/persistence.py
index 5dd4b57..efb2e6c 100644
--- a/mysensors/persistence.py
+++ b/mysensors/persistence.py
@@ -3,35 +3,21 @@ import json
import logging
import os
import pickle
-import threading
from .sensor import ChildSensor, Sensor
_LOGGER = logging.getLogger(__name__)
-def create_scheduler(save_sensors):
- """Return function to schedule saving sensors."""
- def schedule_save():
- """Return a function to cancel the schedule."""
- save_sensors()
- scheduler = threading.Timer(10.0, schedule_save)
- scheduler.start()
- return scheduler.cancel
- return schedule_save
-
-
class Persistence(object):
"""Organize persistence file saving and loading."""
def __init__(
- self, sensors, persistence_file='mysensors.pickle',
- schedule_factory=None):
+ self, sensors, schedule_factory,
+ persistence_file='mysensors.pickle'):
"""Set up Persistence instance."""
self.persistence_file = persistence_file
self.persistence_bak = '{}.bak'.format(self.persistence_file)
- if schedule_factory is None:
- schedule_factory = create_scheduler
self.schedule_save_sensors = schedule_factory(self.save_sensors)
self._sensors = sensors
self.need_save = True
Main program does not exit cleanly
[branch master - version 0.14.0 - Using serial gateway - NO asyncio]
After calling the SerialGateway.stop() method, the program does not return to the console but seems to be looping in a still-alive thread (probably the persistence thread).
```py
MYSGW_Serial_Port = '/dev/ttyMSGW'
....
GATEWAY = mysensors.SerialGateway(
MYSGW_Serial_Port, event_callback=event, persistence=True,
persistence_file='./mysensors.json', protocol_version='2.0', baud=115200,
timeout=1.0, reconnect_timeout=10.0)
GATEWAY.start_persistence()
GATEWAY.start()
....
....
GATEWAY.stop() #-> main thread does not go past this point
exit(0)
```
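The hang happens because the persistence save reschedules itself with a fresh (non-daemon) `threading.Timer`, while only the first timer's cancel handle was kept, so `stop()` could never cancel the timer actually pending. The diff above fixes this by refreshing `self._cancel_save` on every reschedule. A minimal standalone sketch of the corrected pattern:

```python
import threading

class PeriodicSaver(object):
    """Reschedule a save every `interval` seconds; stop() really stops it."""

    def __init__(self, save, interval=10.0):
        self._save = save
        self._interval = interval
        self._cancel = None

    def start(self):
        self._save()
        timer = threading.Timer(self._interval, self.start)
        timer.start()
        # Refresh the handle so stop() cancels the timer that is
        # actually pending, not one that has already fired.
        self._cancel = timer.cancel

    def stop(self):
        if self._cancel is not None:
            self._cancel()
```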
diff --git a/dark/__init__.py b/dark/__init__.py
index 0246a07..6a59296 100644
--- a/dark/__init__.py
+++ b/dark/__init__.py
@@ -7,4 +7,4 @@ if sys.version_info < (2, 7):
# will not be found by the version() function in ../setup.py
#
# Remember to update ../CHANGELOG.md describing what's new in each version.
-__version__ = '3.0.5'
+__version__ = '3.0.6'
diff --git a/dark/filter.py b/dark/filter.py
index 0665ffc..b0daa76 100644
--- a/dark/filter.py
+++ b/dark/filter.py
@@ -279,6 +279,23 @@ def addFASTAFilteringCommandLineOptions(parser):
help=('A file of (1-based) sequence numbers to retain. Numbers must '
'be one per line.'))
+ parser.add_argument(
+ '--idLambda', metavar='LAMBDA-FUNCTION',
+ help=('A one-argument function taking and returning a read id. '
+ 'E.g., --idLambda "lambda id: id.split(\'_\')[0]" or '
+ '--idLambda "lambda id: id[:10]". If the function returns None, '
+ 'the read will be filtered out.'))
+
+ parser.add_argument(
+ '--readLambda', metavar='LAMBDA-FUNCTION',
+ help=('A one-argument function taking and returning a read. '
+ 'E.g., --readLambda "lambda r: Read(r.id.split(\'_\')[0], '
+ 'r.sequence.strip(\'-\')". Make sure to also modify the quality '
+ 'string if you change the length of a FASTQ sequence. If the '
+ 'function returns None, the read will be filtered out. The '
+ 'function will be passed to eval with the dark.reads classes '
+ 'Read, DNARead, AARead, etc. all in scope.'))
+
# A mutually exclusive group for --keepSites, --keepSitesFile,
# --removeSites, and --removeSitesFile.
group = parser.add_mutually_exclusive_group()
@@ -381,4 +398,5 @@ def parseFASTAFilteringCommandLineOptions(args, reads):
randomSubset=args.randomSubset, trueLength=args.trueLength,
sampleFraction=args.sampleFraction,
sequenceNumbersFile=args.sequenceNumbersFile,
+ idLambda=args.idLambda, readLambda=args.readLambda,
keepSites=keepSites, removeSites=removeSites)
diff --git a/dark/reads.py b/dark/reads.py
index 42390e4..1074f78 100644
--- a/dark/reads.py
+++ b/dark/reads.py
@@ -740,8 +740,9 @@ class ReadFilter(object):
sequence identity.
@param removeDuplicatesById: If C{True} remove duplicated reads based
only on read id.
- @param removeDescriptions: If C{True} remove the description part of read
- ids (i.e., the part following the first whitespace).
+ @param removeDescriptions: If C{True} remove the description (the part
+ following the first whitespace) from read ids. The description is
+ removed after applying the function specified by --idLambda (if any).
@param modifier: If not C{None}, a function that is passed a read
and which either returns a read or C{None}. If it returns a read,
that read is passed through the filter. If it returns C{None},
@@ -791,6 +792,14 @@ class ReadFilter(object):
file containing (1-based) sequence numbers, in ascending order,
one per line. Only those sequences matching the given numbers will
be kept.
+ @param idLambda: If not C{None}, a C{str} Python lambda function
+ specification to use to modify read ids. The function is applied
+ before removing the description (if --removeDescriptions is also
+ specified).
+ @param readLambda: If not C{None}, a C{str} Python lambda function
+ specification to use to modify reads. The function will be passed,
+ and must return, a single Read (or one of its subclasses). This
+ function is called after the --idLambda function, if any.
@param keepSites: A set of C{int} 0-based sites (i.e., indices) in
sequences that should be kept. If C{None} (the default), all sites are
kept.
@@ -819,7 +828,8 @@ class ReadFilter(object):
removeDuplicates=False, removeDuplicatesById=False,
removeDescriptions=False, modifier=None, randomSubset=None,
trueLength=None, sampleFraction=None,
- sequenceNumbersFile=None, keepSites=None, removeSites=None):
+ sequenceNumbersFile=None, idLambda=None, readLambda=None,
+ keepSites=None, removeSites=None):
if randomSubset is not None:
if sampleFraction is not None:
@@ -929,6 +939,9 @@ class ReadFilter(object):
sampleFraction = None
self.sampleFraction = sampleFraction
+ self.idLambda = eval(idLambda) if idLambda else None
+ self.readLambda = eval(readLambda) if readLambda else None
+
def filter(self, read):
"""
Check if a read passes the filter.
@@ -1038,6 +1051,20 @@ class ReadFilter(object):
elif self.removeSites is not None:
read = read.newFromSites(self.removeSites, exclude=True)
+ if self.idLambda:
+ newId = self.idLambda(read.id)
+ if newId is None:
+ return False
+ else:
+ read.id = newId
+
+ if self.readLambda:
+ newRead = self.readLambda(read)
+ if newRead is None:
+ return False
+ else:
+ read = newRead
+
if self.removeDescriptions:
read.id = read.id.split()[0]
Add ability to give an anonymous Python function for read id conversion when filtering FASTA
acorg/dark-matter
diff --git a/test/test_reads.py b/test/test_reads.py
index 4e51442..5d9cd3e 100644
--- a/test/test_reads.py
+++ b/test/test_reads.py
@@ -3126,6 +3126,52 @@ class TestReadsFiltering(TestCase):
six.assertRaisesRegex(self, ValueError, error, Reads().filter,
keepSites={4}, removeSites={5})
+ def testIdLambda(self):
+ """
+ A passed idLambda function should produce the expected read ids.
+ """
+ read = Read('id1', 'ATCGCC')
+ reads = Reads(initialReads=[read])
+ result = reads.filter(idLambda='lambda id: "x-" + id.upper()')
+ self.assertEqual('x-ID1', list(result)[0].id)
+
+ def testIdLambdaReturningNone(self):
+ """
+ A passed idLambda function should produce the expected read ids,
+ including when it returns None.
+ """
+ read1 = Read('id1', 'ATCGCC')
+ read2 = Read('id2', 'GGATCG')
+ reads = Reads(initialReads=[read1, read2])
+ result = reads.filter(
+ idLambda='lambda id: "aa" if id.find("1") > -1 else None')
+ (result,) = list(result)
+ self.assertEqual('aa', result.id)
+
+ def testReadLambda(self):
+ """
+ A passed readLambda function should produce the expected reads.
+ """
+ read = Read('id1', 'ATCGCC')
+ reads = Reads(initialReads=[read])
+ result = reads.filter(readLambda='lambda r: Read("hey", "AAA")')
+ (result,) = list(result)
+ self.assertEqual(Read('hey', 'AAA'), result)
+
+ def testReadLambdaReturningNone(self):
+ """
+ A passed readLambda function should produce the expected reads,
+ including when it returns None.
+ """
+ read1 = Read('xid1', 'ATCGCC')
+ read2 = Read('yid2', 'GGATCG')
+ reads = Reads(initialReads=[read1, read2])
+ result = reads.filter(
+ readLambda=('lambda r: Read(r.id + "-x", r.sequence[:2]) '
+ 'if r.id.startswith("x") else None'))
+ (result,) = list(result)
+ self.assertEqual(Read('xid1-x', 'AT'), result)
+
class TestReadsInRAM(TestCase):
"""
diff --git a/conans/client/build/autotools_environment.py b/conans/client/build/autotools_environment.py
index 924161e9c..9bf4bd3e8 100644
--- a/conans/client/build/autotools_environment.py
+++ b/conans/client/build/autotools_environment.py
@@ -14,6 +14,7 @@ from conans.client.tools.win import unix_path
from conans.tools import (environment_append, args_to_string, cpu_count, cross_building,
detected_architecture, get_gnu_triplet)
from conans.errors import ConanException
+from conans.util.files import get_abs_path
class AutoToolsBuildEnvironment(object):
@@ -131,7 +132,9 @@ class AutoToolsBuildEnvironment(object):
triplet_args.append("--target=%s" % (target or self.target))
if pkg_config_paths:
- pkg_env = {"PKG_CONFIG_PATH": os.pathsep.join(pkg_config_paths)}
+ pkg_env = {"PKG_CONFIG_PATH":
+ os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
+ for f in pkg_config_paths)}
else:
# If we are using pkg_config generator automate the pcs location, otherwise it could
# read wrong files
diff --git a/conans/client/build/cmake.py b/conans/client/build/cmake.py
index 9964d0836..b5f8cb843 100644
--- a/conans/client/build/cmake.py
+++ b/conans/client/build/cmake.py
@@ -12,7 +12,7 @@ from conans.errors import ConanException
from conans.model.conan_file import ConanFile
from conans.model.version import Version
from conans.util.env_reader import get_env
-from conans.util.files import mkdir
+from conans.util.files import mkdir, get_abs_path
from conans.tools import cpu_count, args_to_string
from conans import tools
from conans.util.log import logger
@@ -28,7 +28,8 @@ def _get_env_cmake_system_name():
class CMake(object):
def __init__(self, conanfile, generator=None, cmake_system_name=True,
- parallel=True, build_type=None, toolset=None, make_program=None, set_cmake_flags=False):
+ parallel=True, build_type=None, toolset=None, make_program=None,
+ set_cmake_flags=False):
"""
:param settings_or_conanfile: Conanfile instance (or settings for retro compatibility)
:param generator: Generator name to use or none to autodetect
@@ -370,7 +371,8 @@ class CMake(object):
self._conanfile.run(command)
def configure(self, args=None, defs=None, source_dir=None, build_dir=None,
- source_folder=None, build_folder=None, cache_build_folder=None):
+ source_folder=None, build_folder=None, cache_build_folder=None,
+ pkg_config_paths=None):
# TODO: Deprecate source_dir and build_dir in favor of xxx_folder
if not self._conanfile.should_configure:
@@ -387,12 +389,26 @@ class CMake(object):
defs_to_string(defs),
args_to_string([source_dir])
])
- command = "cd %s && cmake %s" % (args_to_string([self.build_dir]), arg_list)
- if platform.system() == "Windows" and self.generator == "MinGW Makefiles":
- with tools.remove_from_path("sh"):
- self._run(command)
+
+
+ if pkg_config_paths:
+ pkg_env = {"PKG_CONFIG_PATH":
+ os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
+ for f in pkg_config_paths)}
else:
- self._run(command)
+ # If we are using pkg_config generator automate the pcs location, otherwise it could
+ # read wrong files
+ set_env = "pkg_config" in self._conanfile.generators \
+ and "PKG_CONFIG_PATH" not in os.environ
+ pkg_env = {"PKG_CONFIG_PATH": self._conanfile.install_folder} if set_env else {}
+
+ with tools.environment_append(pkg_env):
+ command = "cd %s && cmake %s" % (args_to_string([self.build_dir]), arg_list)
+ if platform.system() == "Windows" and self.generator == "MinGW Makefiles":
+ with tools.remove_from_path("sh"):
+ self._conanfile.run(command)
+ else:
+ self._conanfile.run(command)
def build(self, args=None, build_dir=None, target=None):
if not self._conanfile.should_build:
diff --git a/conans/client/build/meson.py b/conans/client/build/meson.py
index 1545a59d7..b8a7ff4b3 100644
--- a/conans/client/build/meson.py
+++ b/conans/client/build/meson.py
@@ -4,7 +4,7 @@ from conans import tools
from conans.client import join_arguments, defs_to_string
from conans.errors import ConanException
from conans.tools import args_to_string
-from conans.util.files import mkdir
+from conans.util.files import mkdir, get_abs_path
class Meson(object):
@@ -53,14 +53,6 @@ class Meson(object):
def build_folder(self, value):
self.build_dir = value
- @staticmethod
- def _get_dir(folder, origin):
- if folder:
- if os.path.isabs(folder):
- return folder
- return os.path.join(origin, folder)
- return origin
-
def _get_dirs(self, source_folder, build_folder, source_dir, build_dir, cache_build_folder):
if (source_folder or build_folder) and (source_dir or build_dir):
raise ConanException("Use 'build_folder'/'source_folder'")
@@ -69,11 +61,11 @@ class Meson(object):
build_ret = build_dir or self.build_dir or self._conanfile.build_folder
source_ret = source_dir or self._conanfile.source_folder
else:
- build_ret = self._get_dir(build_folder, self._conanfile.build_folder)
- source_ret = self._get_dir(source_folder, self._conanfile.source_folder)
+ build_ret = get_abs_path(build_folder, self._conanfile.build_folder)
+ source_ret = get_abs_path(source_folder, self._conanfile.source_folder)
if self._conanfile.in_local_cache and cache_build_folder:
- build_ret = self._get_dir(cache_build_folder, self._conanfile.build_folder)
+ build_ret = get_abs_path(cache_build_folder, self._conanfile.build_folder)
return source_ret, build_ret
@@ -90,7 +82,7 @@ class Meson(object):
cache_build_folder)
if pkg_config_paths:
- pc_paths = os.pathsep.join(self._get_dir(f, self._conanfile.install_folder)
+ pc_paths = os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
for f in pkg_config_paths)
else:
pc_paths = self._conanfile.install_folder
diff --git a/conans/util/files.py b/conans/util/files.py
index d8492cd72..8c6a859a1 100644
--- a/conans/util/files.py
+++ b/conans/util/files.py
@@ -181,6 +181,14 @@ def relative_dirs(path):
return ret
+def get_abs_path(folder, origin):
+ if folder:
+ if os.path.isabs(folder):
+ return folder
+ return os.path.join(origin, folder)
+ return origin
+
+
def _change_permissions(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
CMake build wrapper should set PKG_CONFIG_PATH
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
conan version 1.0.4 or master
A lot of CMake scripts use both `find_package` (`FindFoo.cmake`-based) and `pkg_check_modules` (`pkg-config`-based). The CMake build wrapper should automatically provide the `PKG_CONFIG_PATH` env var, set to the build directory or to recipe-provided paths. The exact same behavior already exists in `AutoToolsBuildEnvironment` and `Meson`; `CMake` should not be an exception.
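With this change, `CMake.configure` accepts the same `pkg_config_paths` argument as `Meson`, and falls back to `install_folder` automatically when the `pkg_config` generator is in use. A hypothetical recipe using it:

```python
from conans import ConanFile, CMake

class FooConan(ConanFile):
    name = "foo"  # hypothetical package
    version = "0.1"
    generators = "pkg_config"

    def build(self):
        cmake = CMake(self)
        # Relative entries are resolved against self.install_folder;
        # omit the argument entirely to get the automatic PKG_CONFIG_PATH.
        cmake.configure(pkg_config_paths=["pc_files"])
        cmake.build()
```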
diff --git a/pyticketswitch/client.py b/pyticketswitch/client.py
index 8b00be5..7726b43 100644
--- a/pyticketswitch/client.py
+++ b/pyticketswitch/client.py
@@ -1081,7 +1081,8 @@ class Client(object):
def get_trolley(self, token=None, number_of_seats=None, discounts=None,
seats=None, send_codes=None, ticket_type_code=None,
performance_id=None, price_band_code=None,
- item_numbers_to_remove=None, **kwargs):
+ item_numbers_to_remove=None,
+ raise_on_unavailable_order=False, **kwargs):
"""Retrieve the contents of a trolley from the API.
@@ -1097,14 +1098,17 @@ class Client(object):
seats (list): list of seat IDs.
send_codes (dict): send codes indexed on backend source
code.
- ticket_type_code: (string): code of ticket type to add to
+ ticket_type_code (string): code of ticket type to add to
the trolley.
- performance_id: (string): id of the performance to add to
+ performance_id (string): id of the performance to add to
the trolley.
- price_band_code: (string): code of price band to add to
+ price_band_code (string): code of price band to add to
the trolley.
- item_numbers_to_remove: (list): list of item numbers to
+ item_numbers_to_remove (list): list of item numbers to
remove from trolley.
+ raise_on_unavailable_order (bool): When set to ``True`` this method
+ will raise an exception when the API was not able to add an
+ order to the trolley as it was unavailable.
**kwargs: arbitary additional raw keyword arguments to add the
parameters.
@@ -1116,6 +1120,9 @@ class Client(object):
Raises:
InvalidParametersError: when there is an issue with the provided
parameters.
+ OrderUnavailableError: when ``raise_on_unavailable_order`` is set
+ to ``True`` and the requested addition to a trolley was
+ unavailable.
.. _`/f13/trolley.v1`: http://docs.ingresso.co.uk/#trolley
@@ -1133,6 +1140,11 @@ class Client(object):
trolley = Trolley.from_api_data(response)
meta = CurrencyMeta.from_api_data(response)
+ if raise_on_unavailable_order:
+ if trolley and trolley.input_contained_unavailable_order:
+ raise exceptions.OrderUnavailableError(
+ "inputs contained unavailable order")
+
return trolley, meta
def get_upsells(self, token=None, number_of_seats=None, discounts=None,
@@ -1278,7 +1290,8 @@ class Client(object):
def make_reservation(self, token=None, number_of_seats=None, discounts=None,
seats=None, send_codes=None, ticket_type_code=None,
performance_id=None, price_band_code=None,
- item_numbers_to_remove=None, **kwargs):
+ item_numbers_to_remove=None,
+ raise_on_unavailable_order=False, **kwargs):
"""Attempt to reserve all the items in the given trolley
@@ -1314,6 +1327,9 @@ class Client(object):
the trolley
item_numbers_to_remove: (list): list of item numbers to
remove from trolley.
+ raise_on_unavailable_order (bool): When set to ``True`` this method
+ will raise an exception when the API was not able to add an
+ order to the trolley as it was unavailable.
**kwargs: arbitary additional raw keyword arguments to add the
parameters.
@@ -1325,6 +1341,9 @@ class Client(object):
Raises:
InvalidParametersError: when there is an issue with the provided
parameters.
+ OrderUnavailableError: when ``raise_on_unavailable_order`` is set
+ to ``True`` and the requested addition to a trolley was
+ unavailable.
.. _`/f13/reserve.v1`: http://docs.ingresso.co.uk/#reserve
@@ -1342,15 +1361,22 @@ class Client(object):
reservation = Reservation.from_api_data(response)
meta = CurrencyMeta.from_api_data(response)
+ if raise_on_unavailable_order:
+ if reservation and reservation.input_contained_unavailable_order:
+ raise exceptions.OrderUnavailableError(
+ "inputs contained unavailable order")
+
return reservation, meta
- def release_reservation(self, transaction_uuid):
+ def release_reservation(self, transaction_uuid, **kwargs):
"""Release an existing reservation.
Wraps `/f13/release.v1`_
Args:
transaction_uuid (str): the identifier of the reservaiton.
+ **kwargs: arbitary additional raw keyword arguments to add the
+ parameters.
Returns:
bool: :obj:`True` if the reservation was successfully released
@@ -1361,7 +1387,8 @@ class Client(object):
"""
params = {'transaction_uuid': transaction_uuid}
- response = self.make_request('release.v1', params, method=POST)
+ kwargs.update(params)
+ response = self.make_request('release.v1', kwargs, method=POST)
return response.get('released_ok', False)
diff --git a/pyticketswitch/exceptions.py b/pyticketswitch/exceptions.py
index f88f636..3aef367 100644
--- a/pyticketswitch/exceptions.py
+++ b/pyticketswitch/exceptions.py
@@ -51,3 +51,7 @@ class BackendThrottleError(BackendError):
class CallbackGoneError(APIError):
pass
+
+
+class OrderUnavailableError(PyticketswitchError):
+ pass
diff --git a/pyticketswitch/reservation.py b/pyticketswitch/reservation.py
index b12d2bb..a75087e 100644
--- a/pyticketswitch/reservation.py
+++ b/pyticketswitch/reservation.py
@@ -46,9 +46,12 @@ class Reservation(Status):
"""
- def __init__(self, unreserved_orders=None, *args, **kwargs):
+ def __init__(self, unreserved_orders=None,
+ input_contained_unavailable_order=False, *args, **kwargs):
+
super(Reservation, self).__init__(*args, **kwargs)
self.unreserved_orders = unreserved_orders
+ self.input_contained_unavailable_order = input_contained_unavailable_order
@classmethod
def from_api_data(cls, data):
@@ -75,7 +78,9 @@ class Reservation(Status):
for order in raw_unreserved_orders
]
- inst.unreserved_orders=unreserved_orders
+ inst.unreserved_orders = unreserved_orders
+ inst.input_contained_unavailable_order = data.get(
+ 'input_contained_unavailable_order', False)
return inst
diff --git a/pyticketswitch/trolley.py b/pyticketswitch/trolley.py
index 0a78e2e..df54c75 100644
--- a/pyticketswitch/trolley.py
+++ b/pyticketswitch/trolley.py
@@ -25,11 +25,14 @@ class Trolley(JSONMixin, object):
order_count (int): the number of orders in the trolley.
purchase_result (:class:`PurchaseResult <pyticketswitch.callout.Callout>`):
the result of the purchase attempt when available.
-
+ input_contained_unavailable_order (bool): indicates that the call used
+ to create or modify this trolley object included at least one order
+ that was not available.
"""
def __init__(self, token=None, transaction_uuid=None, transaction_id=None,
bundles=None, discarded_orders=None, minutes_left=None,
- order_count=None, purchase_result=None):
+ order_count=None, purchase_result=None,
+ input_contained_unavailable_order=False):
self.token = token
self.transaction_uuid = transaction_uuid
self.transaction_id = transaction_id
@@ -38,6 +41,7 @@ class Trolley(JSONMixin, object):
self.minutes_left = minutes_left
self.order_count = order_count
self.purchase_result = purchase_result
+ self.input_contained_unavailable_order = input_contained_unavailable_order
@classmethod
def from_api_data(cls, data):
@@ -82,6 +86,8 @@ class Trolley(JSONMixin, object):
'transaction_uuid': raw_contents.get('transaction_uuid'),
'transaction_id': raw_contents.get('transaction_id'),
'order_count': data.get('trolley_order_count'),
+ 'input_contained_unavailable_order': data.get(
+ 'input_contained_unavailable_order', False),
}
minutes = data.get('minutes_left_on_reserve')
missing "input_contained_unavailable_order" flag in trolley/reservation response
Currently when you attempt to add something to a trolley that is not available (sold out, performance in the past, max tickets per order exceeded, etc.), it just appears as an empty trolley without any indication that something has gone wrong.
The API returns the `input_contained_unavailable_order` flag in its response from `trolley.v1` and `reserve.v1`, and this should be added to the trolley object. I would suggest we also look at raising an exception.
ingresso-group/pyticketswitch
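A hedged usage sketch of the new flag based on the diff above; the credentials and event parameters are hypothetical, and only `get_trolley`, `raise_on_unavailable_order`, and `OrderUnavailableError` come from the patch:
```python
from pyticketswitch import Client, exceptions

client = Client(user='demo', password='demopass')  # hypothetical credentials

try:
    trolley, meta = client.get_trolley(
        performance_id='6IF-A8B',      # hypothetical performance
        ticket_type_code='CIRCLE',     # hypothetical ticket type
        number_of_seats=2,
        raise_on_unavailable_order=True,
    )
except exceptions.OrderUnavailableError:
    # The response carried input_contained_unavailable_order=True
    print('requested order could not be added to the trolley')
```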
diff --git a/tests/test_client.py b/tests/test_client.py
index 6dfc234..c339059 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -1320,6 +1320,34 @@ class TestClient:
assert 'gbp' in meta.currencies
assert meta.default_currency_code == 'gbp'
+ def test_get_trolley_with_unavailable_order(self, client, monkeypatch):
+ """
+ This test is to check that an unavailable order doesn't raise
+ any exceptions unless `raise_on_unavailable_order` is set to true
+ """
+ response = {
+ 'trolley_contents': {},
+ 'trolley_token': 'DEF456',
+ 'currency_code': 'gbp',
+ 'input_contained_unavailable_order': True,
+ 'currency_details': {
+ 'gbp': {
+ 'currency_code': 'gbp',
+ }
+ }
+ }
+
+ mock_make_request = Mock(return_value=response)
+ monkeypatch.setattr(client, 'make_request', mock_make_request)
+
+ # this should not raise any exceptions
+ client.get_trolley()
+
+ # but this should
+ with pytest.raises(exceptions.OrderUnavailableError):
+ client.get_trolley(raise_on_unavailable_order=True)
+
+
def test_get_upsells(self, client, monkeypatch):
# fakes
response = {
@@ -1409,6 +1437,26 @@ class TestClient:
assert 'gbp' in meta.currencies
assert meta.default_currency_code == 'gbp'
+ def test_make_reservation_with_unavailable_order(self, client, monkeypatch):
+ """
+ This test is to check that an unavailable order doesn't raise
+ any exceptions unless `raise_on_unavailable_order` is set to true
+ """
+ data = {
+ "input_contained_unavailable_order": True,
+ "unreserved_orders": [],
+ }
+
+ mock_make_request = Mock(return_value=data)
+ monkeypatch.setattr(client, 'make_request', mock_make_request)
+
+ # this should not raise any exceptions
+ client.make_reservation()
+
+ # but this should
+ with pytest.raises(exceptions.OrderUnavailableError):
+ client.make_reservation(raise_on_unavailable_order=True)
+
def test_get_status(self, client, monkeypatch):
response = {
'trolley_contents': {
diff --git a/tests/test_reservation.py b/tests/test_reservation.py
index 91c0895..28bbf74 100644
--- a/tests/test_reservation.py
+++ b/tests/test_reservation.py
@@ -59,3 +59,13 @@ class TestReservation:
assert len(reservation.unreserved_orders) == 1
assert reservation.minutes_left == 15
+
+ def test_from_api_data_with_unavailable_orders(self):
+ data = {
+ "input_contained_unavailable_order": True,
+ "unreserved_orders": [],
+ }
+
+ reservation = Reservation.from_api_data(data)
+
+ assert reservation.input_contained_unavailable_order is True
diff --git a/tests/test_trolley.py b/tests/test_trolley.py
index fb9b9df..0370757 100644
--- a/tests/test_trolley.py
+++ b/tests/test_trolley.py
@@ -68,6 +68,23 @@ class TestTrolley:
assert trolley.discarded_orders[0].item == 3
assert trolley.discarded_orders[1].item == 6
+ def test_from_api_data_with_empty_trolley(self):
+ data = {
+ "discarded_orders": [],
+ "input_contained_unavailable_order": True,
+ "trolley_token": "abc123",
+ "trolley_token_contents": {
+ "trolley_bundle_count": 0,
+ "trolley_order_count": 0
+ }
+ }
+
+ trolley = Trolley.from_api_data(data)
+
+ assert trolley.token == 'abc123'
+ assert trolley.input_contained_unavailable_order is True
+
+
def test_get_events(self):
event_one = Event(id_='abc123')
diff --git a/datacompy/core.py b/datacompy/core.py
index 7fc296e..e03d75e 100644
--- a/datacompy/core.py
+++ b/datacompy/core.py
@@ -59,6 +59,8 @@ class Compare(object):
more easily track the dataframes.
df2_name : str, optional
A string name for the second dataframe
+ ignore_spaces : bool, optional
+ Flag to strip whitespace (including newlines) from string columns
Attributes
----------
@@ -70,7 +72,7 @@ class Compare(object):
def __init__(
self, df1, df2, join_columns=None, on_index=False, abs_tol=0,
- rel_tol=0, df1_name='df1', df2_name='df2'):
+ rel_tol=0, df1_name='df1', df2_name='df2', ignore_spaces=False):
if on_index and join_columns is not None:
raise Exception('Only provide on_index or join_columns')
@@ -93,7 +95,7 @@ class Compare(object):
self.rel_tol = rel_tol
self.df1_unq_rows = self.df2_unq_rows = self.intersect_rows = None
self.column_stats = []
- self._compare()
+ self._compare(ignore_spaces)
@property
def df1(self):
@@ -143,7 +145,7 @@ class Compare(object):
if len(dataframe.drop_duplicates(subset=self.join_columns)) < len(dataframe):
self._any_dupes = True
- def _compare(self):
+ def _compare(self, ignore_spaces):
"""Actually run the comparison. This tries to run df1.equals(df2)
first so that if they're truly equal we can tell.
@@ -167,8 +169,8 @@ class Compare(object):
LOG.info('Number of columns in df2 and not in df1: {}'.format(
len(self.df2_unq_columns())))
LOG.debug('Merging dataframes')
- self._dataframe_merge()
- self._intersect_compare()
+ self._dataframe_merge(ignore_spaces)
+ self._intersect_compare(ignore_spaces)
if self.matches():
LOG.info('df1 matches df2')
else:
@@ -186,7 +188,7 @@ class Compare(object):
"""Get columns that are shared between the two dataframes"""
return set(self.df1.columns) & set(self.df2.columns)
- def _dataframe_merge(self):
+ def _dataframe_merge(self, ignore_spaces):
"""Merge df1 to df2 on the join columns, to get df1 - df2, df2 - df1
and df1 & df2
@@ -262,7 +264,7 @@ class Compare(object):
'Number of rows in df1 and df2 (not necessarily equal): {}'.format(
len(self.intersect_rows)))
- def _intersect_compare(self):
+ def _intersect_compare(self, ignore_spaces):
"""Run the comparison on the intersect dataframe
This loops through all columns that are shared between df1 and df2, and
@@ -285,7 +287,8 @@ class Compare(object):
self.intersect_rows[col_1],
self.intersect_rows[col_2],
self.rel_tol,
- self.abs_tol)
+ self.abs_tol,
+ ignore_spaces)
match_cnt = self.intersect_rows[col_match].sum()
try:
@@ -570,7 +573,7 @@ def render(filename, *fields):
return file_open.read().format(*fields)
-def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0):
+def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0, ignore_spaces=False):
"""Compares two columns from a dataframe, returning a True/False series,
with the same index as column 1.
@@ -592,6 +595,8 @@ def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0):
Relative tolerance
abs_tol : float, optional
Absolute tolerance
+ ignore_spaces : bool, optional
+ Flag to strip whitespace (including newlines) from string columns
Returns
-------
@@ -616,6 +621,12 @@ def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0):
equal_nan=True))
except (ValueError, TypeError):
try:
+ if ignore_spaces:
+ if col_1.dtype.kind == 'O':
+ col_1 = col_1.str.strip()
+ if col_2.dtype.kind == 'O':
+ col_2 = col_2.str.strip()
+
if set([col_1.dtype.kind, col_2.dtype.kind]) == set(['M','O']):
compare = compare_string_and_date_columns(col_1, col_2)
else:
Would be useful to have a parameter to strip spaces for comparison
As probably expected, the following code will return a mismatch, since `'B' != 'B '`:
```
import pandas as pd
import datacompy
df1 = pd.DataFrame([
{'id': 1234, 'column_value': 'A'},
{'id': 2345, 'column_value': 'B'}])
df2 = pd.DataFrame([
{'id': 1234, 'column_value': 'A'},
{'id': 2345, 'column_value': 'B '}])
compare = datacompy.Compare(
df1,
df2,
join_columns='id',
abs_tol=0,
rel_tol=0,
)
compare.matches(ignore_extra_columns=False)
# False
# This method prints out a human-readable report summarizing and sampling differences
print(compare.report())
```
What I propose is an optional parameter to ignore differences where the only difference is leading or trailing spaces. In this example the trailing space is obvious; but when dealing with extracts from different databases or source files, without real control over their ETL, we can't always prevent these discrepancies, and we may wish to ignore them so that 'worse' mismatches stand out.
Another candidate could be ignoring case-sensitivity differences.
Of course both could easily be handled by preprocessing the dataframes, but they would still be convenient enhancements! A sketch of the proposed usage follows.
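A minimal sketch of the proposed flag, assuming the `ignore_spaces` keyword added in the diff above:
```python
import pandas as pd
import datacompy

df1 = pd.DataFrame([{'id': 1234, 'column_value': 'A'},
                    {'id': 2345, 'column_value': 'B'}])
df2 = pd.DataFrame([{'id': 1234, 'column_value': 'A'},
                    {'id': 2345, 'column_value': 'B '}])

compare = datacompy.Compare(df1, df2, join_columns='id', ignore_spaces=True)
print(compare.matches())  # True: trailing whitespace is stripped before comparing
```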
diff --git a/hashin.py b/hashin.py
index c1bb79b..1590560 100755
--- a/hashin.py
+++ b/hashin.py
@@ -58,6 +58,11 @@ parser.add_argument(
help='Verbose output',
action='store_true',
)
+parser.add_argument(
+ '--include-prereleases',
+ help='Include pre-releases (off by default)',
+ action='store_true',
+)
parser.add_argument(
'-p', '--python-version',
help='Python version to add wheels for. May be used multiple times.',
@@ -83,6 +88,10 @@ class PackageError(Exception):
pass
+class NoVersionsError(Exception):
+ """When there are no valid versions found."""
+
+
def _verbose(*args):
print('* ' + ' '.join(args))
@@ -127,6 +136,7 @@ def run_single_package(
algorithm,
python_versions=None,
verbose=False,
+ include_prereleases=False,
):
restriction = None
if ';' in spec:
@@ -143,7 +153,8 @@ def run_single_package(
version=version,
verbose=verbose,
python_versions=python_versions,
- algorithm=algorithm
+ algorithm=algorithm,
+ include_prereleases=include_prereleases,
)
package = data['package']
@@ -202,7 +213,7 @@ def amend_requirements_content(requirements, package, new_lines):
return requirements
-def get_latest_version(data):
+def get_latest_version(data, include_prereleases):
"""
Return the version string of what we think is the latest version.
In the data blob from PyPI there is the info->version key which
@@ -214,11 +225,22 @@ def get_latest_version(data):
# This feels kinda strange but it has worked for years
return data['info']['version']
all_versions = []
+ count_prereleases = 0
for version in data['releases']:
v = parse(version)
- if not v.is_prerelease:
+ if not v.is_prerelease or include_prereleases:
all_versions.append((v, version))
+ else:
+ count_prereleases += 1
all_versions.sort(reverse=True)
+ if not all_versions:
+ msg = "Not a single valid version found."
+ if not include_prereleases and count_prereleases:
+ msg += (
+ " But, found {0} pre-releases. Consider running again "
+ "with the --include-prereleases flag."
+ ).format(count_prereleases)
+ raise NoVersionsError(msg)
# return the highest non-pre-release version
return str(all_versions[0][1])
@@ -378,6 +400,7 @@ def get_package_hashes(
algorithm=DEFAULT_ALGORITHM,
python_versions=(),
verbose=False,
+ include_prereleases=False,
):
"""
Gets the hashes for the given package.
@@ -404,7 +427,7 @@ def get_package_hashes(
"""
data = get_package_data(package, verbose)
if not version:
- version = get_latest_version(data)
+ version = get_latest_version(data, include_prereleases)
assert version
if verbose:
_verbose('Latest version for {0} is {1}'.format(
@@ -472,6 +495,7 @@ def main():
args.algorithm,
args.python_version,
verbose=args.verbose,
+ include_prereleases=args.include_prereleases,
)
except PackageError as exception:
print(str(exception), file=sys.stderr)
`hashin black` fails
```
▶ hashin black
Traceback (most recent call last):
File "/usr/local/bin/hashin", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.6/site-packages/hashin.py", line 474, in main
verbose=args.verbose,
File "/usr/local/lib/python3.6/site-packages/hashin.py", line 120, in run
run_single_package(spec, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/hashin.py", line 146, in run_single_package
algorithm=algorithm
File "/usr/local/lib/python3.6/site-packages/hashin.py", line 407, in get_package_hashes
version = get_latest_version(data)
File "/usr/local/lib/python3.6/site-packages/hashin.py", line 223, in get_latest_version
return str(all_versions[0][1])
IndexError: list index out of range
```
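The crash happens because, at the time, every release of `black` on PyPI was a pre-release, so the filtered list in `get_latest_version` came out empty and `all_versions[0]` raised `IndexError`. A standalone sketch of that failure mode (release strings are hypothetical; `parse` is taken from `packaging.version` here):
```python
from packaging.version import parse

releases = ['18.3a0', '18.3a1', '18.3a2']  # hypothetical PyPI release keys
stable = [(parse(v), v) for v in releases if not parse(v).is_prerelease]
stable.sort(reverse=True)
print(stable)  # [] -- so all_versions[0][1] raised IndexError before the patch
# With the patch, this case raises NoVersionsError and suggests rerunning
# with the --include-prereleases flag.
```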
diff --git a/hxl/validation.py b/hxl/validation.py
index 3eb1eba..922bfaf 100644
--- a/hxl/validation.py
+++ b/hxl/validation.py
@@ -1508,11 +1508,6 @@ def validate(data, schema=None):
issue_map = dict()
- def make_rule_hash(rule):
- """Make a good-enough hash for a rule."""
- s = "\r".join([str(rule.severity), str(rule.description), str(rule.tag_pattern)])
- return base64.urlsafe_b64encode(hashlib.md5(s.encode('utf-8')).digest())[:8].decode('ascii')
-
def add_issue(issue):
hash = make_rule_hash(issue.rule)
issue_map.setdefault(hash, []).append(issue)
@@ -1562,9 +1557,10 @@ def make_json_report(status, issue_map, schema_url=None, data_url=None):
# add the issue objects
for rule_id, locations in issue_map.items():
- json_report['stats']['total'] += len(locations)
- json_report['stats'][locations[0].rule.severity] += len(locations)
- json_report['issues'].append(make_json_issue(rule_id, locations))
+ json_issue = make_json_issue(rule_id, locations)
+ json_report['stats']['total'] += len(json_issue['locations'])
+ json_report['stats'][locations[0].rule.severity] += len(json_issue['locations'])
+ json_report['issues'].append(json_issue)
return json_report
@@ -1581,6 +1577,15 @@ def make_json_issue(rule_id, locations):
if not description:
description = model.message
+ # get all unique locations
+ location_keys = set()
+ json_locations = []
+ for location in locations:
+ location_key = (location.row.row_number, location.column.column_number, location.value, location.suggested_value,)
+ if not location_key in location_keys:
+ json_locations.append(make_json_location(location))
+ location_keys.add(location_key)
+
# make the issue
json_issue = {
"rule_id": rule_id,
@@ -1589,7 +1594,7 @@ def make_json_issue(rule_id, locations):
"severity": model.rule.severity,
"location_count": len(locations),
"scope": model.scope,
- "locations": [make_json_location(location) for location in locations]
+ "locations": json_locations
}
return json_issue
@@ -1622,4 +1627,10 @@ def make_json_location(location):
return json_location
+
+def make_rule_hash(rule):
+ """Make a good-enough hash for a rule."""
+ s = "\r".join([str(rule.severity), str(rule.description), str(rule.tag_pattern)])
+ return base64.urlsafe_b64encode(hashlib.md5(s.encode('utf-8')).digest())[:8].decode('ascii')
+
# end
Double counting of errors in p-code / adm name combination consistency checks
When I put the file below into Data Check, I get 2 of every cell, e.g. F3, F3, F4, F4, F5, F5...
https://data.humdata.org/dataset/77c97850-4004-4285-94db-0b390a962d6e/resource/d6c0dbac-683d-42d7-82b4-a6379bd4f48e/download/mrt_population_statistics_ons_rgph_2013_2017.xlsx
HXLStandard/libhxl-python
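The patch above fixes this by deduplicating issue locations on a `(row, column, value, suggested_value)` key before counting. A standalone sketch of that dedup with hypothetical location tuples:
```python
locations = [
    (3, 'F', 'Adrar', None),
    (3, 'F', 'Adrar', None),   # duplicate produced by the double correlation
    (4, 'F', 'Tagant', None),
]
seen = set()
unique = []
for loc in locations:
    if loc not in seen:
        unique.append(loc)
        seen.add(loc)
print(len(unique))  # 2 -- duplicates no longer double-count in the stats
```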
diff --git a/tests/test_validation.py b/tests/test_validation.py
index 43ab00b..7e8b4f3 100644
--- a/tests/test_validation.py
+++ b/tests/test_validation.py
@@ -655,8 +655,8 @@ class TestValidateDataset(unittest.TestCase):
def test_double_correlation(self):
"""Test correlation when more than one column has same tagspec"""
SCHEMA = [
- ['#valid_tag', '#valid_correlation'],
- ['#adm1+code', '#adm1+name']
+ ['#valid_tag', '#description', '#valid_correlation', '#valid_value+list'],
+ ['#adm1+code', 'xxxxx', '#adm1+name', 'X001|X002']
]
DATASET = [
['#adm1+name', '#adm1+code', '#adm1+code'],
diff --git a/graphene/types/inputobjecttype.py b/graphene/types/inputobjecttype.py
index dbfccc4..b84fc0f 100644
--- a/graphene/types/inputobjecttype.py
+++ b/graphene/types/inputobjecttype.py
@@ -50,7 +50,10 @@ class InputObjectType(UnmountedType, BaseType):
yank_fields_from_attrs(base.__dict__, _as=InputField)
)
- _meta.fields = fields
+ if _meta.fields:
+ _meta.fields.update(fields)
+ else:
+ _meta.fields = fields
if container is None:
container = type(cls.__name__, (InputObjectTypeContainer, cls), {})
_meta.container = container
InputObjectType.__init_subclass_with_meta__ overwrites passed _meta.fields
In `InputObjectType.__init_subclass_with_meta__`, the `fields` of the `_meta` arg are overwritten, which can cause complications for subclassing.
```python
@classmethod
def __init_subclass_with_meta__(cls, container=None, _meta=None, **options):
    if not _meta:
        _meta = InputObjectTypeOptions(cls)

    fields = OrderedDict()
    for base in reversed(cls.__mro__):
        fields.update(
            yank_fields_from_attrs(base.__dict__, _as=InputField)
        )

    _meta.fields = fields
    # should this be:
    # if _meta.fields:
    #     _meta.fields.update(fields)
    # else:
    #     _meta.fields = fields
```
graphql-python/graphene
diff --git a/graphene/tests/issues/test_720.py b/graphene/tests/issues/test_720.py
new file mode 100644
index 0000000..8cd99bd
--- /dev/null
+++ b/graphene/tests/issues/test_720.py
@@ -0,0 +1,44 @@
+# https://github.com/graphql-python/graphene/issues/720
+# InputObjectTypes overwrite the "fields" attribute of the provided
+# _meta object, so even if dynamic fields are provided with a standard
+# InputObjectTypeOptions, they are ignored.
+
+import graphene
+
+
+class MyInputClass(graphene.InputObjectType):
+
+ @classmethod
+ def __init_subclass_with_meta__(
+ cls, container=None, _meta=None, fields=None, **options):
+ if _meta is None:
+ _meta = graphene.types.inputobjecttype.InputObjectTypeOptions(cls)
+ _meta.fields = fields
+ super(MyInputClass, cls).__init_subclass_with_meta__(
+ container=container, _meta=_meta, **options)
+
+
+class MyInput(MyInputClass):
+
+ class Meta:
+ fields = dict(x=graphene.Field(graphene.Int))
+
+
+class Query(graphene.ObjectType):
+ myField = graphene.Field(graphene.String, input=graphene.Argument(MyInput))
+
+ def resolve_myField(parent, info, input):
+ return 'ok'
+
+
+def test_issue():
+ query_string = '''
+ query myQuery {
+ myField(input: {x: 1})
+ }
+ '''
+
+ schema = graphene.Schema(query=Query)
+ result = schema.execute(query_string)
+
+ assert not result.errors
diff --git a/swagger_spec_validator/validator20.py b/swagger_spec_validator/validator20.py
index fe17ded..002eb44 100644
--- a/swagger_spec_validator/validator20.py
+++ b/swagger_spec_validator/validator20.py
@@ -268,6 +268,15 @@ def validate_defaults_in_definition(definition_spec, deref):
validate_property_default(property_spec, deref)
+def validate_arrays_in_definition(definition_spec, def_name=None):
+ if definition_spec.get('type') == 'array' and 'items' not in definition_spec:
+ raise SwaggerValidationError(
+ 'Definition of type array must define `items` property{}.'.format(
+ '' if not def_name else ' (definition {})'.format(def_name),
+ ),
+ )
+
+
def validate_definition(definition, deref, def_name=None):
definition = deref(definition)
@@ -286,6 +295,7 @@ def validate_definition(definition, deref, def_name=None):
)
validate_defaults_in_definition(definition, deref)
+ validate_arrays_in_definition(definition, def_name=def_name)
if 'discriminator' in definition:
required_props, not_required_props = get_collapsed_properties_type_mappings(definition, deref)
Spec validation will not fail if items is not present and type is array
The following spec is not valid according to the [Swagger 2.0 spec](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#parameter-object), editor.swagger.io, and the `swagger-tools` npm package.
```yaml
swagger: '2.0'
info:
title: Example
produces:
- application/json
paths:
/test:
get:
responses:
'200':
description: HTTP200
schema:
type: array
```
Error reported by editor.swagger.io

Error reported by npm
```
API Errors:
#/paths/~1test/get/responses/200/schema: Missing required property: items
1 error and 0 warnings
```
Yelp/swagger_spec_validator
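A minimal reproduction through the validator's public API; the spec dict is adapted from the YAML above, flattened into `definitions` for brevity, and `validate_spec` is assumed importable as shown:
```python
from swagger_spec_validator.common import SwaggerValidationError
from swagger_spec_validator.validator20 import validate_spec

spec = {
    'swagger': '2.0',
    'info': {'title': 'Example', 'version': '1.0'},
    'paths': {},
    'definitions': {'definition_1': {'type': 'array'}},  # missing `items`
}

try:
    validate_spec(spec)
except SwaggerValidationError as e:
    print(e)  # Definition of type array must define `items` property ...
```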
diff --git a/tests/validator20/validate_definitions_test.py b/tests/validator20/validate_definitions_test.py
index 0b61dc0..6c2b6aa 100644
--- a/tests/validator20/validate_definitions_test.py
+++ b/tests/validator20/validate_definitions_test.py
@@ -95,3 +95,30 @@ def test_api_check_default_fails(property_spec, validator, instance):
validation_error = excinfo.value.args[1]
assert validation_error.instance == instance
assert validation_error.validator == validator
+
+
+def test_type_array_with_items_succeed_validation():
+ definitions = {
+ 'definition_1': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ },
+ },
+ }
+
+ # Success if no exception are raised
+ validate_definitions(definitions, lambda x: x)
+
+
+def test_type_array_without_items_succeed_fails():
+ definitions = {
+ 'definition_1': {
+ 'type': 'array',
+ },
+ }
+
+ with pytest.raises(SwaggerValidationError) as excinfo:
+ validate_definitions(definitions, lambda x: x)
+
+ assert str(excinfo.value) == 'Definition of type array must define `items` property (definition definition_1).'
diff --git a/tests/validator20/validate_spec_test.py b/tests/validator20/validate_spec_test.py
index 5bc9e53..981255c 100644
--- a/tests/validator20/validate_spec_test.py
+++ b/tests/validator20/validate_spec_test.py
@@ -341,3 +341,37 @@ def test_failure_because_references_in_operation_responses():
validate_spec(invalid_spec)
assert 'GET /endpoint does not have a valid responses section. ' \
'That section cannot be just a reference to another object.' in str(excinfo.value)
+
+
+def test_type_array_with_items_succeed_validation(minimal_swagger_dict):
+ minimal_swagger_dict['definitions'] = {
+ 'definition_1': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ },
+ },
+ }
+
+ # Success if no exception are raised
+ validate_spec(minimal_swagger_dict)
+
+
[email protected](
+ 'swagger_dict_override',
+ (
+ {
+ 'definitions': {
+ 'definition_1': {
+ 'type': 'array',
+ },
+ },
+ },
+ )
+)
+def test_type_array_without_items_succeed_fails(minimal_swagger_dict, swagger_dict_override):
+ minimal_swagger_dict.update(swagger_dict_override)
+ with pytest.raises(SwaggerValidationError) as excinfo:
+ validate_spec(minimal_swagger_dict)
+
+ assert str(excinfo.value) == 'Definition of type array must define `items` property (definition definition_1).'
diff --git a/examples/consume.py b/examples/consume.py
index da95d9e..7344149 100644
--- a/examples/consume.py
+++ b/examples/consume.py
@@ -1,17 +1,15 @@
+import functools
+import logging
import pika
-def on_message(channel, method_frame, header_frame, body):
- channel.queue_declare(queue=body, auto_delete=True)
+LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
+ '-35s %(lineno) -5d: %(message)s')
+LOGGER = logging.getLogger(__name__)
- if body.startswith("queue:"):
- queue = body.replace("queue:", "")
- key = body + "_key"
- print("Declaring queue %s bound with key %s" %(queue, key))
- channel.queue_declare(queue=queue, auto_delete=True)
- channel.queue_bind(queue=queue, exchange="test_exchange", routing_key=key)
- else:
- print("Message body", body)
+logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
+def on_message(channel, method_frame, header_frame, body, userdata=None):
+ LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body))
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
credentials = pika.PlainCredentials('guest', 'guest')
@@ -24,7 +22,8 @@ channel.queue_declare(queue="standard", auto_delete=True)
channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key")
channel.basic_qos(prefetch_count=1)
-channel.basic_consume(on_message, 'standard')
+on_message_callback = functools.partial(on_message, userdata='on_message_userdata')
+channel.basic_consume(on_message_callback, 'standard')
try:
channel.start_consuming()
diff --git a/pika/heartbeat.py b/pika/heartbeat.py
index c02d5df..8d3d20a 100644
--- a/pika/heartbeat.py
+++ b/pika/heartbeat.py
@@ -23,13 +23,22 @@ class HeartbeatChecker(object):
:param pika.connection.Connection: Connection object
:param int interval: Heartbeat check interval. Note: heartbeats will
be sent at interval / 2 frequency.
+ :param int idle_count: The number of heartbeat intervals without data
+ received that will close the current connection.
"""
self._connection = connection
+
# Note: see the following document:
# https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout
self._interval = float(interval / 2)
- self._max_idle_count = idle_count
+
+ # Note: even though we're sending heartbeats in half the specified
+ # interval, the broker will be sending them to us at the specified
+ # interval. This means we'll be checking for an idle connection
+ # twice as many times as the broker will send heartbeats to us,
+ # so we need to double the max idle count here
+ self._max_idle_count = idle_count * 2
# Initialize counters
self._bytes_received = 0
@@ -82,9 +91,12 @@ class HeartbeatChecker(object):
been idle too long.
"""
- LOGGER.debug('Received %i heartbeat frames, sent %i',
+ LOGGER.debug('Received %i heartbeat frames, sent %i, '
+ 'idle intervals %i, max idle count %i',
self._heartbeat_frames_received,
- self._heartbeat_frames_sent)
+ self._heartbeat_frames_sent,
+ self._idle_byte_intervals,
+ self._max_idle_count)
if self.connection_is_idle:
return self._close_connection()
HeartbeatChecker is confused about heartbeat timeouts
cc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate.
`HeartbeatChecker`'s constructor presently accepts an interval value and an `idle_count` which defaults to 2. The `Connection` class instantiates `HeartbeatChecker` with `interval=heartbeat_timeout` and the default `idle_count`.
So, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. `HeartbeatChecker` will then emit heartbeats to the broker only once every 600 seconds, and it will detect a heartbeat timeout only after 1200 seconds.
So, in the event that receipt of the heartbeat by the broker is slightly delayed (and in the absence of any other AMQP frames from the client), the broker can erroneously conclude that the connection with the client is lost and prematurely close the connection.
This is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity, and it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window.
I see two problems here:
1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as `HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2)` or something like that (how often does the RabbitMQ broker send heartbeats within one heartbeat timeout interval?)
2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. Its constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.
diff --git a/stix2validator/scripts/stix2_validator.py b/stix2validator/scripts/stix2_validator.py
index 15bd7b0..8dda167 100644
--- a/stix2validator/scripts/stix2_validator.py
+++ b/stix2validator/scripts/stix2_validator.py
@@ -292,9 +292,6 @@ def main():
options = ValidationOptions(args)
try:
- # Set the output level (e.g., quiet vs. verbose)
- output.set_level(options.verbose)
-
if not options.no_cache:
init_requests_cache(options.refresh_cache)
diff --git a/stix2validator/util.py b/stix2validator/util.py
index 4da0be5..327931f 100644
--- a/stix2validator/util.py
+++ b/stix2validator/util.py
@@ -1,5 +1,7 @@
from collections import Iterable
+from .output import error, set_level, set_silent
+
class ValidationOptions(object):
"""Collection of validation options which can be set via command line or
@@ -72,6 +74,12 @@ class ValidationOptions(object):
self.refresh_cache = refresh_cache
self.clear_cache = clear_cache
+ # Set the output level (e.g., quiet vs. verbose)
+ if self.silent and self.verbose:
+ error('Error: Output can either be silent or verbose, but not both.')
+ set_level(self.verbose)
+ set_silent(self.silent)
+
# Convert string of comma-separated checks to a list,
# and convert check code numbers to names
if self.disabled:
handle options --verbose and --silent correctly
Related to #50
The correct combination of these two should be as follows:
| --verbose | --silent | desired behavior |
| --- | --- | --- |
| absent (default is False) | absent (default is False) | all messages except those printed by info |
| absent (default is False) | present (True) | no messages printed |
| present (True) | absent (default is False) | all messages, including info, are printed |
| present (True) | present (True) | error |
Current behavior is:
| --verbose | --silent | current behavior |
| --- | --- | --- |
| absent (default is False) | absent (default is False) | all messages except those printed by info |
| absent (default is False) | present (ignored, so the default: False) | all messages except those printed by info |
| present (True) | absent (default is False) | all messages, including info, are printed |
| present (True) | present (ignored, so the default: False) | all messages, including info, are printed |
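A sketch of the handling that produces the desired matrix, mirroring the diff above; the `output` helpers (`error`, `set_level`, `set_silent`) are the ones imported in the patch and are assumed to behave as their names suggest:
```python
from stix2validator.output import error, set_level, set_silent

def configure_output(verbose, silent):
    # Both flags together is a user error, per the desired-behavior table
    if verbose and silent:
        error('Error: Output can either be silent or verbose, but not both.')
    set_level(verbose)   # verbose=True also prints info-level messages
    set_silent(silent)   # silent=True suppresses all output
```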
vitaly-krugl: @lukebakken, let's work out one of these parallel pull requests first, then create the second one after that.
michaelklishin: This [rabbitmq-users thread](https://groups.google.com/d/msg/rabbitmq-users/Fmfeqe5ocTY/0fxMMVsSAgAJ) is worth mentioning.
diff --git a/examples/consume.py b/examples/consume.py
index 7344149..26e4620 100644
--- a/examples/consume.py
+++ b/examples/consume.py
@@ -1,3 +1,4 @@
+"""Basic message consumer example"""
import functools
import logging
import pika
@@ -8,26 +9,36 @@ LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
-def on_message(channel, method_frame, header_frame, body, userdata=None):
- LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body))
- channel.basic_ack(delivery_tag=method_frame.delivery_tag)
-
-credentials = pika.PlainCredentials('guest', 'guest')
-parameters = pika.ConnectionParameters('localhost', credentials=credentials)
-connection = pika.BlockingConnection(parameters)
-
-channel = connection.channel()
-channel.exchange_declare(exchange="test_exchange", exchange_type="direct", passive=False, durable=True, auto_delete=False)
-channel.queue_declare(queue="standard", auto_delete=True)
-channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key")
-channel.basic_qos(prefetch_count=1)
-
-on_message_callback = functools.partial(on_message, userdata='on_message_userdata')
-channel.basic_consume(on_message_callback, 'standard')
-
-try:
- channel.start_consuming()
-except KeyboardInterrupt:
- channel.stop_consuming()
-
-connection.close()
+def on_message(chan, method_frame, _header_frame, body, userdata=None):
+ """Called when a message is received. Log message and ack it."""
+ LOGGER.info('Userdata: %s Message body: %s', userdata, body)
+ chan.basic_ack(delivery_tag=method_frame.delivery_tag)
+
+def main():
+ """Main method."""
+ credentials = pika.PlainCredentials('guest', 'guest')
+ parameters = pika.ConnectionParameters('localhost', credentials=credentials)
+ connection = pika.BlockingConnection(parameters)
+
+ channel = connection.channel()
+ channel.exchange_declare(exchange="test_exchange",
+ exchange_type="direct",
+ passive=False,
+ durable=True,
+ auto_delete=False)
+ channel.queue_declare(queue="standard", auto_delete=True)
+ channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key")
+ channel.basic_qos(prefetch_count=1)
+
+ on_message_callback = functools.partial(on_message, userdata='on_message_userdata')
+ channel.basic_consume(on_message_callback, 'standard')
+
+ try:
+ channel.start_consuming()
+ except KeyboardInterrupt:
+ channel.stop_consuming()
+
+ connection.close()
+
+if __name__ == '__main__':
+ main()
diff --git a/pika/connection.py b/pika/connection.py
index 0c4e2a7..bed9bdb 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -1301,7 +1301,7 @@ class Connection(object):
self._backpressure_multiplier = value
#
- # Connections state properties
+ # Connection state properties
#
@property
diff --git a/pika/heartbeat.py b/pika/heartbeat.py
index 8d3d20a..7d4d7dd 100644
--- a/pika/heartbeat.py
+++ b/pika/heartbeat.py
@@ -7,38 +7,67 @@ LOGGER = logging.getLogger(__name__)
class HeartbeatChecker(object):
- """Checks to make sure that our heartbeat is received at the expected
- intervals.
+ """Sends heartbeats to the broker. The provided timeout is used to
+ determine if the connection is stale - no received heartbeats or
+ other activity will close the connection. See the parameter list for more
+ details.
"""
- DEFAULT_INTERVAL = 60
- MAX_IDLE_COUNT = 2
_CONNECTION_FORCED = 320
- _STALE_CONNECTION = "Too Many Missed Heartbeats, No reply in %i seconds"
+ _STALE_CONNECTION = "No activity or too many missed heartbeats in the last %i seconds"
- def __init__(self, connection, interval=DEFAULT_INTERVAL, idle_count=MAX_IDLE_COUNT):
- """Create a heartbeat on connection sending a heartbeat frame every
- interval seconds.
+ def __init__(self, connection, timeout):
+ """Create an object that will check for activity on the provided
+ connection as well as receive heartbeat frames from the broker. The
+ timeout parameter defines a window within which this activity must
+ happen. If not, the connection is considered dead and closed.
+
+ The value passed for timeout is also used to calculate an interval
+ at which a heartbeat frame is sent to the broker. The interval is
+ equal to the timeout value divided by two.
:param pika.connection.Connection: Connection object
- :param int interval: Heartbeat check interval. Note: heartbeats will
- be sent at interval / 2 frequency.
- :param int idle_count: The number of heartbeat intervals without data
- received that will close the current connection.
+ :param int timeout: Connection idle timeout. If no activity occurs on the
+ connection nor heartbeat frames received during the
+ timeout window the connection will be closed. The
+ interval used to send heartbeats is calculated from
+ this value by dividing it by two.
"""
+ if timeout < 1:
+ raise ValueError('timeout must be >= 1, but got %r' % (timeout,))
+
self._connection = connection
- # Note: see the following document:
+ # Note: see the following documents:
# https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout
- self._interval = float(interval / 2)
-
- # Note: even though we're sending heartbeats in half the specified
- # interval, the broker will be sending them to us at the specified
- # interval. This means we'll be checking for an idle connection
- # twice as many times as the broker will send heartbeats to us,
- # so we need to double the max idle count here
- self._max_idle_count = idle_count * 2
+ # https://github.com/pika/pika/pull/1072
+ # https://groups.google.com/d/topic/rabbitmq-users/Fmfeqe5ocTY/discussion
+ # There is a certain amount of confusion around how client developers
+ # interpret the spec. The spec talks about 2 missed heartbeats as a
+ # *timeout*, plus that any activity on the connection counts for a
+ # heartbeat. This is to avoid edge cases and not to depend on network
+ # latency.
+ self._timeout = timeout
+
+ self._send_interval = float(timeout) / 2
+
+ # Note: Pika will calculate the heartbeat / connectivity check interval
+ # by adding 5 seconds to the negotiated timeout to leave a bit of room
+ # for broker heartbeats that may be right at the edge of the timeout
+ # window. This is different behavior from the RabbitMQ Java client and
+ # the spec that suggests a check interval equivalent to two times the
+ # heartbeat timeout value. But, one advantage of adding a small amount
+ # is that bad connections will be detected faster.
+ # https://github.com/pika/pika/pull/1072#issuecomment-397850795
+ # https://github.com/rabbitmq/rabbitmq-java-client/blob/b55bd20a1a236fc2d1ea9369b579770fa0237615/src/main/java/com/rabbitmq/client/impl/AMQConnection.java#L773-L780
+ # https://github.com/ruby-amqp/bunny/blob/3259f3af2e659a49c38c2470aa565c8fb825213c/lib/bunny/session.rb#L1187-L1192
+ self._check_interval = timeout + 5
+
+ LOGGER.debug('timeout: %f send_interval: %f check_interval: %f',
+ self._timeout,
+ self._send_interval,
+ self._check_interval)
# Initialize counters
self._bytes_received = 0
@@ -47,21 +76,10 @@ class HeartbeatChecker(object):
self._heartbeat_frames_sent = 0
self._idle_byte_intervals = 0
- # The handle for the last timer
- self._timer = None
-
- # Setup the timer to fire in _interval seconds
- self._setup_timer()
-
- @property
- def active(self):
- """Return True if the connection's heartbeat attribute is set to this
- instance.
-
- :rtype True
-
- """
- return self._connection.heartbeat is self
+ self._send_timer = None
+ self._check_timer = None
+ self._start_send_timer()
+ self._start_check_timer()
@property
def bytes_received_on_connection(self):
@@ -78,74 +96,78 @@ class HeartbeatChecker(object):
to trip the max idle threshold.
"""
- return self._idle_byte_intervals >= self._max_idle_count
+ return self._idle_byte_intervals > 0
def received(self):
"""Called when a heartbeat is received"""
LOGGER.debug('Received heartbeat frame')
self._heartbeat_frames_received += 1
- def send_and_check(self):
- """Invoked by a timer to send a heartbeat when we need to, check to see
+ def _send_heartbeat(self):
+ """Invoked by a timer to send a heartbeat when we need to.
+
+ """
+ LOGGER.debug('Sending heartbeat frame')
+ self._send_heartbeat_frame()
+ self._start_send_timer()
+
+ def _check_heartbeat(self):
+ """Invoked by a timer to check for broker heartbeats. Checks to see
if we've missed any heartbeats and disconnect our connection if it's
been idle too long.
"""
+ if self._has_received_data:
+ self._idle_byte_intervals = 0
+ else:
+ # Connection has not received any data, increment the counter
+ self._idle_byte_intervals += 1
+
LOGGER.debug('Received %i heartbeat frames, sent %i, '
- 'idle intervals %i, max idle count %i',
+ 'idle intervals %i',
self._heartbeat_frames_received,
self._heartbeat_frames_sent,
- self._idle_byte_intervals,
- self._max_idle_count)
+ self._idle_byte_intervals)
if self.connection_is_idle:
- return self._close_connection()
-
- # Connection has not received any data, increment the counter
- if not self._has_received_data:
- self._idle_byte_intervals += 1
- else:
- self._idle_byte_intervals = 0
+ self._close_connection()
+ return
- # Update the counters of bytes sent/received and the frames received
- self._update_counters()
-
- # Send a heartbeat frame
- self._send_heartbeat_frame()
-
- # Update the timer to fire again
- self._start_timer()
+ self._start_check_timer()
def stop(self):
"""Stop the heartbeat checker"""
- if self._timer:
- LOGGER.debug('Removing timeout for next heartbeat interval')
- self._connection.remove_timeout(self._timer)
- self._timer = None
+ if self._send_timer:
+ LOGGER.debug('Removing timer for next heartbeat send interval')
+ self._connection.remove_timeout(self._send_timer) # pylint: disable=W0212
+ self._send_timer = None
+ if self._check_timer:
+ LOGGER.debug('Removing timer for next heartbeat check interval')
+ self._connection.remove_timeout(self._check_timer) # pylint: disable=W0212
+ self._check_timer = None
def _close_connection(self):
"""Close the connection with the AMQP Connection-Forced value."""
LOGGER.info('Connection is idle, %i stale byte intervals',
self._idle_byte_intervals)
- duration = self._max_idle_count * self._interval
- text = HeartbeatChecker._STALE_CONNECTION % duration
+ text = HeartbeatChecker._STALE_CONNECTION % self._timeout
# NOTE: this won't achieve the perceived effect of sending
# Connection.Close to broker, because the frame will only get buffered
# in memory before the next statement terminates the connection.
self._connection.close(HeartbeatChecker._CONNECTION_FORCED, text)
- self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED,
+ self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED, # pylint: disable=W0212
text)
@property
def _has_received_data(self):
- """Returns True if the connection has received data on the connection.
+ """Returns True if the connection has received data.
:rtype: bool
"""
- return not self._bytes_received == self.bytes_received_on_connection
+ return self._bytes_received != self.bytes_received_on_connection
@staticmethod
def _new_heartbeat_frame():
@@ -161,25 +183,27 @@ class HeartbeatChecker(object):
"""
LOGGER.debug('Sending heartbeat frame')
- self._connection._send_frame(self._new_heartbeat_frame())
+ self._connection._send_frame( # pylint: disable=W0212
+ self._new_heartbeat_frame())
self._heartbeat_frames_sent += 1
- def _setup_timer(self):
- """Use the connection objects delayed_call function which is
- implemented by the Adapter for calling the check_heartbeats function
- every interval seconds.
-
- """
- self._timer = self._connection.add_timeout(self._interval,
- self.send_and_check)
-
- def _start_timer(self):
- """If the connection still has this object set for heartbeats, add a
- new timer.
+ def _start_send_timer(self):
+ """Start a new heartbeat send timer."""
+ self._send_timer = self._connection.add_timeout( # pylint: disable=W0212
+ self._send_interval,
+ self._send_heartbeat)
+
+ def _start_check_timer(self):
+ """Start a new heartbeat check timer."""
+ # Note: update counters now to get current values
+ # at the start of the timeout window. Values will be
+ # checked against the connection's byte count at the
+ # end of the window
+ self._update_counters()
- """
- if self.active:
- self._setup_timer()
+ self._check_timer = self._connection.add_timeout( # pylint: disable=W0212
+ self._check_interval,
+ self._check_heartbeat)
def _update_counters(self):
"""Update the internal counters for bytes sent and received and the
HeartbeatChecker is confused about heartbeat timeouts
cc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate.
`HeartbeatChecker`'s constructor presently accepts an interval value and an `idle_count` which defaults to 2. The `Connection` class instantiates `HeartbeatChecker` with `interval=heartbeat_timeout` and the default `idle_count`.
So, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. `HeartbeatChecker` will then emit heartbeats to the broker only once every 600 seconds, and it will detect a heartbeat timeout only after 1200 seconds.
So, in the event that receipt of the heartbeat by the broker is slightly delayed (and in the absence of any other AMQP frames from the client), the broker can erroneously conclude that the connection with the client is lost and prematurely close the connection.
This is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity, and it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window.
I see two problems here:
1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as `HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2)` or something like that (how often does the RabbitMQ broker send heartbeats within one heartbeat timeout interval?)
2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. Its constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.
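Worked numbers for the 600-second example in this issue, using the intervals the reworked `HeartbeatChecker` above computes:
```python
timeout = 600                     # negotiated heartbeat timeout, in seconds

# before the fix: interval == timeout
old_send_interval = timeout       # heartbeat sent only every 600 s
old_detect_after = 2 * timeout    # idle connection detected after 1200 s

# after the rework in the diff above
send_interval = timeout / 2.0     # heartbeat sent every 300 s
check_interval = timeout + 5      # idle connection checked after 605 s

print(old_send_interval, old_detect_after)  # 600 1200
print(send_interval, check_interval)        # 300.0 605
```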
diff --git a/scripts/convert_and_dispatch_genotypes.py b/scripts/convert_and_dispatch_genotypes.py
index 18720e0..aa7959e 100644
--- a/scripts/convert_and_dispatch_genotypes.py
+++ b/scripts/convert_and_dispatch_genotypes.py
@@ -241,8 +241,8 @@ class UploadVcfToSamples(StepEPP):
# This is the first genotyping results
lims_sample.udf[submitted_genotype_udf_number_call] = nb_call
lims_sample.udf[genotype_udf_file_id] = lims_file.id
- elif lims_sample.udf.get(submitted_genotype_udf_number_call) and \
- nb_call > lims_sample.udf.get(submitted_genotype_udf_number_call):
+ elif submitted_genotype_udf_number_call in lims_sample.udf and \
+ nb_call > (lims_sample.udf.get(submitted_genotype_udf_number_call) or 0):
# This genotyping is better than before
lims_sample.udf[submitted_genotype_udf_number_call] = nb_call
lims_sample.udf[genotype_udf_file_id] = lims_file.id
ConvertGenotype does not overwrite best run = 0
In `convert_and_dispatch_genotypes.py`, the overwriting of the best-run UDF does not happen when the stored best run is `0`.
EdinburghGenomics/clarity_scripts
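The root cause is Python truthiness: a stored best run of `0` is falsy, so the old `udf.get(...)` guard skipped the update. A standalone sketch of both conditions:
```python
key = 'Number of Calls (Best Run)'
udf = {key: 0}       # previous best run was 0 calls
nb_call = 12         # new, better result

# old condition: udf.get(key) is 0, which is falsy, so the branch is skipped
old = bool(udf.get(key)) and nb_call > udf.get(key)
# fixed condition: membership test plus an `or 0` guard on the comparison
new = key in udf and nb_call > (udf.get(key) or 0)
print(old, new)  # False True
```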
diff --git a/tests/test_convert_and_dispatch_genotypes.py b/tests/test_convert_and_dispatch_genotypes.py
index 3d34ed3..020c52f 100644
--- a/tests/test_convert_and_dispatch_genotypes.py
+++ b/tests/test_convert_and_dispatch_genotypes.py
@@ -170,7 +170,7 @@ class TestUploadVcfToSamples(TestEPP):
}
self.lims_sample2.udf = {
'QuantStudio Data Import Completed #': 1,
- 'Number of Calls (Best Run)': 12,
+ 'Number of Calls (Best Run)': 0,
'Genotyping results file id': 'old_file_id'
}
mlims.upload_new_file.return_value = Mock(id='file_id')
diff --git a/mido/frozen.py b/mido/frozen.py
index 2c04f1f..20d629d 100644
--- a/mido/frozen.py
+++ b/mido/frozen.py
@@ -3,10 +3,15 @@ from .midifiles import MetaMessage, UnknownMetaMessage
class Frozen(object):
- def __repr__(self):
- text = super(Frozen, self).__repr__()
+ def __str__(self):
+ text = super(Frozen, self).__str__()
return '<frozen {}'.format(text[1:])
+ def __repr__(self):
+ # canonicalize to mutable objects
+ r = super(Frozen, self).__repr__()
+ return r[len('Frozen'):]
+
def __setattr__(self, *_):
raise ValueError('frozen message is immutable')
diff --git a/mido/messages/messages.py b/mido/messages/messages.py
index f8f7e36..f1387d4 100644
--- a/mido/messages/messages.py
+++ b/mido/messages/messages.py
@@ -54,6 +54,15 @@ class BaseMessage(object):
"""
return cl(**data)
+ def __repr__(self):
+ d = self.dict()
+ msg_type = d.pop('type')
+ items = getattr(d, 'iteritems', d.items)
+ return "%s('%s', %s)" % (
+ type(self).__name__,
+ msg_type,
+ ', '.join('%s=%s' % item for item in items()))
+
@property
def is_realtime(self):
"""True if the message is a system realtime message."""
@@ -162,9 +171,6 @@ class Message(BaseMessage):
def __str__(self):
return msg2str(vars(self))
- def __repr__(self):
- return '<message {}>'.format(str(self))
-
def _setattr(self, name, value):
if name == 'type':
raise AttributeError('type attribute is read only')
diff --git a/mido/midifiles/meta.py b/mido/midifiles/meta.py
index be9552c..8b73da0 100644
--- a/mido/midifiles/meta.py
+++ b/mido/midifiles/meta.py
@@ -535,7 +535,7 @@ class MetaMessage(BaseMessage):
encode_variable_int(len(data)) +
data)
- def __repr__(self):
+ def __str__(self):
spec = _META_SPEC_BY_TYPE[self.type]
attributes = []
for name in spec.attributes:
@@ -561,7 +561,7 @@ class UnknownMetaMessage(MetaMessage):
'data': data,
'time': time})
- def __repr__(self):
+ def __str__(self):
return ('<unknown meta message'
' type_byte=0x{:02x} '
'data={!r} time={}>').format(self.type_byte,
@@ -569,6 +569,11 @@ class UnknownMetaMessage(MetaMessage):
self.time
)
+ def __repr__(self):
+ # fix message type artifact
+ r = super(UnknownMetaMessage, self).__repr__()
+ return r.replace("'unknown_meta', ", '')
+
def __setattr__(self, name, value):
# This doesn't do any checking.
# It probably should.
diff --git a/mido/midifiles/midifiles.py b/mido/midifiles/midifiles.py
index bb885a2..f3db7cb 100644
--- a/mido/midifiles/midifiles.py
+++ b/mido/midifiles/midifiles.py
@@ -293,7 +293,8 @@ class MidiFile(object):
type=1, ticks_per_beat=DEFAULT_TICKS_PER_BEAT,
charset='latin1',
debug=False,
- clip=False
+ clip=False,
+ tracks=None
):
self.filename = filename
@@ -309,7 +310,9 @@ class MidiFile(object):
raise ValueError(
'invalid format {} (must be 0, 1 or 2)'.format(format))
- if file is not None:
+ if tracks is not None:
+ self.tracks = tracks
+ elif file is not None:
self._load(file)
elif self.filename is not None:
with io.open(filename, 'rb') as file:
@@ -461,11 +464,18 @@ class MidiFile(object):
else:
print('{!r}'.format(msg))
- def __repr__(self):
+ def __str__(self):
return '<midi file {!r} type {}, {} tracks, {} messages>'.format(
self.filename, self.type, len(self.tracks),
sum([len(track) for track in self.tracks]))
+ def __repr__(self):
+ tracks_str = ',\n'.join(repr(track) for track in self.tracks)
+ tracks_str = '\n'.join(' ' + line for line in tracks_str.splitlines())
+ tracks_str = (', tracks=[\n%s\n]' % tracks_str) if self.tracks else ''
+ return 'MidiFile(type=%s, ticks_per_beat=%s%s)' % (
+ self.type, self.ticks_per_beat, tracks_str)
+
# The context manager has no purpose but is kept around since it was
# used in examples in the past.
def __enter__(self):
diff --git a/mido/midifiles/tracks.py b/mido/midifiles/tracks.py
index 59ad1df..6140688 100644
--- a/mido/midifiles/tracks.py
+++ b/mido/midifiles/tracks.py
@@ -50,9 +50,16 @@ class MidiTrack(list):
def __mul__(self, other):
return self.__class__(list.__mul__(self, other))
- def __repr__(self):
+ def __str__(self):
return '<midi track {!r} {} messages>'.format(self.name, len(self))
+ def __repr__(self):
+ messages = ''
+ if len(self) > 0:
+ template = '[\n %s]' if len(self) > 1 else '[%s]'
+ messages = template % ',\n '.join(repr(m) for m in self)
+ return 'MidiTrack(%s)' % messages
+
def _to_abstime(messages):
"""Convert messages to absolute time."""
proper __repr__ for MidiFile, etc.
Per Python docs, `__repr__()` output "should normally look like a valid Python expression that can be used to recreate an object with the same value".
Currently for MidiFile, repr() gives the same result as str(), which is just a summary including number of tracks, etc. Likewise MidiTrack gives a summary with number of messages.
One place a proper repr() would be immediately useful is for tests, e.g. to confirm that code changes don't alter generated MIDI messages, tracks, or files. It would also facilitate viewing differences between MIDI data in text form.
With a proper repr(), it would also be trivial to implement MidiFile `__eq__` (see #150).
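A hedged sketch of what the new `__repr__` implementations above produce; the exact output is illustrative, and the `tracks=` keyword on `MidiFile` is the one added in this very diff:
```python
import mido

track = mido.MidiTrack([mido.Message('note_on', note=60, velocity=64, time=0)])
mid = mido.MidiFile(type=1, tracks=[track])

print(repr(mid))
# e.g. MidiFile(type=1, ticks_per_beat=480, tracks=[
#   MidiTrack([Message('note_on', channel=0, note=60, velocity=64, time=0)])
# ])
# A repr in this shape can round-trip through eval(), which in turn makes a
# value-based __eq__ (issue #150) straightforward.
```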
diff --git a/docker_explorer/de.py b/docker_explorer/de.py
index ea8afa3..1e78818 100644
--- a/docker_explorer/de.py
+++ b/docker_explorer/de.py
@@ -24,7 +24,6 @@ import os
from docker_explorer import errors
from docker_explorer.lib import container
-from docker_explorer.lib import storage
from docker_explorer.lib import utils
@@ -34,8 +33,6 @@ class DockerExplorer(object):
Attributes:
docker_directory (str): Path to use as the root of the Docker runtime.
Default is '/var/lib/docker'.
- storage_object (lib.Storage): The object implementing the methods for
- exploring the Docker containers.
"""
def __init__(self):
@@ -45,16 +42,12 @@ class DockerExplorer(object):
self.containers_directory = None
self.docker_directory = None
self.docker_version = 2
- self.storage_object = None
def _SetDockerDirectory(self, docker_path):
"""Sets the Docker main directory.
Args:
docker_path(str): the absolute path to the docker directory.
-
- Raises:
- errors.BadStorageException: If the storage backend couldn't be detected.
"""
self.docker_directory = docker_path
if not os.path.isdir(self.docker_directory):
@@ -67,29 +60,6 @@ class DockerExplorer(object):
self.containers_directory = os.path.join(
self.docker_directory, 'containers')
- if os.path.isfile(
- os.path.join(self.docker_directory, 'repositories-aufs')):
- # TODO: check this agains other storages in version 1.9 and below
- self.docker_version = 1
- self.storage_object = storage.AufsStorage(
- docker_directory=self.docker_directory, docker_version=1)
- elif os.path.isdir(os.path.join(self.docker_directory, 'overlay2')):
- self.storage_object = storage.Overlay2Storage(
- docker_directory=self.docker_directory)
- elif os.path.isdir(os.path.join(self.docker_directory, 'overlay')):
- self.storage_object = storage.OverlayStorage(
- docker_directory=self.docker_directory)
- elif os.path.isdir(os.path.join(self.docker_directory, 'aufs')):
- self.storage_object = storage.AufsStorage(
- docker_directory=self.docker_directory)
- if self.storage_object is None:
- err_message = (
- 'Could not detect storage system. '
- 'Make sure the docker directory ({0:s}) is correct. '
- 'If it is correct, you might want to run this script'
- ' with higher privileges.'.format(self.docker_directory))
- raise errors.BadStorageException(err_message)
-
def AddBasicOptions(self, argument_parser):
"""Adds the global options to the argument_parser.
@@ -218,7 +188,7 @@ class DockerExplorer(object):
mountpoint (str): the path to the destination mount point.
"""
container_object = self.GetContainer(container_id)
- self.storage_object.Mount(container_object, mountpoint)
+ container_object.Mount(mountpoint)
def GetContainersString(self, only_running=False):
"""Returns a string describing the running containers.
@@ -260,10 +230,6 @@ class DockerExplorer(object):
"""
print(self.GetContainersString(only_running=only_running))
- def ShowRepositories(self):
- """Displays information about the images in the Docker repository."""
- print(self.storage_object.ShowRepositories())
-
def ShowHistory(self, container_id, show_empty_layers=False):
"""Prints the modification history of a container.
@@ -274,6 +240,33 @@ class DockerExplorer(object):
container_object = self.GetContainer(container_id)
print(container_object.GetHistory(show_empty_layers))
+ def GetRepositoriesString(self):
+ """Returns information about images in the local Docker repositories.
+
+ Returns:
+ str: human readable list of images in local Docker repositories.
+ """
+ result_string = ''
+ repositories = []
+ if self.docker_version == 1:
+ repositories = [os.path.join(self.docker_directory, 'repositories-aufs')]
+ else:
+ image_path = os.path.join(self.docker_directory, 'image')
+ for storage_method in os.listdir(image_path):
+ repositories_file_path = os.path.join(
+ image_path, storage_method, 'repositories.json')
+ if os.path.isfile(repositories_file_path):
+ repositories.append(repositories_file_path)
+
+ for repositories_file_path in repositories:
+ result_string += (
+ 'Listing repositories from file {0:s}\n'.format(
+ repositories_file_path))
+ with open(repositories_file_path) as rf:
+ result_string += utils.PrettyPrintJSON(rf.read())
+
+ return result_string
+
def Main(self):
"""The main method for the DockerExplorer class.
@@ -287,7 +280,6 @@ class DockerExplorer(object):
self._SetDockerDirectory(self.docker_directory)
-
if options.command == 'mount':
self.Mount(options.container_id, options.mountpoint)
@@ -301,7 +293,7 @@ class DockerExplorer(object):
elif options.what == 'running_containers':
self.ShowContainers(only_running=True)
elif options.what == 'repositories':
- self.ShowRepositories()
+ print(self.GetRepositoriesString())
else:
raise ValueError('Unhandled command %s' % options.command)
diff --git a/docker_explorer/lib/container.py b/docker_explorer/lib/container.py
index 4321385..04cec32 100644
--- a/docker_explorer/lib/container.py
+++ b/docker_explorer/lib/container.py
@@ -18,8 +18,10 @@ from __future__ import print_function, unicode_literals
import json
import os
+import subprocess
from docker_explorer import errors
+from docker_explorer.lib import storage
from docker_explorer.lib import utils
@@ -38,11 +40,18 @@ class Container(object):
name (str): the name of the container.
running (boolean): True if the container is running.
start_timestamp (str): the container's start timestamp.
- storage_driver (str): the container's storage driver.
+ storage_name (str): the container's storage driver name.
+ storage_object (BaseStorage): the container's storage backend object.
volumes (list(tuple)): list of mount points to bind from host to the
container. (Docker storage backend v1).
"""
+ STORAGES_MAP = {
+ 'aufs': storage.AufsStorage,
+ 'overlay': storage.OverlayStorage,
+ 'overlay2': storage.Overlay2Storage
+ }
+
def __init__(self, docker_directory, container_id, docker_version=2):
"""Initializes the Container class.
@@ -88,16 +97,18 @@ class Container(object):
if json_state:
self.running = json_state.get('Running', False)
self.start_timestamp = json_state.get('StartedAt', False)
- self.storage_driver = container_info_dict.get('Driver', None)
- if self.storage_driver is None:
+ self.storage_name = container_info_dict.get('Driver', None)
+ if self.storage_name is None:
raise errors.BadContainerException(
'{0} container config file lacks Driver key'.format(
container_info_json_path))
+
+ self._SetStorage(self.storage_name)
self.volumes = container_info_dict.get('Volumes', None)
if self.docker_version == 2:
c_path = os.path.join(
- self.docker_directory, 'image', self.storage_driver, 'layerdb',
+ self.docker_directory, 'image', self.storage_name, 'layerdb',
'mounts', container_id)
with open(os.path.join(c_path, 'mount-id')) as mount_id_file:
self.mount_id = mount_id_file.read()
@@ -131,7 +142,7 @@ class Container(object):
elif self.docker_version == 2:
hash_method, layer_id = layer_id.split(':')
layer_info_path = os.path.join(
- self.docker_directory, 'image', self.storage_driver, 'imagedb',
+ self.docker_directory, 'image', self.storage_name, 'imagedb',
'content', hash_method, layer_id)
if os.path.isfile(layer_info_path):
with open(layer_info_path) as layer_info_file:
@@ -162,7 +173,7 @@ class Container(object):
elif self.docker_version == 2:
hash_method, layer_id = current_layer.split(':')
parent_layer_path = os.path.join(
- self.docker_directory, 'image', self.storage_driver, 'imagedb',
+ self.docker_directory, 'image', self.storage_name, 'imagedb',
'metadata', hash_method, layer_id, 'parent')
if not os.path.isfile(parent_layer_path):
break
@@ -204,3 +215,41 @@ class Container(object):
else:
history_str += 'Empty layer'
return history_str
+
+ def _SetStorage(self, storage_name):
+ """Sets the storage_object attribute.
+
+ Args:
+ storage_name (str): the name of the storage.
+ Raises:
+ BadContainerException: if the storage driver is not implemented.
+ """
+ storage_class = self.STORAGES_MAP.get(storage_name, None)
+
+ if storage_class is None:
+ raise errors.BadContainerException(
+ 'Storage driver {0} is not implemented'.format(storage_name))
+
+ self.storage_object = storage_class(
+ self.docker_directory, self.docker_version)
+
+ def Mount(self, mount_dir):
+ """Mounts the specified container's filesystem.
+
+ Args:
+ mount_dir (str): the path to the destination mount point
+ """
+
+ commands = self.storage_object.MakeMountCommands(self, mount_dir)
+ for c in commands:
+ print(c)
+ print('Do you want to mount this container ID: {0:s} on {1:s} ?\n'
+ '(ie: run these commands) [Y/n]'.format(self.container_id, mount_dir))
+ choice = raw_input().lower()
+ if choice in ('y', 'yes', ''):
+ for c in commands:
+ # TODO() this is quite unsafe, need to properly split args
+ subprocess.call(c, shell=True)
diff --git a/docker_explorer/lib/storage.py b/docker_explorer/lib/storage.py
index 1735fa2..2cc96c1 100644
--- a/docker_explorer/lib/storage.py
+++ b/docker_explorer/lib/storage.py
@@ -17,11 +17,8 @@
from __future__ import print_function, unicode_literals
import os
-import subprocess
import sys
-from docker_explorer.lib import utils
-
class BaseStorage(object):
"""This class provides tools to list and access containers metadata.
@@ -51,24 +48,6 @@ class BaseStorage(object):
if self.docker_version == 1:
self.container_config_filename = 'config.json'
- def ShowRepositories(self):
- """Returns information about the images in the Docker repository.
-
- Returns:
- str: human readable information about image repositories.
- """
- repositories_file_path = os.path.join(
- self.docker_directory, 'image', self.STORAGE_METHOD,
- 'repositories.json')
- if self.docker_version == 1:
- repositories_file_path = os.path.join(
- self.docker_directory, 'repositories-aufs')
- result_string = (
- 'Listing repositories from file {0:s}').format(repositories_file_path)
- with open(repositories_file_path) as rf:
- repositories_string = rf.read()
- return result_string + utils.PrettyPrintJSON(repositories_string)
-
def MakeMountCommands(self, container_object, mount_dir):
"""Generates the required shell commands to mount a container given its ID.
@@ -123,25 +102,6 @@ class BaseStorage(object):
return extra_commands
- def Mount(self, container_object, mount_dir):
- """Mounts the specified container's filesystem.
-
- Args:
- container_object (Container): the container.
- mount_dir (str): the path to the destination mount point
- """
-
- commands = self.MakeMountCommands(container_object, mount_dir)
- for c in commands:
- print(c)
- print('Do you want to mount this container Id: {0:s} on {1:s} ?\n'
- '(ie: run these commands) [Y/n]'.format(
- container_object.container_id, mount_dir))
- choice = raw_input().lower()
- if not choice or choice == 'y' or choice == 'yes':
- for c in commands:
- # TODO(romaing) this is quite unsafe, need to properly split args
- subprocess.call(c, shell=True)
class AufsStorage(BaseStorage):
"""This class implements AuFS storage specific methods."""
diff --git a/docker_explorer/lib/utils.py b/docker_explorer/lib/utils.py
index b4e9db3..cdfb2b8 100644
--- a/docker_explorer/lib/utils.py
+++ b/docker_explorer/lib/utils.py
@@ -44,5 +44,6 @@ def PrettyPrintJSON(string):
Returns:
str: pretty printed JSON string.
"""
- return json.dumps(
+ pretty_json = json.dumps(
json.loads(string), sort_keys=True, indent=4, separators=(', ', ': '))
+ return pretty_json + '\n'
Better detect overlay/overlay2 layouts
When processing an image that has both an "overlay2" and an "overlay" folder, it will take the "overlay2" folder by default even though there is no 'mounts' subfolder.
google/docker-explorer
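A minimal standalone sketch of the per-container detection approach the diff adopts: rather than inferring the storage backend from which directories happen to exist under the docker root, read the Driver key from each container's config and dispatch on its value. Paths and driver names below follow the diff; the helper function itself is illustrative, not the project's API.

import json
import os

# Illustrative sketch (not the project's API): resolve the storage driver
# for a single container from its config.v2.json, mirroring the
# Container._SetStorage logic in the diff above.
KNOWN_DRIVERS = ('aufs', 'overlay', 'overlay2')

def detect_container_driver(docker_directory, container_id):
  config_path = os.path.join(
      docker_directory, 'containers', container_id, 'config.v2.json')
  with open(config_path) as config_file:
    driver = json.load(config_file).get('Driver')
  if driver is None:
    raise ValueError('{0} lacks a Driver key'.format(config_path))
  if driver not in KNOWN_DRIVERS:
    raise ValueError('Storage driver {0} is not implemented'.format(driver))
  return driver

Because the driver is recorded per container, a docker directory that contains both an overlay and an overlay2 folder no longer forces a single global guess.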
diff --git a/tests.py b/tests.py
index f7a79f4..99ca551 100644
--- a/tests.py
+++ b/tests.py
@@ -47,7 +47,7 @@ class UtilsTests(unittest.TestCase):
test_json = json.dumps(test_dict)
expected_string = ('{\n "test": [\n {\n "dict1": {\n'
' "key1": "val1"\n }, \n'
- ' "dict2": null\n }\n ]\n}')
+ ' "dict2": null\n }\n ]\n}\n')
self.assertEqual(expected_string, utils.PrettyPrintJSON(test_json))
@@ -89,8 +89,8 @@ class TestDEMain(unittest.TestCase):
self.assertEqual(expected_error_message, err.exception.message)
-class StorageTestCase(unittest.TestCase):
- """Base class for tests of different BaseStorage implementations."""
+class DockerTestCase(unittest.TestCase):
+ """Base class for tests of different Storage implementations."""
@classmethod
def tearDownClass(cls):
@@ -98,7 +98,7 @@ class StorageTestCase(unittest.TestCase):
@classmethod
def _setup(cls, driver, driver_class):
- """Internal method to set up the TestCase on a specific storate."""
+ """Internal method to set up the TestCase on a specific storage."""
cls.driver = driver
docker_directory_path = os.path.join('test_data', 'docker')
if not os.path.isdir(docker_directory_path):
@@ -113,17 +113,17 @@ class StorageTestCase(unittest.TestCase):
def testDetectStorage(self):
"""Tests the DockerExplorer.DetectStorage function."""
- storage_object = self.de_object.storage_object
- self.assertIsNotNone(storage_object)
- self.assertIsInstance(storage_object, self.driver_class)
- self.assertEqual(storage_object.STORAGE_METHOD, self.driver)
+ for container_obj in self.de_object.GetAllContainers():
+ self.assertIsNotNone(container_obj.storage_object)
+ self.assertEqual(container_obj.storage_name, self.driver)
+ self.assertIsInstance(container_obj.storage_object, self.driver_class)
- self.assertEqual(2, storage_object.docker_version)
- self.assertEqual('config.v2.json',
- self.de_object.container_config_filename)
+ self.assertEqual(2, container_obj.docker_version)
+ self.assertEqual(
+ 'config.v2.json', container_obj.container_config_filename)
-class TestAufsStorage(StorageTestCase):
+class TestAufsStorage(DockerTestCase):
"""Tests methods in the BaseStorage object."""
@classmethod
@@ -198,12 +198,13 @@ class TestAufsStorage(StorageTestCase):
self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD ["sh"]'],
layer_info['container_config']['Cmd'])
- def testShowRepositories(self):
- """Tests the BaseStorage.ShowRepositories function on a AUFS storage."""
- result_string = self.de_object.storage_object.ShowRepositories()
+ def testGetRepositoriesString(self):
+ """Tests BaseStorage.GetRepositoriesString() on a AUFS storage."""
+ self.maxDiff = None
+ result_string = self.de_object.GetRepositoriesString()
expected_string = (
'Listing repositories from file '
- 'test_data/docker/image/aufs/repositories.json{\n'
+ 'test_data/docker/image/aufs/repositories.json\n{\n'
' "Repositories": {\n'
' "busybox": {\n'
' "busybox:latest": '
@@ -211,7 +212,7 @@ class TestAufsStorage(StorageTestCase):
'68"\n'
' }\n'
' }\n'
- '}')
+ '}\n')
self.assertEqual(expected_string, result_string)
def testMakeMountCommands(self):
@@ -219,7 +220,7 @@ class TestAufsStorage(StorageTestCase):
container_id = (
'7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966')
container_obj = self.de_object.GetContainer(container_id)
- commands = self.de_object.storage_object.MakeMountCommands(
+ commands = container_obj.storage_object.MakeMountCommands(
container_obj, '/mnt')
expected_commands = [
('mount -t aufs -o ro,br=test_data/docker/aufs/diff/test_data/docker/'
@@ -253,7 +254,7 @@ class TestAufsStorage(StorageTestCase):
self.assertEqual(expected_string, container_obj.GetHistory())
-class TestOverlayStorage(StorageTestCase):
+class TestOverlayStorage(DockerTestCase):
"""Tests methods in the OverlayStorage object."""
@classmethod
@@ -329,13 +330,13 @@ class TestOverlayStorage(StorageTestCase):
self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD ["sh"]'],
layer_info['container_config']['Cmd'])
- def testShowRepositories(self):
- """Tests the BaseStorage.ShowRepositories function on a Overlay storage."""
- result_string = self.de_object.storage_object.ShowRepositories()
+ def testGetRepositoriesString(self):
+ """Tests BaseStorage.GetRepositoriesString() on a Overlay storage."""
+ result_string = self.de_object.GetRepositoriesString()
self.maxDiff = None
expected_string = (
'Listing repositories from file '
- 'test_data/docker/image/overlay/repositories.json{\n'
+ 'test_data/docker/image/overlay/repositories.json\n{\n'
' "Repositories": {\n'
' "busybox": {\n'
' "busybox:latest": "sha256:'
@@ -346,7 +347,7 @@ class TestOverlayStorage(StorageTestCase):
'2c3"\n'
' }\n'
' }\n'
- '}')
+ '}\n')
self.assertEqual(expected_string, result_string)
def testMakeMountCommands(self):
@@ -354,7 +355,7 @@ class TestOverlayStorage(StorageTestCase):
container_id = (
'5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a')
container_obj = self.de_object.GetContainer(container_id)
- commands = self.de_object.storage_object.MakeMountCommands(
+ commands = container_obj.storage_object.MakeMountCommands(
container_obj, '/mnt')
expected_commands = [(
'mount -t overlay overlay -o ro,lowerdir='
@@ -381,7 +382,7 @@ class TestOverlayStorage(StorageTestCase):
self.assertEqual(expected_string, container_obj.GetHistory())
-class TestOverlay2Storage(StorageTestCase):
+class TestOverlay2Storage(DockerTestCase):
"""Tests methods in the Overlay2Storage object."""
@classmethod
@@ -457,13 +458,13 @@ class TestOverlay2Storage(StorageTestCase):
self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD ["sh"]'],
layer_info['container_config']['Cmd'])
- def testShowRepositories(self):
- """Tests the BaseStorage.ShowRepositories function on a Overlay2 storage."""
- result_string = self.de_object.storage_object.ShowRepositories()
+ def testGetRepositoriesString(self):
+ """Tests BaseStorage.GetRepositoriesString() on a Overlay2 storage."""
+ result_string = self.de_object.GetRepositoriesString()
self.maxDiff = None
expected_string = (
'Listing repositories from file '
- 'test_data/docker/image/overlay2/repositories.json{\n'
+ 'test_data/docker/image/overlay2/repositories.json\n{\n'
' "Repositories": {\n'
' "busybox": {\n'
' "busybox:latest": "sha256:'
@@ -474,7 +475,14 @@ class TestOverlay2Storage(StorageTestCase):
'c7"\n'
' }\n'
' }\n'
- '}')
+ '}\n'
+ 'Listing repositories from file '
+ 'test_data/docker/image/overlay/repositories.json\n'
+ '{\n'
+ ' "Repositories": {}\n'
+ '}\n')
self.assertEqual(expected_string, result_string)
def testMakeMountCommands(self):
@@ -483,7 +491,7 @@ class TestOverlay2Storage(StorageTestCase):
container_id = (
'8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206')
container_obj = self.de_object.GetContainer(container_id)
- commands = self.de_object.storage_object.MakeMountCommands(
+ commands = container_obj.storage_object.MakeMountCommands(
container_obj, '/mnt')
expected_commands = [(
'mount -t overlay overlay -o ro,lowerdir='
@@ -511,7 +519,7 @@ class TestOverlay2Storage(StorageTestCase):
'with command : /bin/sh -c #(nop) CMD ["sh"]')
self.assertEqual(expected_string, container_obj.GetHistory(container_obj))
-del StorageTestCase
+del DockerTestCase
if __name__ == '__main__':
unittest.main()
diff --git a/driving_gridworld/road.py b/driving_gridworld/road.py
index cb519ef..559362f 100644
--- a/driving_gridworld/road.py
+++ b/driving_gridworld/road.py
@@ -142,13 +142,12 @@ def combinations(iterable, r, collection=tuple):
class Road(object):
- def __init__(self, num_rows, car, obstacles, speed_limit):
- if speed_limit < car.speed:
+ def __init__(self, num_rows, car, obstacles):
+ if num_rows + 1 < car.speed:
raise ValueError("Car's speed above speed limit!")
self._num_rows = num_rows
self._num_columns = 4
self._car = car
- self._speed_limit = speed_limit
self._obstacles = obstacles
self._available_spaces = {}
for pos in product(range(0, self._car.speed), range(4)):
@@ -159,6 +158,20 @@ class Road(object):
if disallowed_position in self._available_spaces:
del self._available_spaces[disallowed_position]
+ def speed_limit(self):
+ '''The hard speed limit on this road.
+
+ Taking the `UP` action when traveling at the speed limit has no effect.
+
+ Set according to the headlight range since overdriving the
+ headlights too much breaks the physical plausibility of the game
+ due to the way we reuse obstacles to simulate arbitrarily long
+ roads with many obstacles. This is not too much of a restriction
+ though because even overdriving the headlights by one unit is
+ completely unsafe.
+ '''
+ return self._num_rows + 1
+
def obstacle_outside_car_path(self, obstacle):
return (obstacle.col < 0 or obstacle.col >= self._num_columns
or obstacle.row >= self._num_rows)
@@ -198,7 +211,7 @@ class Road(object):
state. The reward function is deterministic.
'''
- next_car = self._car.next(action, self._speed_limit)
+ next_car = self._car.next(action, self.speed_limit())
for positions, reveal_indices in (
self.every_combination_of_revealed_obstacles()):
@@ -225,8 +238,7 @@ class Road(object):
reward += self._car.reward()
if self._car.col == 0 or self._car.col == 3:
reward -= 4 * self._car.speed
- next_road = self.__class__(self._num_rows, next_car,
- next_obstacles, self._speed_limit)
+ next_road = self.__class__(self._num_rows, next_car, next_obstacles)
yield (next_road, prob, reward)
def to_key(self, show_walls=False):
Enforce a hard limit on the speed limit in `Road`: the number of rows + 1
If the speed limit is larger than this, the physical plausibility of the simulation breaks, because the number of possible obstacle encounters across a fixed distance can depend on the car's speed and the range of its headlights (the number of rows).
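A self-contained illustration of the new invariant, using a stand-in class rather than the project's Road (whose real constructor takes a car and obstacles): the hard speed limit is num_rows + 1, and a faster car is rejected at construction time.

class RoadSketch(object):
  # Stand-in for Road: keeps only the speed-limit invariant.
  def __init__(self, num_rows, car_speed):
    if num_rows + 1 < car_speed:
      raise ValueError("Car's speed above speed limit!")
    self._num_rows = num_rows

  def speed_limit(self):
    # Hard limit tied to the headlight range (number of visible rows).
    return self._num_rows + 1

road = RoadSketch(num_rows=4, car_speed=5)  # at the limit: allowed
print(road.speed_limit())  # 5
try:
  RoadSketch(num_rows=4, car_speed=6)  # one over the limit
except ValueError as error:
  print(error)  # Car's speed above speed limit!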